| Column | Type | Values / range |
| --- | --- | --- |
| `repo_name` | string | lengths 7–111 |
| `__id__` | int64 | 16.6k–19,705B |
| `blob_id` | string | lengths 40–40 |
| `directory_id` | string | lengths 40–40 |
| `path` | string | lengths 5–151 |
| `content_id` | string | lengths 40–40 |
| `detected_licenses` | list | |
| `license_type` | string | 2 classes |
| `repo_url` | string | lengths 26–130 |
| `snapshot_id` | string | lengths 40–40 |
| `revision_id` | string | lengths 40–40 |
| `branch_name` | string | lengths 4–42 |
| `visit_date` | timestamp[ns] | |
| `revision_date` | timestamp[ns] | |
| `committer_date` | timestamp[ns] | |
| `github_id` | int64 | 14.6k–687M ⌀ |
| `star_events_count` | int64 | 0–209k |
| `fork_events_count` | int64 | 0–110k |
| `gha_license_id` | string | 12 classes |
| `gha_fork` | bool | 2 classes |
| `gha_event_created_at` | timestamp[ns] | |
| `gha_created_at` | timestamp[ns] | |
| `gha_updated_at` | timestamp[ns] | |
| `gha_pushed_at` | timestamp[ns] | |
| `gha_size` | int64 | 0–10.2M ⌀ |
| `gha_stargazers_count` | int32 | 0–178k ⌀ |
| `gha_forks_count` | int32 | 0–88.9k ⌀ |
| `gha_open_issues_count` | int32 | 0–2.72k ⌀ |
| `gha_language` | string | lengths 1–16 ⌀ |
| `gha_archived` | bool | 1 class |
| `gha_disabled` | bool | 1 class |
| `content` | string | lengths 10–2.95M |
| `src_encoding` | string | 5 classes |
| `language` | string | 1 class |
| `is_vendor` | bool | 2 classes |
| `is_generated` | bool | 2 classes |
| `length_bytes` | int64 | 10–2.95M |
| `extension` | string | 19 classes |
| `num_repo_files` | int64 | 1–202k |
| `filename` | string | lengths 4–112 |
| `num_lang_files` | int64 | 1–202k |
| `alphanum_fraction` | float64 | 0.26–0.89 |
| `alpha_fraction` | float64 | 0.2–0.89 |
| `hex_fraction` | float64 | 0–0.09 |
| `num_lines` | int32 | 1–93.6k |
| `avg_line_length` | float64 | 4.57–103 |
| `max_line_length` | int64 | 7–931 |

(⌀ marks columns that contain missing values.)
pravin-asp/Python-Learnings | 8,057,358,679,883 | 924639713c95c9aa31fe1bf4bc066bb760943d28 | d583d57ebb0bfcd22e3660ca7265076d8eaa587c | /Arguments.py | 203d268751492feb3879be0d950f5d0867c498d6 | []
| no_license | https://github.com/pravin-asp/Python-Learnings | 59bc2ad021d170eac03187023aed3899f0b51525 | e26c67b8c5396e88df801f7c826a7830e39f5cd2 | refs/heads/master | 2023-04-09T23:20:04.922036 | 2021-04-13T10:08:16 | 2021-04-13T10:08:16 | 347,560,715 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Types of Arguments
# 1. Positional Arguments
# 2. Keyword Arguments
# 3. Default Arguments
# 4. Variable length Arguments
# Positional Arguments
def add(n1, n2):
print(n1 + n2)
add(10, 20)
# Keyword Arguments
def wish(name, msg):
print('Hi', name, msg)
wish(msg = 'Welcome', name = 'Python')
wish(name = 'Welcome', msg = 'Python')
wish('Welcome', msg = 'Python')
#wish(name = 'Welcome', 'Python') # SyntaxError: positional argument follows keyword argument
# Default Arguments
def wish(a, n, name = 'Admin'):
print('Hi', name, n, a)
wish(20, 10, name = 'Python')
wish(10, 20)
# Variable Length Arguments
def CalculateTot(*n):
total = 0
for subject in n:
total += subject
print(total)
CalculateTot(90)
CalculateTot(100, 39, 4)
CalculateTot(123, 49, 99, 49, 49)
CalculateTot()
# Keyword Variable Length Arguments
def CalculateTot(**n): # keyword arguments --> dictionary
for sub, mark in n.items():
print(sub, 'scored', mark)
CalculateTot(tamil = 90)
CalculateTot(tamil = 100, english = 39, maths = 4)
#CalculateTot()
# Function --> set or group of instructions with a name
# Module --> Set or group of functions saved to a file
# Library --> set or group of modules
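# Extra illustration (added; not part of the original lesson):
# one signature can mix positional, variable-length and default arguments.
def report(student, *marks, subject='Total'):
    print(student, subject, sum(marks))
report('Ravi', 90, 80, 70) # Ravi Total 240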
| UTF-8 | Python | false | false | 1,149 | py | 100 | Arguments.py | 100 | 0.677981 | 0.636205 | 0 | 59 | 18.474576 | 56 |
menshikoff/PY111-april | 7,567,732,418,310 | 3eda8b2647ce240a10a6f167f9d7abc116dafcc6 | ce0c55fe1c66fb88871026720fdd61fce49ac850 | /Tests/b0_test_linear_search.py | 6474ae82ece0339f1656f1c145addd8e634380f3 | []
| no_license | https://github.com/menshikoff/PY111-april | 6ebb30503ee6dfa265e3f1d25ad8af3fc57b9b0d | 069254cd083cbab6a7732c0e42e4686b60c18e3f | refs/heads/master | 2020-05-16T14:17:52.018093 | 2019-04-30T09:04:01 | 2019-04-30T09:04:01 | 182,120,177 | 2 | 0 | null | true | 2019-04-18T16:17:00 | 2019-04-18T16:17:00 | 2019-04-18T15:35:19 | 2019-04-18T16:11:41 | 22 | 0 | 0 | 0 | null | false | false | import unittest
import random
from Tasks.b0_linear_search import min_search
class MyTestCase(unittest.TestCase):
def test_min(self):
arr = [i for i in range(1, 10)]
self.assertEqual(min_search(arr), 0, "Minimal element is wrong, right answer: " + str(min(arr)))
def test_min_again(self):
arr = [i for i in range(10, -3, -1)]
self.assertEqual(min_search(arr), 12, "Minimal element is wrong, right answer: " + str(min(arr)))
def test_min_one_more(self):
arr = [random.randint(-100, 100) for _ in range(300)]
self.assertEqual(min_search(arr), arr.index(min(arr)), "Minimal element is wrong, right answer: " + str(min(arr)))
if __name__ == '__main__':
unittest.main()
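# A minimal sketch of the function under test (an assumption drawn from the
# assertions above: min_search returns the *index* of the smallest element):
#
# def min_search(arr):
#     min_index = 0
#     for i, value in enumerate(arr):
#         if value < arr[min_index]:
#             min_index = i
#     return min_index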
| UTF-8 | Python | false | false | 687 | py | 11 | b0_test_linear_search.py | 10 | 0.6754 | 0.646288 | 0 | 22 | 30.227273 | 116 |
MyRobotLab/InMoov2 | 4,226,247,863,374 | 78cd232b80b3327ac290226a007805e0946748a1 | 399ec2999d570873246ba14b2850d2e4a9c3cc76 | /system/startScripts/InMoovCustom_start.py | 360ff6511eda93a8cd9261c5df4d013cc8c18fd7 | [
"Apache-2.0"
]
| permissive | https://github.com/MyRobotLab/InMoov2 | c0b24ce2d6543285487dac7236265a066e4b818f | e45749b5a03d012382d59fca2bb1b3cc9a82f4d1 | refs/heads/master | 2023-08-31T15:52:46.823844 | 2023-08-30T19:32:03 | 2023-08-30T19:32:03 | 230,450,014 | 7 | 5 | Apache-2.0 | false | 2023-09-05T22:15:20 | 2019-12-27T13:36:27 | 2023-08-20T16:26:35 | 2023-09-05T22:15:19 | 59,612 | 8 | 5 | 5 | Python | false | false | #########################################
# i01_InMoovCustom_start.py
# categories: inmoov2
# more info @: http://myrobotlab.org/service/InMoov
#########################################
import os
ThisFilePart = 'data/InMoov2/InMoovCustom.py'
customFilename="InMoovCustom.py"
def saveCustom(customFilename):
customPath='data/InMoov2/'
customFile = customFilename
customWriter = 0
customWriter = open(customPath+customFile, "w+")
L =[
"###############################################################################\n"
"# InMoovCustom.py\n"
"# categories: inmoov2\n"
"# more info @: http://myrobotlab.org/service/InMoov\n"
"# #############################################################################\n"
"# YOUR INMOOV CUSTOM SCRIPT\n"
"# Here you can add your own commands to play and test with InMoov\n"
"# This python script is located in the directory data/InMoov2/\n"
"# Those commands are safe, you can copy them to your other MRL versions\n"
"# ##############################################################################\n"
"\n"
"\n"
"## These samples would be executed when starting i01:\n"
"\n"
"## Play a neoPixel animation while the robot speaking\n"
"#i01_neoPixel.playAnimation('Flash Random', 255, 255, 255, 1)\n"
"## Talk something\n"
"#i01_mouth.speakBlocking('he is a replicant, or not?')\n"
"## Stop neoPixel\n"
"#i01_neoPixel.stopAnimation()\n"
"## Move the index servo\n"
"#i01_rightHand_index.moveTo(20)\n"
"\n"
"## Another example, this could be executed via aiml:\n"
"def myScripts():\n"
" #execfile('data/InMoov2/myScript1.py')\n"
" execfile('myScript2.py')\n"
"\n"
"## Another example, that could be executed via aiml:\n"
"def myScript2():\n"
" print('I feel good')\n"
"\n"
]
customWriter.writelines(L)
customWriter.close()
def CheckFileExist(File):
if not os.path.isfile(File):
saveCustom(customFilename)
runtime.info("custom file created : data/InMoov2/InMoovCustom.py")
python.execFile('data/InMoov2/InMoovCustom.py')
#execfile('data/InMoov2/InMoovCustom.py')
else:
python.execFile('data/InMoov2/InMoovCustom.py')
#execfile('data/InMoov2/InMoovCustom.py')
def CheckDirectoryExist():
if not os.path.exists("data/InMoov2"):
os.makedirs("data/InMoov2")
CheckDirectoryExist()
CheckFileExist(ThisFilePart)
| UTF-8 | Python | false | false | 2,508 | py | 677 | InMoovCustom_start.py | 266 | 0.559809 | 0.543461 | 0 | 70 | 33.814286 | 88 |
Caoang327/EECS545Project-Efficient-Online-Bandit-Multiclass-learning | 14,224,931,697,763 | cf9d7eab92a64f7584aa864b965417b8648b58bd | 2862f9aa32fb45867d0f8c1ce81583e2958691af | /PNewtron_algorithm/PWNetron_cov_v3.py | d57a4ff538588257c113eaca3c4e2b8d99d89f70 | []
| no_license | https://github.com/Caoang327/EECS545Project-Efficient-Online-Bandit-Multiclass-learning | 3e1738db078740639c3238c47867d947bd6cef87 | 13721b686bb19ae7ea6c7bc18a98bf15b6ce246b | refs/heads/master | 2020-04-10T11:28:11.437694 | 2018-12-14T03:52:48 | 2018-12-14T03:52:48 | 160,994,524 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import scipy.io as sio
X = np.loadtxt('CovtypedataX.dat')
Y = np.loadtxt('CovtypedataY.dat')
def predict_label(W,x):
out = np.dot(W,x)
return np.argmax(out)+1
def compute_P(W,x,alpha=10):
p = alpha*np.dot(W,x)
e_p = np.exp(p - np.max(p))
soft_max = e_p / e_p.sum()
return soft_max
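# Added note: subtracting np.max(p) before exponentiating does not change the
# softmax (the factor exp(-max) cancels in the ratio) but avoids overflow,
# e.g. np.exp(1000.) is inf while the shifted exponent stays bounded by 0.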
def random_sample(P,p):
P_accu = 0
index = 0
for i in range(P.shape[0]):
P_accu = P_accu + P[i,0]
if P_accu > p:
index = i+1
break
return index
gamma = 2**(-7)
alpha = 10
betta = 0.01
k = 7
d = X.shape[0]
D = 1
correct = 0
t = 0
W = np.zeros([k,d])
W_slid = W
np.random.seed(0)
A_accu = 1/D
bt = 0
counter = 0
accu = np.zeros([X.shape[1],1])
print_fre = 100
for i in range(X.shape[1]):
counter = counter + 1
x = X[:,i].reshape(-1,1)
y = int(Y[i])
pt = compute_P(W_slid, x, alpha)
##pt = compute_P2(W_slid,x)
pt_silde = (1 - gamma) * pt + gamma / k
##if np.random.random() >= gamma:
## W = W_slid
## y_hat = predict_label(W,x)
## else:
## W = np.zeros([k,d])
## roll = np.random.randint(1,k+1)
## y_hat = roll
y_hat = random_sample(pt_silde, np.random.random())
if y_hat == y:
eyt = np.zeros([k, 1])
eyt[y - 1][0] = 1
delta = (1 - pt[y - 1, 0]) / (pt_silde[y - 1, 0]) * np.kron((1 / k - eyt), x)
kt = pt_silde[y - 1, 0]
correct += 1
else:
eyt = np.zeros([k, 1])
eyt[y_hat - 1, 0] = 1
delta = (pt[y_hat - 1, 0] / pt_silde[y_hat - 1, 0]) * np.kron(eyt - 1 / k, x)
kt = 1
delta = delta.reshape(-1, 1)
A_accu = A_accu + kt * betta * (delta ** 2)
W_T = W_slid
W_Slack = W_T.reshape(-1, 1)
bt = bt + (1 - kt * betta * np.dot(delta.reshape(1, -1), W_Slack)) * delta
W_slid = -(1.0 / A_accu) * bt
W_slid = W_slid.reshape([k, -1])
accu[i, 0] = correct * 1.0 / counter
if counter % print_fre == 1:
print(counter)
print(correct * 1.0 / counter)
file_name = 'PWNeutron_accu_cov_g_'+str(gamma)+'.mat'
sio.savemat(file_name,{'accu':accu}) | UTF-8 | Python | false | false | 2,107 | py | 15 | PWNetron_cov_v3.py | 13 | 0.50878 | 0.472235 | 0 | 77 | 26.376623 | 85 |
emna7/holbertonschool-web_back_end | 16,234,976,402,694 | 822e235ec393f5580eaeb380d0315e399d974ac1 | 9404a8593ff2d82133897c9e187523d301df7888 | /0x09-Unittests_and_integration_tests/test_utils.py | 392c7ef62fb9c858277dc360bb7c4c0be4a44612 | []
| no_license | https://github.com/emna7/holbertonschool-web_back_end | ac2bc16e47f464530c4dee23497488c77377977e | 744e6cb3bb67b2caa30f967708243b5474046961 | refs/heads/main | 2023-03-06T17:56:10.699982 | 2021-02-12T21:24:04 | 2021-02-12T21:24:04 | 305,394,170 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
""" Test SUITE Unittest module Task """
import requests
from unittest import mock
from unittest.mock import patch, PropertyMock
import unittest
from parameterized import parameterized
from utils import access_nested_map, get_json, memoize
class TestAccessNestedMap(unittest.TestCase):
""" Class for testing Nested Map function """
@parameterized.expand([
({"a": 1}, ("a",), 1),
({"a": {"b": 2}}, ("a",), {'b': 2}),
({"a": {"b": 2}}, ("a", "b"), 2)
])
def test_access_nested_map(self, map, path, expected_output):
""" Test method return output """
real_output = access_nested_map(map, path)
self.assertEqual(real_output, expected_output)
@parameterized.expand([
({}, ("a",)),
({"a": 1}, ("a", "b"))
])
def test_access_nested_map_exception(self, nested_map, path):
'''
Tests access_nested_map for raised expections.
'''
self.assertRaises(KeyError, access_nested_map, nested_map, path)
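# Reference sketch of the function under test (inferred from the cases above,
# not the actual utils implementation):
#
# def access_nested_map(nested_map, path):
#     for key in path:
#         nested_map = nested_map[key]  # raises KeyError on a missing key
#     return nested_map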
class TestGetJson(unittest.TestCase):
'''
get_json tests.
'''
@parameterized.expand([
('http://example.com', {"payload": True}),
('http://holberton.io', {"payload": False})
])
def test_get_json(self, url, expected_result):
'''
Tests if get_json function returns the expected result.
'''
with mock.patch('utils.requests') as mock_request:
mock_request.get.return_value = expected_result
x = mock_request.get(url)
self.assertEqual(expected_result, x)
class TestMemoize(unittest.TestCase):
"""
utils.memoize tests.
"""
def test_memoize(self):
'''
Test memoize.
'''
class TestClass:
''' TestClass for memoize. '''
def a_method(self):
''' Returns 42. '''
return 42
@memoize
def a_property(self):
''' Returns the class a_method . '''
return self.a_method()
with patch.object(TestClass, 'a_method') as am:
am.return_value = 42
tc = TestClass()
self.assertEqual(tc.a_property, am.return_value)
self.assertEqual(tc.a_property, am.return_value)
am.assert_called_once()
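# For reference, a memoize decorator consistent with this test would have to
# return a property (tc.a_property is accessed, not called). Hypothetical sketch:
#
# def memoize(fn):
#     attr = '_memoized_' + fn.__name__
#     @property
#     def wrapper(self):
#         if not hasattr(self, attr):
#             setattr(self, attr, fn(self))
#         return getattr(self, attr)
#     return wrapper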
| UTF-8 | Python | false | false | 2,348 | py | 61 | test_utils.py | 54 | 0.555792 | 0.54983 | 0 | 78 | 29.102564 | 72 |
keiichi-ando/py-trakaddress | 6,236,292,513,858 | 09c03a8cb449abd95ffd0d2c4bbc2b867f8ff329 | 68fd9acf6dcb916a6ddfdfddef14da4719296c34 | /main.py | da42916c57c5d867c8d6c7128dfffd7ee65f674f | []
| no_license | https://github.com/keiichi-ando/py-trakaddress | f842214747f98c5e7cae2aa5a3d67057545c2016 | 454b1a9a9a20528342bb85ef5083bf40c04e8844 | refs/heads/main | 2023-04-10T02:44:49.092263 | 2021-04-22T14:17:10 | 2021-04-22T14:17:10 | 352,099,236 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import sys
import src.settings
from src.master.address import AddressUtil
from src.master.address import AddressParser
from model.address import Address
if __name__ == "__main__":
args = sys.argv
if (len(args) > 1):
if (args[1] == "getzip"):
_util = AddressUtil()
_util.getzip()
# _util.zip_extract('/home/ando/python/address/src/master/../../data/ken_all.zip')
else:
_parser = AddressParser()
_myad = Address(args[1])
_myad = _parser.parse(_myad)
print(_myad.full, chr(9), _myad.pref, chr(9), _myad.city, chr(9), _myad.town)
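# Assumed usage (inferred from the branches above, not from any docs):
#   python main.py getzip        # download the ken_all.zip postal data
#   python main.py "<address>"   # parse the given address string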
| UTF-8 | Python | false | false | 663 | py | 10 | main.py | 7 | 0.573152 | 0.562594 | 0 | 23 | 27.782609 | 94 |
fengqilr/util | 17,549,236,377,528 | e89e5379edf7fb1fd3d76bb59d497afe9b5f335b | ce6c32c0f6af8830b2fcd04ae5adec05dafb4782 | /utility.py | ad6da6a5dc207a02453a0bcaf675acde4c97e16d | []
| no_license | https://github.com/fengqilr/util | 27efdb54d531e3d37bee72610f725f0ee86f1383 | 8e4eca2cd582c90ce77d13bf06a4393eb9ab89b3 | refs/heads/master | 2020-12-06T13:36:58.949121 | 2017-07-31T05:40:33 | 2017-07-31T05:40:33 | 66,831,606 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import copy
from pymongo import MongoClient
from pymongo import MongoReplicaSetClient
from pymongo import ReadPreference
import traceback
import socket
from email.mime.text import MIMEText
import smtplib
import logging
import os
from sklearn import cluster, covariance, manifold, metrics
from sklearn import cross_validation
from sklearn.grid_search import ParameterGrid
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import datetime
# import xgboost
###### logger module ##########
# logger = logging.getLogger('logger_name')
# logger.setLevel(logging.INFO)
# formatter = logging.Formatter('[%(asctime)s %(levelname)s]: %(message)s')
# fh = logging.FileHandler(os.path.join(os.getcwd(), 'logger_name'))
# ch = logging.StreamHandler()
# fh.setFormatter(formatter)
# ch.setFormatter(formatter)
# logger.addHandler(fh)
# logger.addHandler(ch)
# ##### connect to MySQL module
# import MySQLdb
# with MySQLdb.connect(host='127.0.0.1', port=3306, user='root', passwd='lr187703', db='tieba_new_word_model') as conn:
# cur = conn.cursor()
# conn.set_character_set('utf8')
# cur.execute('SET NAMES utf8;')
# cur.execute('SET CHARACTER SET utf8;')
# cur.execute('SET character_set_connection=utf8;')
def affinity_propagation_cluster(sim_matrix, variate_list, preference_value=1):
"""
    Cluster with the affinity_propagation algorithm and plot the result
:param sim_matrix:
:return:
"""
grouping, labels = cluster.affinity_propagation(sim_matrix, preference=preference_value*np.median(sim_matrix))
result_df = pd.DataFrame()
result_df['variate'] = variate_list
result_df['cluster'] = list(labels)
result_df['variate'] = result_df['variate'].map(lambda i: "%s, " % i)
result_df = result_df.groupby(['cluster']).sum()
result_df['variate'] = result_df['variate'].map(lambda j: j[:-2])
print "==="*10, u"聚类分组结果输出", "==="*10
for cluster_index, values_data in result_df.iterrows():
print('Cluster %i: %s' % ((cluster_index + 1), values_data.to_dict().get('variate')))
return result_df
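# Hypothetical usage (names are placeholders, not from this module's callers):
#
# sim = calculate_cos_sim_matrix(feature_vectors)
# groups_df = affinity_propagation_cluster(sim, variate_names, preference_value=1)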
def cross_validate_model_param(clf, param, dataset, label):
"""
    Store the model's cross-validation results in the database
    :param clf: model
    :param param: parameters
    :param dataset: X data
    :param label: y data
:param k_fold:
:param scoring:
:param table: table name
:return:
"""
scores = cross_validation.cross_val_score(clf, X=dataset, y=label, scoring=param['scoring'], cv=param['cv'])
param.update({"score": scores.mean(), 'updt': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
print "%s: %s" % (param['scoring'], scores.mean())
with MongodbUtils(ip="localhost", port=37017, collection="model_result", table=param['table']) as col:
col.insert(param)
def cross_validate_model_param_v2(clf, param, dataset, label):
"""
    Store the model's cross-validation results in the database
    :param clf: model
    :param param: parameters
    :param dataset: X data
    :param label: y data
:param k_fold:
:param scoring:
:param table: table name
:return:
"""
# scores = cross_validation.cross_val_score(clf, X=dataset, y=label, scoring=param['scoring'], cv=param['cv'])
rst = []
k_fold = cross_validation.KFold(n=dataset.shape[0], n_folds=param['cv'], shuffle=True)
for train_idx, test_idx in k_fold:
x_train = dataset[train_idx]
y_train = label[train_idx]
x_test = dataset[test_idx]
y_test = label[test_idx]
clf.fit(x_train, y_train)
y_train_predict = clf.predict(x_train)
y_test_predict = clf.predict(x_test)
score_dict = {}
print metrics.classification_report(y_test, y_test_predict)
if "f1" in param['scoring']:
f1_train_score = metrics.f1_score(y_train, y_train_predict)
f1_test_score = metrics.f1_score(y_test, y_test_predict)
score_dict.update({"f1_train_score": f1_train_score, "f1_test_score": f1_test_score})
if "roc" in param['scoring']:
roc_train_score = metrics.roc_auc_score(y_train, y_train_predict)
roc_test_score = metrics.roc_auc_score(y_test, y_test_predict)
score_dict.update({"roc_train_score": roc_train_score, "roc_test_score": roc_test_score})
if "precision" in param['scoring']:
precision_train_score = metrics.precision_score(y_train, y_train_predict)
precision_test_score = metrics.precision_score(y_test, y_test_predict)
score_dict.update({"precision_train_score": precision_train_score, "precision_test_score": precision_test_score})
if "recall" in param['scoring']:
recall_train_score = metrics.recall_score(y_train, y_train_predict)
recall_test_score = metrics.recall_score(y_test, y_test_predict)
score_dict.update({"recall_train_score": recall_train_score, "recall_test_score": recall_test_score})
if "accuracy" in param['scoring']:
recall_train_score = metrics.accuracy_score(y_train, y_train_predict)
recall_test_score = metrics.accuracy_score(y_test, y_test_predict)
score_dict.update({"accuracy_train_score": recall_train_score, "accuracy_test_score": recall_test_score})
rst.append(score_dict)
rst_df = pd.DataFrame(rst)
mean_score = rst_df.mean()
mean_score_dict = mean_score.to_dict()
param.update(mean_score_dict)
# param.update({'score_detailed': rst_df})
param.update({'updt': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
print mean_score
with MongodbUtils(ip="localhost", port=37017, collection="model_result", table=param['table']) as col:
col.insert(param)
def grid_search_param_model(model_name, grid_search_param_dict, X, y, data_process_param_dict):
param_grid = list(ParameterGrid(grid_search_param_dict))
if model_name == "tree":
for param in param_grid:
tree_clf = DecisionTreeClassifier(max_depth=param.get("max_depth", None))
param.update({'model': model_name})
print param
param.update(data_process_param_dict)
cross_validate_model_param_v2(tree_clf, param, dataset=X, label= y)
if model_name == "adaboosting":
for param in param_grid:
tree_clf = DecisionTreeClassifier(max_depth=param.get("max_depth", None))
ada_boost_clf = AdaBoostClassifier(base_estimator=tree_clf, n_estimators=param.get("n_estimators", None), learning_rate=param.get("learning_rate", None))
param.update({'model': model_name})
print param
param.update(data_process_param_dict)
cross_validate_model_param_v2(ada_boost_clf, param, dataset=X, label= y)
elif model_name == 'linearsvc':
for param in param_grid:
print "%s" % param
param.update({'model': model_name})
linsvc_clf = LinearSVC(C=param.get("C", None))
if "class_weight" in param.keys():
class_weight = param['class_weight']
class_weight_key = class_weight.keys()
class_weight_key = [unicode(key) for key in class_weight_key]
class_weight_value = class_weight.values()
class_weight = dict(zip(class_weight_key, class_weight_value))
param.update({'class_weight': class_weight})
param.update(data_process_param_dict)
cross_validate_model_param_v2(linsvc_clf, param, dataset=X, label= y)
elif model_name == "random_forest":
for param in param_grid:
print "%s" % param
param.update({'model': model_name})
rf_clf = RandomForestClassifier(n_estimators=param.get("n_estimators", None), max_depth=param.get("max_depth"))
param.update(data_process_param_dict)
cross_validate_model_param_v2(rf_clf, param, dataset=X, label=y)
elif model_name == "svm":
for param in param_grid:
print "%s" % param
param.update({'model': model_name})
svm_clf = SVC(C=param.get("C"), gamma=param.get("gamma"))
param.update(data_process_param_dict)
cross_validate_model_param_v2(svm_clf, param, dataset=X, label=y)
def calculate_cos_sim_matrix(v_arr):
"""
    Compute the cosine similarity matrix from an array of vectors
:param v_arr:
:return:
"""
v_module = np.array([[np.sqrt(tieba_v.dot(tieba_v))] for tieba_v in v_arr])
v_normal = np.divide(v_arr, v_module)
forum_similarity = v_normal.dot(v_normal.T)
return forum_similarity
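# Worked example (added for illustration): rows are L2-normalised before the
# dot product, so identical directions score 1 and orthogonal ones 0.
#
# v = np.array([[1.0, 0.0], [0.0, 2.0]])
# calculate_cos_sim_matrix(v) # -> [[1., 0.], [0., 1.]]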
| UTF-8 | Python | false | false | 8,865 | py | 2 | utility.py | 1 | 0.643029 | 0.636833 | 0 | 209 | 40.69378 | 165 |
Param9498/ComputerNetworks | 15,418,932,614,282 | ab9060dca74e526a9ebfd09839f7ca7387f1905d | 612162759bb265976a5644c37160e643304312b2 | /tcp_multichat_client.py | abb1c1ab7d682d8140f386474046983d3595ad97 | []
| no_license | https://github.com/Param9498/ComputerNetworks | 01c9aeac212879f374c067997ffe1f590c165215 | 9b83bfb922c84f43f9467477f32a7de756c4f24a | refs/heads/master | 2021-07-23T14:34:17.051692 | 2017-11-02T03:25:50 | 2017-11-02T03:25:50 | 109,213,014 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import time
import _thread as thread
import sys
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((socket.gethostname(), 9999))
nickName = input("Please enter your nickname for this chat room: ")
clientSocket.send(nickName.encode('ascii'))
isWorking = True
def sendingThread():
global isWorking
while True:
test = input()
        if test == "": # '==' not 'is': identity comparison on strings is a bug
data = input("Me: ")
message = data
message = message.encode('ascii')
clientSocket.send(message)
if data == "EXIT":
isWorking = False
return
def receivingThread():
global isWorking
while True:
if isWorking is False:
return
data = clientSocket.recv(1024)
message = data.decode('ascii')
print(message)
thread.start_new_thread(sendingThread, ())
thread.start_new_thread(receivingThread, ())
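# Added note: threads started with the low-level _thread module are killed
# when the main thread exits, so the long sleep below keeps the process alive.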
time.sleep(10000) | UTF-8 | Python | false | false | 968 | py | 16 | tcp_multichat_client.py | 14 | 0.625 | 0.61157 | 0 | 40 | 23.225 | 67 |
cash2one/xai | 11,656,541,262,942 | 4990850c191df69155f96629322b3d00f6550c94 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_beginnings.py | 615fb4d50d95df33d675932e4e15aa5565a8eb31 | [
"MIT"
]
| permissive | https://github.com/cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from xai.brain.wordbase.nouns._beginning import _BEGINNING
#calss header
class _BEGINNINGS(_BEGINNING, ):
def __init__(self,):
_BEGINNING.__init__(self)
self.name = "BEGINNINGS"
self.specie = 'nouns'
self.basic = "beginning"
self.jsondata = {}
| UTF-8 | Python | false | false | 259 | py | 37,275 | _beginnings.py | 37,266 | 0.675676 | 0.675676 | 0 | 10 | 24.7 | 58 |
Divakersoni/Roster-Beta- | 4,561,255,277,525 | 2ed28f8d87ea85ae2d7f01f93f49782649e14a5c | f4d2d5dfdfd20197e28abf83dee3c86a465ab695 | /roster/timet/migrations/0006_master_final_status.py | 39f67ab34818b5072db004d8deb2de6ed1e295e5 | []
| no_license | https://github.com/Divakersoni/Roster-Beta- | c5bf6c343749a38a165f80e1d0ecb17ccfb42b86 | cb30e4ec9d1295b98753d7343e1d382c8d840c7f | refs/heads/master | 2021-05-11T07:17:37.907691 | 2018-01-18T17:24:21 | 2018-01-18T17:24:21 | 118,014,886 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-08 05:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timet', '0005_auto_20171106_0311'),
]
operations = [
migrations.AddField(
model_name='master_final',
name='status',
field=models.SmallIntegerField(choices=[(0, 'Inactive'), (1, 'Active')], default=1),
),
]
| UTF-8 | Python | false | false | 499 | py | 42 | 0006_master_final_status.py | 33 | 0.59519 | 0.523046 | 0 | 20 | 23.95 | 96 |
kingspp/rainbow-print | 1,589,137,913,565 | 8b30d013c60b99b014fcc007e768a18b6203622a | d7bf179c1884dda929e56d5c402e59feb976fc06 | /tests/test_simple.py | 57ba4f4914ae5361e9363820a7a07407ed683f20 | [
"MIT"
]
| permissive | https://github.com/kingspp/rainbow-print | 402d905a8dec57b0d42d08df07230a845728e431 | 4f3b9a7c3ad93dadb5da713e437a165292c1f038 | refs/heads/master | 2022-11-10T20:18:05.294849 | 2020-06-27T18:02:03 | 2020-06-27T18:02:03 | 275,147,444 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # the inclusion of the tests module is not meant to offer best practices for
# testing in general, but rather to support the `find_packages` example in
# setup.py that excludes installing the "tests" package
import unittest
import random
from rainbow_print import rlogging
import logging
logging.basicConfig(level=logging.INFO)
logger = rlogging.getLogger(__name__)
class TestSimple(unittest.TestCase):
def generate_random_data(self, string=False):
d = {
"Episode": 1,
"Ep Len": random.randrange(0, 200),
"Actor Loss": round(random.uniform(0, 1), 2),
"Critic Loss": round(random.uniform(0, 1), 2),
"Loss": round(random.uniform(0, 1), 2),
"Gamma": round(random.uniform(0, 1), 4),
"INT Reward": random.randrange(0, 50),
"EXT Reward": random.randrange(0, 50),
"Reward": random.randrange(0, 100),
"Mode": ["Explore", "Exploit"][random.randrange(0, 2)]
}
if string:
str_builder = ''
for k, v in d.items():
str_builder += f"{k}:{v},"
return str_builder
return d
# def test_print_dict(self):
# printr.update_light_palette()
# for i in range(10):
# printr(self.generate_random_data(string=False))
def test_print_str(self):
for i in range(10):
logger.info(self.generate_random_data(string=True), sep=',')
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 1,515 | py | 6 | test_simple.py | 4 | 0.581518 | 0.559736 | 0 | 46 | 31.934783 | 76 |
h1st-ai/contrib | 5,420,248,768,544 | 4bf247bd6621cce48e962530702dd570bab005c5 | b74038ab83908450fd7a481720538520287b96b3 | /h1st_contrib/iot_mgmt/maint_ops/migrations/0017_auto_20180420_0030.py | bf4ba103878714215bf752ef6a7f1508da6272ce | [
"Apache-2.0"
]
| permissive | https://github.com/h1st-ai/contrib | 72a5d8b648c08324bddf65c48b280780bfa277a5 | 0297cf1bec3f9b11b97f575daacfe7fd38873887 | refs/heads/main | 2022-12-02T06:03:38.302211 | 2022-11-08T20:02:31 | 2022-11-08T20:02:31 | 289,650,802 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-20 07:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('IoT_MaintOps', '0016_auto_20180420_0014'),
]
operations = [
migrations.AlterModelOptions(
name='alert',
options={'ordering': ('equipment_general_type', 'equipment_unique_type_group', 'equipment_instance', 'risk_score_name', 'threshold')},
),
]
| UTF-8 | Python | false | false | 518 | py | 202 | 0017_auto_20180420_0030.py | 196 | 0.627413 | 0.561776 | 0 | 19 | 26.263158 | 146 |
nsoranzo/pybel | 15,418,932,621,644 | fad1c1801db5bafa548a32a25ceb791b51633b4f | 5e5a8270f07ac3ca2017b2c4c0fdc903bb4fc25e | /tests/test_import.py | 68c67a9b93604b83a1ebc8b542c117cef38a54fc | [
"Apache-2.0"
]
| permissive | https://github.com/nsoranzo/pybel | 37f2553103ba721925d49a7fafbd1b0b3177a936 | 3663d24614124509871043d9d411ed400ddba385 | refs/heads/master | 2021-01-18T11:33:28.069016 | 2017-03-08T00:00:32 | 2017-03-08T00:00:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import logging
import tempfile
import unittest
from pathlib import Path
import pybel
from pybel import BELGraph
from pybel import to_bytes, from_bytes, to_graphml
from pybel.constants import GENE, CITATION, ANNOTATIONS, EVIDENCE
from pybel.io import to_json_dict, from_json_dict
from pybel.parser import BelParser
from pybel.parser.parse_exceptions import *
from tests.constants import BelReconstitutionMixin, test_bel_simple, TestTokenParserBase, SET_CITATION_TEST, \
test_citation_dict, test_set_evidence, mock_bel_resources, test_bel_thorough, test_bel_slushy, test_evidence_text
logging.getLogger('requests').setLevel(logging.WARNING)
class TestThoroughIo(BelReconstitutionMixin):
@classmethod
def setUpClass(cls):
@mock_bel_resources
def help_build_graph(mock):
graph = pybel.from_path(test_bel_thorough, complete_origin=False, allow_nested=True)
return graph
cls.graph = help_build_graph()
def test_path(self):
self.bel_thorough_reconstituted(self.graph)
def test_bytes(self):
graph_bytes = to_bytes(self.graph)
graph = from_bytes(graph_bytes)
self.bel_thorough_reconstituted(graph)
def test_json(self):
graph_json = to_json_dict(self.graph)
graph = from_json_dict(graph_json)
self.bel_thorough_reconstituted(graph)
def test_graphml(self):
handle, path = tempfile.mkstemp()
with open(path, 'wb') as f:
to_graphml(self.graph, f)
class TestSlushyIo(BelReconstitutionMixin):
@classmethod
def setUpClass(cls):
@mock_bel_resources
def help_build_graph(mock):
graph = pybel.from_path(test_bel_slushy, complete_origin=True)
return graph
cls.graph = help_build_graph()
def test_slushy(self):
self.bel_slushy_reconstituted(self.graph)
def test_bytes(self):
graph_bytes = to_bytes(self.graph)
graph = from_bytes(graph_bytes)
self.bel_slushy_reconstituted(graph)
def test_json(self):
graph_json = to_json_dict(self.graph)
graph = from_json_dict(graph_json)
self.bel_slushy_reconstituted(graph)
def test_graphml(self):
handle, path = tempfile.mkstemp()
with open(path, 'wb') as f:
to_graphml(self.graph, f)
def test_bytes_io_slushy(self):
g_bytes = pybel.to_bytes(self.graph)
pybel.from_bytes(g_bytes)
class TestImport(BelReconstitutionMixin, unittest.TestCase):
@mock_bel_resources
def test_from_fileUrl(self, mock_get):
g = pybel.from_url(Path(test_bel_simple).as_uri())
self.bel_simple_reconstituted(g)
class TestRegex(unittest.TestCase):
def setUp(self):
self.graph = BELGraph()
self.parser = BelParser(self.graph, namespace_dicts={}, namespace_expressions={'dbSNP': 'rs[0-9]*'})
def test_match(self):
lines = [
SET_CITATION_TEST,
test_set_evidence,
'g(dbSNP:rs10234) -- g(dbSNP:rs10235)'
]
self.parser.parse_lines(lines)
self.assertIn((GENE, 'dbSNP', 'rs10234'), self.parser.graph)
self.assertIn((GENE, 'dbSNP', 'rs10235'), self.parser.graph)
def test_no_match(self):
lines = [
SET_CITATION_TEST,
test_set_evidence,
'g(dbSNP:10234) -- g(dbSNP:rr10235)'
]
with self.assertRaises(MissingNamespaceRegexWarning):
self.parser.parse_lines(lines)
class TestFull(TestTokenParserBase):
def setUp(self):
self.namespaces = {
'TESTNS': {
"1": "GRP",
"2": "GRP"
}
}
self.annotations = {
'TestAnnotation1': {'A', 'B', 'C'},
'TestAnnotation2': {'X', 'Y', 'Z'},
'TestAnnotation3': {'D', 'E', 'F'}
}
self.graph = BELGraph()
self.parser = BelParser(self.graph, namespace_dicts=self.namespaces, annotation_dicts=self.annotations)
def test_no_add_duplicates(self):
s = 'r(TESTNS:1) -> r(TESTNS:2)'
statements = [
SET_CITATION_TEST,
test_set_evidence,
s
]
self.parser.complete_origin = True
self.parser.parse_lines(statements)
self.assertEqual(4, self.parser.graph.number_of_nodes())
self.parser.parseString(s)
self.assertEqual(4, self.parser.graph.number_of_nodes())
def test_semantic_failure(self):
statement = "bp(TESTNS:1) -- p(TESTNS:2)"
with self.assertRaises(InvalidFunctionSemantic):
self.parser.parseString(statement)
def test_lenient_semantic_no_failure(self):
statements = [
SET_CITATION_TEST,
test_set_evidence,
"bp(ABASD) -- p(ABASF)"
]
self.graph = BELGraph()
self.parser = BelParser(self.graph, namespace_dicts=self.namespaces, allow_naked_names=True)
self.parser.parse_lines(statements)
def test_missing_citation(self):
statements = [
test_set_evidence,
'SET TestAnnotation1 = "A"',
'SET TestAnnotation2 = "X"',
'g(TESTNS:1) -> g(TESTNS:2)'
]
with self.assertRaises(MissingCitationException):
self.parser.parse_lines(statements)
def test_annotations(self):
statements = [
SET_CITATION_TEST,
test_set_evidence,
'SET TestAnnotation1 = "A"',
'SET TestAnnotation2 = "X"',
'g(TESTNS:1) -> g(TESTNS:2)'
]
self.parser.parse_lines(statements)
test_node_1 = GENE, 'TESTNS', '1'
test_node_2 = GENE, 'TESTNS', '2'
self.assertEqual(2, self.parser.graph.number_of_nodes())
self.assertHasNode(test_node_1)
self.assertHasNode(test_node_2)
self.assertEqual(1, self.parser.graph.number_of_edges())
kwargs = {
ANNOTATIONS: {
'TestAnnotation1': 'A',
'TestAnnotation2': 'X',
},
EVIDENCE: test_evidence_text,
CITATION: test_citation_dict
}
self.assertHasEdge(test_node_1, test_node_2, **kwargs)
def test_annotations_withList(self):
statements = [
SET_CITATION_TEST,
test_set_evidence,
'SET TestAnnotation1 = {"A","B"}',
'SET TestAnnotation2 = "X"',
'g(TESTNS:1) -> g(TESTNS:2)'
]
self.parser.parse_lines(statements)
test_node_1 = GENE, 'TESTNS', '1'
test_node_2 = GENE, 'TESTNS', '2'
self.assertEqual(2, self.parser.graph.number_of_nodes())
self.assertHasNode(test_node_1)
self.assertHasNode(test_node_2)
self.assertEqual(2, self.parser.graph.number_of_edges())
kwargs = {ANNOTATIONS: {'TestAnnotation1': 'A', 'TestAnnotation2': 'X'}, CITATION: test_citation_dict}
self.assertHasEdge(test_node_1, test_node_2, **kwargs)
kwargs = {ANNOTATIONS: {'TestAnnotation1': 'B', 'TestAnnotation2': 'X'}, CITATION: test_citation_dict}
self.assertHasEdge(test_node_1, test_node_2, **kwargs)
def test_annotations_withMultiList(self):
statements = [
SET_CITATION_TEST,
test_set_evidence,
'SET TestAnnotation1 = {"A","B"}',
'SET TestAnnotation2 = "X"',
'SET TestAnnotation3 = {"D","E"}',
'g(TESTNS:1) -> g(TESTNS:2)'
]
self.parser.parse_lines(statements)
test_node_1 = GENE, 'TESTNS', '1'
test_node_2 = GENE, 'TESTNS', '2'
self.assertEqual(2, self.parser.graph.number_of_nodes())
self.assertHasNode(test_node_1)
self.assertHasNode(test_node_2)
self.assertEqual(4, self.parser.graph.number_of_edges())
kwargs = {
ANNOTATIONS: {
'TestAnnotation1': 'A',
'TestAnnotation2': 'X',
'TestAnnotation3': 'D'
},
CITATION: test_citation_dict
}
self.assertHasEdge(test_node_1, test_node_2, **kwargs)
kwargs = {
ANNOTATIONS: {
'TestAnnotation1': 'A',
'TestAnnotation2': 'X',
'TestAnnotation3': 'E'
},
CITATION: test_citation_dict
}
self.assertHasEdge(test_node_1, test_node_2, **kwargs)
kwargs = {
ANNOTATIONS: {
'TestAnnotation1': 'B',
'TestAnnotation2': 'X',
'TestAnnotation3': 'D'
},
CITATION: test_citation_dict
}
self.assertHasEdge(test_node_1, test_node_2, **kwargs)
kwargs = {
ANNOTATIONS: {
'TestAnnotation1': 'B',
'TestAnnotation2': 'X',
'TestAnnotation3': 'E'
},
CITATION: test_citation_dict
}
self.assertHasEdge(test_node_1, test_node_2, **kwargs)
| UTF-8 | Python | false | false | 9,055 | py | 52 | test_import.py | 41 | 0.574489 | 0.561568 | 0 | 289 | 30.33218 | 117 |
Jobenland/MFC-WebApp | 4,535,485,496,317 | b594508e45aa56d6111732a27095c25c5f8ea760 | 887e7267fc8c8689bd552fb92b12646a341e0e3f | /config.py | 94027e1a89dd3ee3606bcbeb658ec7cfb123fd1c | [
"MIT"
]
| permissive | https://github.com/Jobenland/MFC-WebApp | eb583914436d9ade29a14381edab5e6930af5e08 | aa5817ca32d8a797400971f4fcd40344cd6509ae | refs/heads/master | 2022-12-12T10:39:16.363839 | 2020-02-03T20:37:30 | 2020-02-03T20:37:30 | 235,836,760 | 0 | 0 | MIT | false | 2022-12-07T23:54:10 | 2020-01-23T16:30:37 | 2020-02-03T20:37:32 | 2022-12-07T23:54:09 | 235 | 0 | 0 | 3 | Python | false | false | import os
basedir = os.path.abspath(os.path.dirname(__file__))
# Can be left blank if you opt out of the secret-key option
class Config(object):
DEBUG = False
TESTING = False
SECRET_KEY = '<insert-your-secret-key-here>'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SHOW_CAPTCHA = False
RECAPTCHA_SITE_KEY = '<insert-your-site-key-here>'
RECAPTCHA_SECRET_KEY = '<insert-your-secret-key-here>'
class DevelopmentConfig(Config):
DEBUG = False
class ProductionConfig(Config):
pass
class TestingConfig(Config):
TESTING = False
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'testing': TestingConfig
}
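# Hypothetical wiring into a Flask app (sketch, not part of this file's API):
#
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object(config['development'])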
| UTF-8 | Python | false | false | 755 | py | 18 | config.py | 9 | 0.663576 | 0.663576 | 0 | 33 | 20.878788 | 58 |
deckardmehdy/coursera | 14,259,291,440,923 | ac43db1f5d73c01d48fa5727ef291965f3b9b25c | 56dc57aba9f37e496d10800e628685ee7a52c4ab | /Course4/Week2/invert_BWT.py | 740a1a628df1c2e52f27f92ba517e8f4f9d3ab23 | []
| no_license | https://github.com/deckardmehdy/coursera | 618137d898e1caa2b9682ad9abac01e756cbd0ff | d3350991445661520495b5f61e16fd553e1d6ed0 | refs/heads/master | 2020-06-24T08:09:28.307117 | 2019-07-25T23:02:00 | 2019-07-25T23:02:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Runs using Python 3
def createFO(count):
letters, start = ["A","C","G","T"], 1
firstOcc = {"A": 0, "C": 0, "G": 0, "T": 0}
for i in range(4):
if count[letters[i]] != 0:
firstOcc[letters[i]] = start
start += count[letters[i]]
return firstOcc
def invert_BWT(lastCol):
# Make a dict for storing position and count of letters
count = {"A": 0, "C": 0, "G": 0, "T": 0}
lastColOccs = []
for i in range(len(lastCol)):
if lastCol[i] != "$":
lastColOccs.append(count[lastCol[i]])
count[lastCol[i]] += 1
else:
lastColOccs.append(0)
    # Create dict of first occurrences
firstOcc = createFO(count)
# Reconstruct string:
string = "$"
pointer = 0
while len(string) < len(BWT):
        # Get the corresponding letter in last column
letter = lastCol[pointer]
occurance = lastColOccs[pointer]
# Find the location of the letter in the first column
pointer = firstOcc[letter] + occurance
# Add the letter to the string
string += letter
string = string[::-1]
return string
################################
####### START OF PROGRAM #######
################################
BWT = str(input())
print(invert_BWT(BWT))
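# Worked example (added for illustration): for the text "ACA$" the sorted
# rotations are $ACA, A$AC, ACA$, CA$A, so the BWT (last column) is "AC$A";
# invert_BWT("AC$A") follows the last-to-first mapping and returns "ACA$".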
| UTF-8 | Python | false | false | 1,304 | py | 52 | invert_BWT.py | 51 | 0.526074 | 0.513804 | 0 | 48 | 26.166667 | 61 |
nathan5280/Snippets | 8,916,352,116,622 | 77a2f16780a3f6b5911afd296cc641f91d230ba8 | ce4ada9e2587944076a63ea5dd6018f79476df66 | /python36/sqlalchemy_/orm_tutorial/section_2_multi_table/user.py | 66bdb78eb05e6900a647c8ed7f03a3bb7b5d9a0c | []
| no_license | https://github.com/nathan5280/Snippets | 8023385b57aac4c77509fdcde6889ea7cb9a576b | 8f763cf05fa0460a8add4701603b5819134dea23 | refs/heads/master | 2022-02-16T20:46:16.733042 | 2019-01-06T22:53:13 | 2019-01-06T22:53:13 | 107,143,976 | 1 | 1 | null | false | 2022-01-21T19:42:26 | 2017-10-16T15:07:54 | 2019-01-06T22:53:15 | 2022-01-21T19:42:25 | 501 | 1 | 1 | 13 | Python | false | false | from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from .base_model import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(20))
full_name = Column(String(50))
password = Column(String(12))
# No cascading delete of addresses when their user is deleted.
# addresses = relationship("Address", back_populates="user")
# Cascading delete of addresses when user is deleted.
addresses = relationship("Address", back_populates="user", cascade="all, delete, delete-orphan")
equality_keys = ["name", "full_name", "password"]
def __init__(self, *, name: str, full_name: str, password: str):
self.name = name
self.full_name = full_name
self.password = password
def __repr__(self):
return f"User: id={self.id}, name={self.name}, full_name='{self.full_name}', password={self.password}"
def __eq__(self, other):
attr_equal = all([self.__dict__[k] == other.__dict__[k] for k in User.equality_keys])
return attr_equal
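# A matching Address model might look like this (hypothetical sketch; the real
# definition lives in the sibling module that back_populates "user"):
#
# class Address(Base):
#     __tablename__ = 'addresses'
#     id = Column(Integer, primary_key=True)
#     email_address = Column(String(50))
#     user_id = Column(Integer, ForeignKey('users.id'))
#     user = relationship("User", back_populates="addresses")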
| UTF-8 | Python | false | false | 1,106 | py | 249 | user.py | 215 | 0.64557 | 0.640145 | 0 | 34 | 31.529412 | 110 |
Ivancion/Temporary-Repository-with-Test-Job | 9,603,546,899,288 | 163412301b302d191f0fe1a04ad15e7e59066dc2 | 78847928bd8b1a56e371fbb88afc9cf015dc54db | /pokemon/services.py | 6d8556d01b4144876b93aaa9e4e8a7afa85fefb3 | []
| no_license | https://github.com/Ivancion/Temporary-Repository-with-Test-Job | 7ff93a5bf1ed45737e50ca3dee32a4820b9740df | 2e065575faeb11dd71620e0a28e0ee9d8f7e764d | refs/heads/main | 2023-06-27T04:09:44.403834 | 2021-07-24T17:42:51 | 2021-07-24T17:42:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from io import BytesIO
import requests
import json
from .models import Pokemon
from django.core import files
def fill_db(s, num):
for x in range(s, num):
BASE_URL = 'http://pokeapi.co'
def query(resource_url):
url = f"{BASE_URL}{resource_url}{x}/"
response = requests.get(url)
if response.status_code == 200:
return json.loads(response.text)
return None
pokemon = query('/api/v2/pokemon/')
pokemon_species = query(f"/api/v2/pokemon-species/")
image_url = pokemon['sprites']['other']["official-artwork"]['front_default']
resp = requests.get(image_url)
fp = BytesIO()
fp.write(resp.content)
file_name = image_url.split("/")[-1]
Pokemon(name= pokemon['name'],
id=pokemon['id'],
weight= pokemon['weight'],
height= pokemon['height'],
base_happiness= pokemon_species["base_happiness"],
capture_rate= pokemon_species["capture_rate"],
has_gender_differences= pokemon_species["has_gender_differences"],
is_baby= pokemon_species["is_baby"],
is_legendary= pokemon_species["is_legendary"],
is_mythical= pokemon_species["is_mythical"],
is_playing= False,
).image.save(file_name, files.File(fp))
def get_detail_pokemon(name_pk):
BASE_URL = 'http://pokeapi.co'
def query(resource_url):
url = f"{BASE_URL}{resource_url}{name_pk}/"
response = requests.get(url)
if response.status_code == 200:
return json.loads(response.text)
return None
pokemon = query('/api/v2/pokemon/')
pokemon_species = query(f"/api/v2/pokemon-species/")
return {"id":pokemon['id'],
"name": pokemon['name'],
"weight": pokemon['weight'],
"height": pokemon['height'],
"image": pokemon['sprites']['other']["official-artwork"]['front_default'],
"base_happiness": ["base_happiness"],
"capture_rate": pokemon_species["capture_rate"],
"has_gender_differences": pokemon_species["has_gender_differences"],
"is_baby": pokemon_species["is_baby"],
"is_legendary": pokemon_species["is_legendary"],
"is_mythical": pokemon_species["is_mythical"],
}
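# Assumed usage (PokeAPI accepts either names or numeric ids):
#
# fill_db(1, 152) # load pokemon 1..151 into the local DB
# get_detail_pokemon('pikachu') # dict of stats for the given name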
| UTF-8 | Python | false | false | 2,431 | py | 21 | services.py | 12 | 0.559852 | 0.555327 | 0 | 76 | 30.973684 | 86 |
DaniilSmirnov/JunctionAPI | 6,579,889,911,031 | 34d125da93a0f7024af1ce157f14fcdd6ac81b1e | 07837422ef3c6a835d006219bb10e7a981e3d847 | /app.py | 37bc2ccacaa1c3f4a6c7440b23d80d818ce59469 | []
| no_license | https://github.com/DaniilSmirnov/JunctionAPI | 47905e2b61a7a0511cf5d097e7ac8dc1b00eb98f | 20b24aedc8eb9db7448d986d7937a0f8540498c3 | refs/heads/master | 2020-09-11T06:26:20.244523 | 2019-11-17T06:16:19 | 2019-11-17T06:16:19 | 221,970,857 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
from flask_restful import Resource, Api, reqparse
from flask import request
import mysql.connector
from flask_cors import CORS
import json
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Access-Control-Allow-Origin: *'
cors = CORS(app)
api = Api(app)
cnx = mysql.connector.connect(user='root', password='i130813',
host='127.0.0.1',
database='junction')
def search(query):
import csv
with open('search_index.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
response = []
try:
for row in spamreader:
try:
id = int(row[0])
except ValueError:
continue
item = row[1]
if isinstance(query, list):
query = ' '.join(query)
if query.find('iphone') != -1 and query.find(' ') != -1:
query = query.split(" ")
device = str(query[0])
model = str(query[1])
if item.find(device) != -1 and item.find(model) != -1:
response.append(id)
else:
if item.find(query) != -1:
response.append(id)
        except UnicodeDecodeError:
            pass
        return response # return after the loop; returning only inside except left None otherwise
class GetCelebrations(Resource):
def get(self):
cursor = cnx.cursor()
query = "select name from celebrations where curdate() >= start and curdate() <= finish;"
cursor.execute(query)
response = []
for item in cursor:
for value in item:
response.append(value)
return response
class GetUserCategories(Resource):
def get(self):
cursor = cnx.cursor()
parser = reqparse.RequestParser()
parser.add_argument('user_id', type=int)
args = parser.parse_args()
user_id = args['user_id']
query = "select idcategories from categories where iduser = %s;"
data = (user_id, )
cursor.execute(query, data)
responce = []
for item in cursor:
for value in item:
responce.append(str(value))
cursor.close()
return responce
class GetWishlists(Resource):
def get(self):
try:
cursor = cnx.cursor()
parser = reqparse.RequestParser()
parser.add_argument('user_id', type=int)
parser.add_argument('name', type=str)
args = parser.parse_args()
user_id = args['user_id']
name = args['name']
            # Forgive me for this wild hack. Amen.
if name == "common":
                # I was forced to...
query = "select idwishlist, list_name from wishlist where iduser = %s and list_name = 'common';"
data = (user_id,)
cursor.execute(query, data)
wishlist = {}
responce = []
for item in cursor:
i = 0
for value in item:
if i == 0:
wishlist.update({'id': value})
id = value
if i == 1:
wishlist.update({'name': value})
i += 1
if i == 2:
products = []
query = "select idproduct from product where idwishlist = %s;"
data = (id,)
try:
cursor2 = cnx.cursor()
cursor2.execute(query, data)
except mysql.connector.errors.InternalError:
return {'status': 'no wishlists'}
for item2 in cursor2:
for value2 in item2:
products.append(value2)
wishlist.update({'products': products})
i += 1
responce.append(wishlist)
cursor.close()
else:
query = "select idwishlist, list_name from wishlist where iduser = %s;"
data = (user_id, )
cursor.execute(query, data)
wishlist = {}
responce = []
for item in cursor:
i = 0
for value in item:
if i == 0:
wishlist.update({'id': value})
id = value
if i == 1:
wishlist.update({'name': value})
i += 1
if i == 2:
products = []
query = "select idproduct from product where idwishlist = %s;"
data = (id, )
try:
cursor2 = cnx.cursor()
cursor2.execute(query, data)
except mysql.connector.errors.InternalError:
return {'status': 'no wishlists'}
for item2 in cursor2:
for value2 in item2:
products.append(value2)
wishlist.update({'products': products})
i += 1
responce.append(wishlist)
cursor.close()
if isinstance(responce, list):
if len(responce) == 0:
return {'status': 'no wishlists'}
return responce
except BaseException as e:
print(e)
return str(e)
#cursor.close()
class AddWishlist(Resource):
def post(self):
cursor = cnx.cursor()
try:
parser = reqparse.RequestParser()
parser.add_argument('user_id', type=int)
parser.add_argument('name', type=str)
args = parser.parse_args()
user_id = args['user_id']
name = args['name']
query = "insert into wishlist values (default, %s, %s);"
data = (user_id, name)
cursor.execute(query, data)
cnx.commit()
cursor.close()
return {'status': 'success'}
except BaseException as e:
cursor.close()
print(e)
return {'status': str(e)}
class AddProduct(Resource):
def post(self):
cursor = cnx.cursor()
try:
parser = reqparse.RequestParser()
parser.add_argument('wishlist_id', type=int)
parser.add_argument('product_id', type=int)
args = parser.parse_args()
product_id = args['wishlist_id']
wishlist_id = args['product_id']
query = "insert into product values (%s, %s, default);"
data = (wishlist_id, product_id)
cursor.execute(query, data)
cnx.commit()
cursor.close()
return {'status': 'success'}
except BaseException as e:
cursor.close()
print(e)
return {'status': str(e)}
class AssignCategory(Resource):
def post(self):
cursor = cnx.cursor()
try:
parser = reqparse.RequestParser()
parser.add_argument('user_id', type=int)
parser.add_argument('category_id', type=int)
args = parser.parse_args()
user_id = args['user_id']
category_id = args['category_id']
query = "insert into categories values (%s, %s, default);"
data = (category_id, user_id)
cursor.execute(query, data)
cnx.commit()
cursor.close()
return {'status': 'success'}
except BaseException as e:
cursor.close()
return {'status': str(e)}
class MoveProduct(Resource):
def post(self):
cursor = cnx.cursor()
try:
parser = reqparse.RequestParser()
parser.add_argument('from_id', type=int)
parser.add_argument('to_id', type=int)
parser.add_argument('product_id', type=int)
args = parser.parse_args()
from_id = args['from_id']
to_id = args['to_id']
product_id = args['product_id']
query = "update product set idwishlist = %s where idwishlist = %s and idproduct = %s;"
data = (to_id, from_id, product_id)
cursor.execute(query, data)
cnx.commit()
cursor.close()
return {'status': 'success'}
except BaseException as e:
cursor.close()
return {'status': str(e)}
class WillBePayed(Resource):
def post(self):
cursor = cnx.cursor()
try:
parser = reqparse.RequestParser()
parser.add_argument('wishlist_id', type=int)
parser.add_argument('choose', type=str)
parser.add_argument('product_id', type=int)
args = parser.parse_args()
wishlist_id = args['wishlist_id']
choose = args['choose']
product_id = args['product_id']
query = "update product set willbe = %s where idwishlist = %s and idproduct = %s;"
data = (choose, wishlist_id, product_id)
cursor.execute(query, data)
cnx.commit()
cursor.close()
return {'status': 'success'}
except BaseException as e:
cursor.close()
return {'status': str(e)}
class CheckActuality(Resource):
def post(self):
cursor = cnx.cursor()
try:
parser = reqparse.RequestParser()
parser.add_argument('user_id', type=int)
parser.add_argument('category_id', type=int)
parser.add_argument('action', type=bool)
args = parser.parse_args()
user_id = args['user_id']
category_id = args['category_id']
action = args['action']
if not action:
query = "update categories set dislikes = dislikes + 1 where iduser = %s and idcategory = %s;"
data = (user_id, category_id)
cursor.execute(query, data)
cnx.commit()
query = "select dislikes from categories where iduser = %s and idcategory = %s;"
data = (user_id, category_id)
cursor.execute(query, data)
for item in cursor:
for value in item:
if int(value) >= 20:
cursor2 = cnx.cursor()
query = "delete idcategories from categories where idcategories = %s and iduser = %s;"
data = (category_id, user_id)
cursor2.execute(query, data)
cnx.commit()
cursor.close()
cursor2.close()
                            return {'status': 'category removed'}
if action:
query = "update categories set dislikes = dislikes - 1 where iduser = %s and idcategory = %s;"
data = (user_id, category_id)
cursor.execute(query, data)
cnx.commit()
cursor.close()
return {'status': 'success'}
except BaseException as e:
cursor.close()
return {'status': str(e)}
class GetRecommendations(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('names', type=str)
parser.add_argument('system', type=str)
parser.add_argument('screen', type=str)
args = parser.parse_args()
names = args['names']
system = args['system']
screen = args['screen']
responce = []
names = names.split(',')
for name in names:
responce += (search(name))
if system == 'ios':
screen = screen.split("x")
height = screen[0]
width = screen[1]
responce += (search('lightning'))
responce += (search('airpods'))
if height == "568" and width == "320":
query = ['iphone 5', 'iphone 5s', 'iphone se']
for item in query:
responce += (search(item))
else:
responce += (search("iphone"))
if height == "667" and width == "375":
query = ['iphone 6', 'iphone 7', 'iphone 8']
for item in query:
responce += (search(item))
else:
responce += (search("iphone"))
if height == "812" and width == "375":
query = ['iphone X', 'iphone XS', 'iphone XR']
for item in query:
responce += (search(item))
else:
responce += (search("iphone"))
else:
responce += (search('micro usb'))
responce += (search('type c'))
responce += (search(system))
cursor = cnx.cursor()
query = "select name from celebrations where curdate() >= start and curdate() <= finish;"
cursor.execute(query)
for item in cursor:
for value in item:
responce += (search(value))
responce = list(set(responce))
responce = str(responce)[1:len(str(responce))-1]
responce = responce.split(" ")
response = ""
for item in responce:
response += str(item)
return {'items': response}
except BaseException as e:
return str(e)
api.add_resource(GetCelebrations, '/GetCelebrations')
api.add_resource(GetWishlists, '/GetWishlists')
api.add_resource(AddWishlist, '/AddWishlist')
api.add_resource(AddProduct, '/AddProduct')
api.add_resource(AssignCategory, '/AssignCategory')
api.add_resource(MoveProduct, '/MoveProduct')
api.add_resource(WillBePayed, '/WillBePayed')
api.add_resource(CheckActuality, '/CheckActuality')
api.add_resource(GetRecommendations, '/GetRecommendations')
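# Example request against a local run (illustrative; Flask's default port):
#   curl "http://127.0.0.1:5000/GetWishlists?user_id=1&name=common"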
if __name__ == '__main__':
app.run(debug=True)
# TODO: Categories based on user subscriptions
# TODO: Make sure products are not repeated
# TODO: Payment system
# TODO: Check whether the user still has categories left
# TODO: More data in the recommendations
# TODO: Push notifications to the user about holidays and friends' birthdays
# TODO: Similar products based on the wishlist | UTF-8 | Python | false | false | 15,336 | py | 2 | app.py | 2 | 0.475969 | 0.470526 | 0 | 449 | 32.552339 | 114
Ammarpad/wikicode | 6,322,191,871,147 | d51805489757952cfb7280812c375d7f68d1fb5a | 58d55995512804c76b7b4b7250d48b8f370bb13d | /wikidata_bad_p373_value2.py | 69aee1a25b413ccf9cd250485c9a2139d07f0bd3 | []
| no_license | https://github.com/Ammarpad/wikicode | 105e46214c891f5f8e274c78fdb45ba033d5dfbb | 4460af096a1d68640c698c90c5cd329aec096675 | refs/heads/master | 2023-05-02T21:58:17.499426 | 2021-04-30T09:23:48 | 2021-04-30T09:23:48 | 370,579,994 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Remove P373 values
# Mike Peel 05-Feb-2020 v1 - start
import pywikibot
import numpy as np
from pywikibot import pagegenerators
from pibot_functions import *
wikidata_site = pywikibot.Site("wikidata", "wikidata")
repo = wikidata_site.data_repository()
# This query no longer works per T274982
exit()
query = "SELECT DISTINCT ?item ?itemLabel WHERE {{ ?statement wikibase:hasViolationForConstraint wds:P373-B6CB2058-B6B7-4E4D-98D3-ED2C4F3D7184 . ?item ?p ?statement . FILTER( ?item NOT IN ( wd:Q4115189, wd:Q13406268, wd:Q15397819 ) ) . }}"
print(query)
generator = pagegenerators.WikidataSPARQLPageGenerator(query, site=wikidata_site)
for page in generator:
# Get the target item
print('\n\n')
try:
item_dict = page.get()
except:
continue
qid = page.title()
print("\nhttp://www.wikidata.org/wiki/" + qid)
try:
sitelink = get_sitelink_title(item_dict['sitelinks']['commonswiki'])
print('http://commons.wikimedia.org/wiki/'+sitelink)
except:
print('No sitelink')
continue
try:
p373 = item_dict['claims']['P373']
for clm in p373:
val = clm.getTarget()
p373cat = u"Category:" + val
if p373cat != sitelink:
print('Remove P373?')
print(' http://www.wikidata.org/wiki/'+qid)
print('http://commons.wikimedia.org/wiki/' + str(p373cat))
# input('OK?')
savemessage = "Remove P373 value that doesn't match the sitelink"
# print(savemessage)
page.removeClaims(clm,summary=savemessage)
except:
continue
# EOF | UTF-8 | Python | false | false | 1,730 | py | 129 | wikidata_bad_p373_value2.py | 127 | 0.613295 | 0.563006 | 0 | 56 | 29.910714 | 251 |
edrhsmith/activiti | 9,371,618,659,878 | c8a12e27b21536a37eafd476232a0093ae3b3e54 | 2ce29104beea357b5a12dc4c2517aa7b32de2716 | /strip_power.py | 9bd1489f620156f1a1346923d43817f9d3e12948 | []
| no_license | https://github.com/edrhsmith/activiti | 716538b3bb939c760e0cb46b187c836f64a1ebb9 | 59a4573db139b9eb6c3a4e4ab934197f72891812 | refs/heads/main | 2023-04-24T14:25:58.365662 | 2021-05-16T09:29:56 | 2021-05-16T09:29:56 | 363,641,931 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
POWER_TAGS = {'gpx': 'power', 'tcx': 'ns3:Watts'}
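# Strip power readings from an activity file: <power> elements in a GPX file,
# <ns3:Watts> elements in a TCX file. The stripped copy is written to new_<input-file>.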
def main(args):
extension = get_extension(args.input_file)
power_tag_name = get_power_tag(extension)
stripped_lines = []
with open(args.input_file) as f:
        for line in f:
stripped = line.strip()
if stripped.startswith(get_start_tag(power_tag_name)) and stripped.endswith(get_end_tag(power_tag_name)):
continue
stripped_lines.append(line)
with open("new_" + args.input_file, "wt") as g:
for line in stripped_lines:
g.write(line)
def get_start_tag(tag_name):
return '<' + tag_name + '>'
def get_end_tag(tag_name):
return '</' + tag_name + '>'
def get_extension(file_name):
return file_name.split('.')[-1]
def get_power_tag(extension):
return POWER_TAGS[extension]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--input-file", type=str, required=True)
args = parser.parse_args()
    if get_extension(args.input_file) not in POWER_TAGS:
        raise ValueError("input file must be gpx or tcx")
return args
if __name__ == "__main__":
main(parse_args())
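    # e.g. `python strip_power.py --input-file ride.gpx` writes new_ride.gpx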
| UTF-8 | Python | false | false | 1,200 | py | 2 | strip_power.py | 1 | 0.6125 | 0.610833 | 0 | 49 | 23.489796 | 117 |
mgarkusha/mgarkusha.github.io | 1,752,346,669,614 | 0865e86a6dd040ba59372e596ff400df35880de5 | a9694270a35de24640fad23270d6df173c996eac | /corp/smena/forms.py | 637c5e1ffc88c70faa68a1c249226a9063a17d0e | []
| no_license | https://github.com/mgarkusha/mgarkusha.github.io | eb81a8c381bafebe41c78eae4e5c1a606476a07d | c9d56155f41dc854b94bdc85bbb68341ae8308d8 | refs/heads/master | 2018-11-11T23:16:30.414188 | 2018-10-16T00:34:40 | 2018-10-16T00:34:40 | 105,914,547 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from .models import Smena, Run
class SmenaForm(forms.ModelForm):
class Meta:
model = Smena
fields = ()
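        # empty tuple: this ModelForm exposes no editable model fields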
class RunForm(forms.ModelForm):
class Meta:
model = Run
fields = ()
| UTF-8 | Python | false | false | 240 | py | 74 | forms.py | 36 | 0.6125 | 0.6125 | 0 | 14 | 16.142857 | 33 |
spatial-image/multiscale-spatial-image | 11,957,188,992,321 | c44a7efba6f3b432ef673fb5b04d61dc1ab5085b | 9319ca57fa136b9fdc2390329ce6b2e2f01039a4 | /multiscale_spatial_image/__init__.py | be793ee6c9e2dea230b51ebfb081378e893e9bc1 | [
"Apache-2.0"
]
| permissive | https://github.com/spatial-image/multiscale-spatial-image | b7aafb4886d275d59fe6bbd397e771a0d5dda231 | efc0ddc167cd2a14ceac4898e4edf08e7789c368 | refs/heads/main | 2023-05-23T19:25:59.521941 | 2023-02-09T04:06:35 | 2023-02-09T04:06:35 | 379,678,181 | 19 | 3 | Apache-2.0 | false | 2023-02-09T04:01:16 | 2021-06-23T17:17:08 | 2023-01-28T22:14:13 | 2023-02-09T04:01:15 | 1,658 | 20 | 5 | 16 | Python | false | false | """multiscale-spatial-image
Generate a multiscale spatial image."""
__all__ = [
"MultiscaleSpatialImage",
"Methods",
"to_multiscale",
"itk_image_to_multiscale",
"__version__",
]
from .__about__ import __version__
from .multiscale_spatial_image import MultiscaleSpatialImage
from .to_multiscale import Methods, to_multiscale, itk_image_to_multiscale | UTF-8 | Python | false | false | 362 | py | 28 | __init__.py | 17 | 0.720994 | 0.720994 | 0 | 16 | 21.6875 | 74 |
daydaychallenge/leetcode-python | 18,957,985,652,625 | d42455106fd1e4d17c7a8abfac137783bc134642 | 53c662619269acdac73cf8b00fdb48ad7f266f8a | /01358/test_number_of_substrings_containing_all_three_characters.py | 53278d8734341f86ec65b3761ddfbbef6216739f | []
| no_license | https://github.com/daydaychallenge/leetcode-python | 17d76f45877edd47f9e80e526980fbb37d34da51 | f68a69eb0afec4ec2516cc7fdfd6315f012af745 | refs/heads/master | 2022-12-12T20:48:38.814070 | 2020-11-08T15:08:55 | 2020-11-08T15:08:55 | 253,716,527 | 0 | 1 | null | false | 2022-09-20T21:23:31 | 2020-04-07T07:17:12 | 2020-11-08T15:09:16 | 2022-09-20T21:23:30 | 182 | 0 | 1 | 24 | Python | false | false | import unittest
from number_of_substrings_containing_all_three_characters import Solution
class TestSolution(unittest.TestCase):
    def test_numberOfSubstrings(self):
sol = Solution()
self.assertEqual(10, sol.numberOfSubstrings("abcabc"))
self.assertEqual(3, sol.numberOfSubstrings("aaacb"))
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 370 | py | 237 | test_number_of_substrings_containing_all_three_characters.py | 236 | 0.705405 | 0.697297 | 0 | 13 | 27.461538 | 73 |
k8godzilla/-Leetcode | 4,578,435,141,200 | 6a1356e9add6f0bb3e2605236363cc1fd69a7f4c | dbd8180d9c02c22b42baa5227437714ff352fd8e | /1-100/L015v2.py | db0e485bfdd506384d0cd5d74901078d1461451b | []
| no_license | https://github.com/k8godzilla/-Leetcode | 92953dfffc0f06907fa7bd0beea7bc27b16f9efa | 58d5384155f481b1d1b0a7ca69566245dd779554 | refs/heads/master | 2020-06-12T15:35:43.380979 | 2019-08-07T11:14:49 | 2019-08-07T11:14:49 | 194,348,264 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 7 16:43:38 2019
@author: admin
"""
'''
Given an array nums of n integers, determine whether there exist three elements a, b, c in nums such that a + b + c = 0. Find all unique triplets that satisfy this condition.
Note: the solution set must not contain duplicate triplets.
For example, given the array nums = [-1, 0, 1, 2, -1, -4],
the set of triplets that satisfies the requirement is:
[
[-1, 0, 1],
[-1, -1, 2]
]
'''
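# sanity check: Solution().threeSum([-1, 0, 1, 2, -1, -4]) -> [[-1, -1, 2], [-1, 0, 1]]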
class Solution:
def threeSum(self, nums:list):
nums.sort()
self.nums = nums
self.res = []
for i in range(len(nums)):
if i > 0 and nums[i] == nums[i - 1]:
continue
else:
self.twoPointer(nums[i], i + 1, (-1) * self.nums[i])
return self.res
def twoPointer(self, v, j, target):
k = len(self.nums) - 1
while j < k:
if self.nums[j] + self.nums[k] == target:
self.res.append([v, self.nums[j], self.nums[k]])
j += 1
while j < k and self.nums[j] == self.nums[j - 1]:
j += 1
elif self.nums[j] + self.nums[k] < target:
j += 1
while j < k and self.nums[j] == self.nums[j - 1]:
j += 1
else:
k -= 1
while j < k and self.nums[k] == self.nums[k + 1]:
k -= 1 | UTF-8 | Python | false | false | 1,449 | py | 208 | L015v2.py | 207 | 0.424432 | 0.393892 | 0 | 51 | 24.058824 | 81 |
chengshaozhe/sheep_policy | 4,793,183,528,098 | e9603c25b3c1f5522a9ee04f3bf622fb63d74902 | b715012e5ba8c54ff6676aa4c7c7f3c7ed7ee32a | /MCTS.py | 45c18984524446cd0ff2d7708ee7d0b2202739fc | []
| no_license | https://github.com/chengshaozhe/sheep_policy | 569adf80e129ed070e0866e6f53d1b61ddfcd76d | fcbfaaa2506e228036fe8606ca271555fc3f291a | refs/heads/master | 2021-08-08T07:23:59.063663 | 2020-04-19T12:35:37 | 2020-04-19T12:35:37 | 158,331,454 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import sys
import numpy as np
import tensorflow as tf
HIDDEN1_UNITS = 300
HIDDEN2_UNITS = 200
EPS = 1e-8
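# EPS keeps the PUCT exploration term nonzero for actions at states with zero recorded visits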
import Transition
from PreparePolicy import *
import pandas as pd
from viz import *
from reward import *
from gridworld import *
from BeliefUpdate import *
from InitialPosition import *
import Attention
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.epsilon = 0
self.learning_rate = 0.001
self.model = self._buildDNN()
def _buildDNN(self):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(
400, input_dim=self.state_size, activation='relu'))
model.add(tf.keras.layers.Dense(300, activation='relu'))
model.add(tf.keras.layers.Dense(
self.action_size, activation='linear'))
model.compile(loss='mse',
optimizer=tf.keras.optimizers.Adam(lr=self.learning_rate))
return model
def getQ(self, state):
action_values = self.model.predict(state)
return action_values[0]
def getV(self, state):
action_values = self.model.predict(state)[0]
state_value = np.amax(action_values)
return state_value
def load(self, name):
self.model.load_weights(name)
def __call__(self, state):
action_values = self.model.predict(state)
return action_values
class CriticNetwork:
def __init__(self,state_size, action_size):
self.model = self.create_critic_network(state_size, action_size)
def create_critic_network(self, state_size, action_size):
S = tf.keras.layers.Input(shape=[state_size])
A = tf.keras.layers.Input(shape=[action_size],name='action2')
w1 = tf.keras.layers.Dense(HIDDEN1_UNITS, activation='relu')(S)
a1 = tf.keras.layers.Dense(HIDDEN2_UNITS, activation='linear')(A)
h1 = tf.keras.layers.Dense(HIDDEN2_UNITS, activation='linear')(w1)
h2 = tf.keras.layers.Add()([h1,a1])
h3 = tf.keras.layers.Dense(HIDDEN2_UNITS, activation='relu')(h2)
V = tf.keras.layers.Dense(action_size, activation='linear')(h3)
model = tf.keras.models.Model(inputs=[S,A],outputs=[V])
return model
def __call__(self, states, actor):
action_values = self.model.predict([states, actor.model.predict(states)])
state_value = np.amax(action_values)
return state_value
class ActorNetwork:
def __init__(self, state_size, action_size):
self.model = self.create_actor_network(state_size, action_size)
def create_actor_network(self, state_size, action_size):
S = tf.keras.layers.Input(shape=[state_size])
h0 = tf.keras.layers.Dense(HIDDEN1_UNITS, activation='relu')(S)
h1 = tf.keras.layers.Dense(HIDDEN2_UNITS, activation='relu')(h0)
out = tf.keras.layers.Dense(action_size, activation='softmax')(h1)
model = tf.keras.models.Model(inputs=[S], outputs=[out])
return model
def getQ(self, state):
action_values = self.model.predict(state)[0]
return list(action_values)
def getV(self, state):
action_values = self.model.predict(state)[0]
state_value = np.amax(action_values)
return state_value
def __call__(self, state):
action_values = self.model.predict(state)[0]
action_index_max = np.argmax(action_values)
return action_index_max
class MCTS():
def __init__(self, actor, critic, cpuct, numMCTSSims, state_size, action_size):
self.actor = actor
self.critic = critic
self.cpuct = cpuct
self.numMCTSSims = numMCTSSims
self.Qsa = {} # stores Q values for s,a
self.Nsa = {} # stores #times edge s,a was visited
self.Ns = {} # stores #times board s was visited
self.Ps = {} # stores initial policy (returned by neural net) {s:{a:prob}}
self.Es = {} # stores game ended for s
self.action_size = action_size
self.state_size = state_size
def __call__(self, state):
action = np.argmax(self.getActionProb(state))
return action
def getActionProb(self, state):
for i in range(self.numMCTSSims):
state = tuple(map(int, state))
self.search(state)
s = state
counts = [self.Nsa[(s, a)] if (
s, a) in self.Nsa else 0 for a in range(self.action_size)]
# print(counts)
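        # greedy policy: put all probability mass on the most-visited action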
bestA = np.argmax(counts)
probs = [0] * len(counts)
probs[bestA] = 1
return probs
def search(self, state):
s = state
if s not in self.Es:
self.Es[s] = isTerminal(s)
if self.Es[s] != 0:
return -500
        # Expansion: first visit to s - cache the actor's priors and return the critic's value
if s not in self.Ps:
# leaf node
self.Ps[s] = self.actor.getQ(np.reshape(np.asarray(s), [1, self.state_size]))
# v = self.dqn.getV(np.reshape(np.asarray(s), [1, self.state_size]))
s_arr = np.reshape(np.asarray(s), [1, self.state_size])
            v = self.critic(s_arr, self.actor)
self.Ns[s] = 0
return v
current_best = -float('inf')
best_action = -1
        # Selection: maximise the PUCT score Q(s,a) + cpuct * P(s,a) * sqrt(N(s)) / (1 + N(s,a))
        for a in range(self.action_size):
if (s, a) in self.Qsa:
u = self.Qsa[(s, a)] + self.cpuct * self.Ps[s][a] * \
math.sqrt(self.Ns[s]) / (1 + self.Nsa[(s, a)])
else:
u = self.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s] + EPS)
if u > current_best:
current_best = u
best_action = a
a = best_action
        # Simulation: apply the chosen sheep action plus the wolf policy, then recurse on the next state
sheepAction = sheepActionList[a]
statesList = [list(s[:4]), list(s[4:])]
state_pd = pd.DataFrame(statesList, index=list(range(2)),
columns=['positionX', 'positionY', 'velocityX', 'velocityY'])
wolfAction = takeWolfAction(state_pd, 50)
currentActions = [sheepAction, wolfAction]
next_state = transState(state_pd, currentActions)
next_state = tuple(np.asarray(next_state).flatten())
next_state = tuple(map(int, next_state))
v = self.search(next_state)
        # Backup: update the running mean of Q(s,a) and the visit counts along the path
if (s, a) in self.Qsa:
self.Qsa[(s, a)] = (self.Nsa[(s, a)] *
self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)
self.Nsa[(s, a)] += 1
else:
self.Qsa[(s, a)] = v
self.Nsa[(s, a)] = 1
self.Ns[s] += 1
return v
def isTerminal(state):
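    # the episode ends when the wolf comes within 30 pixels of the sheep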
agent_state = state[:4]
wolf_state = state[4:8]
agent_coordinates = agent_state[:2]
wolf_coordinates = wolf_state[:2]
def l2_norm(s0, s1, rho=1):
diff = (np.asarray(s0) - np.asarray(s1)) * rho
return np.linalg.norm(diff)
if l2_norm(agent_coordinates, wolf_coordinates) <= 30:
return True
return False
if __name__ == '__main__':
statesListInit = [[10,10,0,0],[10,5,0,0],[15,15,0,0]]
speedList = [8,4,4,4,4,4]
movingRange = [0,0,364,364]
assumeWolfPrecisionList = [50,11,3.3,1.83,0.92,0.31]
circleR = 10
sheepIdentity = 0
wolfIdentity = 1
distractorIdentity = 2
distractor2_Identity = 3
distractor3_Identity = 4
distractor4_Identity = 5
distractorPrecision = 0.5 / 3.14
maxDistanceToFixation = movingRange[3]
minDistanceEachOther = 50
maxDistanceEachOther = 180
minDistanceWolfSheep = 120
numberObjects = 2
PureAttentionModel = 0
HybridModel = 0
IdealObserveModel = 1
if PureAttentionModel:
attentionLimitation = 2
precisionPerSlot = 8.0
precisionForUntracked = 0
memoryratePerSlot = 0.7
memoryrateForUntracked = 0
if HybridModel:
attentionLimitation = 2
precisionPerSlot = 8
precisionForUntracked = 2.5
memoryratePerSlot = 0.7
memoryrateForUntracked = 0.45
if IdealObserveModel:
attentionLimitation = 100
precisionPerSlot = 50
precisionForUntracked = 50
memoryratePerSlot = 0.99
memoryrateForUntracked = 0.99
attentionSwitchFrequency = 12
initialPosition = InitialPosition(movingRange, maxDistanceToFixation, minDistanceEachOther, maxDistanceEachOther, minDistanceWolfSheep)
transState = Transition.Transition(movingRange, speedList)
computePrecisionAndDecay = Attention.AttentionToPrecisionAndDecay(precisionPerSlot, precisionForUntracked, memoryratePerSlot, memoryrateForUntracked)
switchAttention = Attention.AttentionSwitch(attentionLimitation)
updateBelief = BeliefUpdateWithAttention(computePrecisionAndDecay, switchAttention, attentionSwitchFrequency, sheepIdentity)
takeWolfAction = WolfPolicy(sheepIdentity, wolfIdentity, speedList[wolfIdentity])
takeDistractorAction = DistractorPolicy(distractorIdentity, distractorPrecision, speedList[distractorIdentity])
takeDistractorAction2 = DistractorPolicy(distractor2_Identity, distractorPrecision, speedList[distractor2_Identity])
takeDistractorAction3 = DistractorPolicy(distractor3_Identity, distractorPrecision, speedList[distractor3_Identity])
takeDistractorAction4 = DistractorPolicy(distractor4_Identity, distractorPrecision, speedList[distractor4_Identity])
numOfActions = 16
actionAnglesList = [i * (360 / numOfActions)
for i in range(1, numOfActions + 1)]
sheepActionList = [np.array((speedList[sheepIdentity] * np.cos(actionAngles * np.pi / 180),
speedList[sheepIdentity] * np.sin(actionAngles * np.pi / 180))) for actionAngles in actionAnglesList]
state_size = 8
action_size = numOfActions
dqn = DQNAgent(state_size, action_size)
dqn.load("./save/SingleWolf_episode_3000.h5")
actor = ActorNetwork(state_size, action_size)
actor.model.load_weights(
'./ddpg_save/SingleWolf_DDPG_episode_800-actormodel.h5')
critic = CriticNetwork(state_size, action_size)
critic.model.load_weights('./ddpg_save/SingleWolf_DDPG_episode_800-criticmodel.h5')
cpuct = 1
numMCTSSims = 10
mcts = MCTS(actor, critic, cpuct, numMCTSSims, state_size, action_size)
state = (80, 30, 0, 0, 60, 60, 0, 0)
a = np.argmax(mcts.getActionProb(state))
# print(a)
# print(sheepActionList[a])
    num_episodes = 1000
animation = 1
    for e in range(num_episodes):
score = 0
init_positionList = initialPosition(numberObjects)
# print(init_positionList)
if init_positionList == False:
continue
statesList = []
initVelocity = [0, 0]
for initPosition in init_positionList:
statesList.append(initPosition + initVelocity)
oldStates = pd.DataFrame(statesList, index=list(range(numberObjects)),
columns=['positionX', 'positionY', 'velocityX', 'velocityY'])
# wolfPrecision = random.choice(assumeWolfPrecisionList)# [50,11,3.3,1.83,0.92,0.31]
total_reward = 0
for wolfPrecision in assumeWolfPrecisionList:
done = False
mcts = MCTS(actor, critic, cpuct, numMCTSSims,
state_size, action_size)
for time in range(1000):
loss = 0
action_input = np.zeros([1, action_size])
oldStates_array = np.asarray(oldStates).flatten()
oldStates_input = np.reshape(oldStates_array, [1, state_size])
# epsilon -= 1.0 / EXPLORE
# epsilon = 0
# if np.random.rand() <= epsilon:
# action_index = random.randrange(action_size)
# else:
# action_index = actor(oldStates_input)
# action_input[0][action_index] = 1
stateList = list(oldStates_array)
action_index = np.argmax(mcts.getActionProb(stateList))
sheepAction = sheepActionList[action_index]
wolfAction = takeWolfAction(oldStates, wolfPrecision)
currentActions = [sheepAction, wolfAction]
currentStates = transState(oldStates, currentActions)
currentStates_array = np.asarray(currentStates).flatten()
# reward = stateReward(currentStates_array,
# currentActions, movingRange, time)
# pygame viz
if animation:
import pygame
from pygame.color import THECOLORS
from pygame.locals import *
agent_state = oldStates_input.flatten()[:4]
wolf_state = oldStates_input.flatten()[4:8]
agent_coordinates = list(agent_state[:2])
wolf_coordinates = list(wolf_state[:2])
agent_coordinates = list(map(int, agent_coordinates))
wolf_coordinates = list(map(int, wolf_coordinates))
pygame.init()
screen_size = [movingRange[2], movingRange[3]]
screen = pygame.display.set_mode(screen_size)
circleR = 10
screen.fill([0, 0, 0])
color = [THECOLORS['green'], THECOLORS['red']] + \
[THECOLORS['blue']] * (numberObjects - 2)
position_list = [agent_coordinates, wolf_coordinates]
# print(reward)
for drawposition in position_list:
pygame.draw.circle(
screen, color[int(position_list.index(drawposition))], drawposition, circleR)
pygame.display.flip()
# pygame.time.wait(0.2)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if isTerminal(currentStates_array):
done = 1
else:
done = 0
currentStates_input = np.reshape(
currentStates_array, [1, state_size])
# total_reward += reward
oldStates = currentStates
if done:
score = time
print (score)
break
if time == 999:
print ("score:", 999)
| UTF-8 | Python | false | false | 14,719 | py | 43 | MCTS.py | 27 | 0.583735 | 0.563353 | 0 | 423 | 33.79669 | 153 |
luyu00/ClinicalTransformerNER | 11,897,059,420,551 | f1f608bf8b315988a13d99449f44c2c58fefa785 | a70227a2227c6f581fafe7be99c87dbdbf7b9697 | /requirements.txt | 22f35c86586d9d2590a93da85dab4265b70ee205 | [
"MIT"
]
| permissive | https://github.com/luyu00/ClinicalTransformerNER | dac906af89a5e97820d58f322d00e5069853bfc2 | c1c7b58c94bef07bdcd191287c6a58a0c3641951 | refs/heads/master | 2023-01-21T15:47:21.601184 | 2020-12-04T02:32:23 | 2020-12-04T02:32:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
torch>=1.6.0
transformers>=3.0.1
tqdm>=4.36.1
numpy
packaging
| UTF-8 | Python | false | false | 105 | txt | 3 | requirements.txt | 1 | 0.638095 | 0.533333 | 0 | 8 | 12.125 | 23 |
Purplship/purplship | 1,666,447,333,733 | 767055e4fd50e18b62748fbe31b5f5f76fbcaef5 | 93d56c2c0b40e777755b64f5245f2c4e44499a13 | /schemas/usps/usps_lib/sdc_get_locations_response.py | 109a81db45db7e560e9f10f2c59e426893646298 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/Purplship/purplship | e58caf92d9d1e4437ebb15a3f51c11c838964670 | ea151d50d7cac0685f61aba25bf6c1082e56e115 | refs/heads/main | 2023-06-02T07:31:54.838521 | 2023-05-25T18:37:50 | 2023-05-25T18:37:50 | 128,151,112 | 21 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Sat Jun 18 01:03:12 2022 by generateDS.py version 2.40.13.
# Python 3.8.6 (v3.8.6:db455296be, Sep 23 2020, 13:31:39) [Clang 6.0 (clang-600.0.57)]
#
# Command line options:
# ('--no-namespace-defs', '')
# ('-o', './usps_lib/sdc_get_locations_response.py')
#
# Command line arguments:
# ./schemas/SDCGetLocationsResponse.xsd
#
# Command line:
# /Users/danielkobina/Workspace/project/karrio/.venv/karrio/bin/generateDS --no-namespace-defs -o "./usps_lib/sdc_get_locations_response.py" ./schemas/SDCGetLocationsResponse.xsd
#
# Current working directory (os.getcwd()):
# usps
#
import sys
try:
ModulenotfoundExp_ = ModuleNotFoundError
except NameError:
ModulenotfoundExp_ = ImportError
from six.moves import zip_longest
import os
import re as re_
import base64
import datetime as datetime_
import decimal as decimal_
from lxml import etree as etree_
Validate_simpletypes_ = True
SaveElementTreeNode = True
TagNamePrefix = ""
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
try:
if isinstance(infile, os.PathLike):
infile = os.path.join(infile)
except AttributeError:
pass
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
def parsexmlstring_(instring, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
element = etree_.fromstring(instring, parser=parser, **kwargs)
return element
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
# Additionally, the generatedsnamespaces module can contain a python
# dictionary named GenerateDSNamespaceTypePrefixes that associates element
# types with the namespace prefixes that are to be added to the
# "xsi:type" attribute value. See the _exportAttributes method of
# any generated element type and the generation of "xsi:type" for an
# example of the use of this table.
# An example table:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceTypePrefixes = {
# "ElementtypeC": "aaa:",
# "ElementtypeD": "bbb:",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ModulenotfoundExp_ :
GenerateDSNamespaceDefs_ = {}
try:
from generatedsnamespaces import GenerateDSNamespaceTypePrefixes as GenerateDSNamespaceTypePrefixes_
except ModulenotfoundExp_ :
GenerateDSNamespaceTypePrefixes_ = {}
#
# You can replace the following class definition by defining an
# importable module named "generatedscollector" containing a class
# named "GdsCollector". See the default class definition below for
# clues about the possible content of that class.
#
try:
from generatedscollector import GdsCollector as GdsCollector_
except ModulenotfoundExp_ :
class GdsCollector_(object):
def __init__(self, messages=None):
if messages is None:
self.messages = []
else:
self.messages = messages
def add_message(self, msg):
self.messages.append(msg)
def get_messages(self):
return self.messages
def clear_messages(self):
self.messages = []
def print_messages(self):
for msg in self.messages:
print("Warning: {}".format(msg))
def write_messages(self, outstream):
for msg in self.messages:
outstream.write("Warning: {}\n".format(msg))
#
# The super-class for enum types
#
try:
from enum import Enum
except ModulenotfoundExp_ :
Enum = object
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ModulenotfoundExp_ as exp:
try:
from generatedssupersuper import GeneratedsSuperSuper
except ModulenotfoundExp_ as exp:
class GeneratedsSuperSuper(object):
pass
class GeneratedsSuper(GeneratedsSuperSuper):
__hash__ = object.__hash__
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def __str__(self):
settings = {
'str_pretty_print': True,
'str_indent_level': 0,
'str_namespaceprefix': '',
'str_name': self.__class__.__name__,
'str_namespacedefs': '',
}
for n in settings:
if hasattr(self, n):
settings[n] = getattr(self, n)
if sys.version_info.major == 2:
from StringIO import StringIO
else:
from io import StringIO
output = StringIO()
self.export(
output,
settings['str_indent_level'],
pretty_print=settings['str_pretty_print'],
namespaceprefix_=settings['str_namespaceprefix'],
name_=settings['str_name'],
namespacedef_=settings['str_namespacedefs']
)
strval = output.getvalue()
output.close()
return strval
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_parse_string(self, input_data, node=None, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data).decode('ascii')
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % int(input_data)
def gds_parse_integer(self, input_data, node=None, input_name=''):
try:
ival = int(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires integer value: %s' % exp)
return ival
def gds_validate_integer(self, input_data, node=None, input_name=''):
try:
value = int(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires integer value')
return value
def gds_format_integer_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integer values')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % float(input_data)).rstrip('0')
def gds_parse_float(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires float or double value: %s' % exp)
return fval_
def gds_validate_float(self, input_data, node=None, input_name=''):
try:
value = float(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires float value')
return value
def gds_format_float_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of float values')
return values
def gds_format_decimal(self, input_data, input_name=''):
return_value = '%s' % input_data
if '.' in return_value:
return_value = return_value.rstrip('0')
if return_value.endswith('.'):
return_value = return_value.rstrip('.')
return return_value
def gds_parse_decimal(self, input_data, node=None, input_name=''):
try:
decimal_value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return decimal_value
def gds_validate_decimal(self, input_data, node=None, input_name=''):
try:
value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return value
def gds_format_decimal_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return ' '.join([self.gds_format_decimal(item) for item in input_data])
def gds_validate_decimal_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
decimal_.Decimal(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of decimal values')
return values
def gds_format_double(self, input_data, input_name=''):
return '%s' % input_data
def gds_parse_double(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires double or float value: %s' % exp)
return fval_
def gds_validate_double(self, input_data, node=None, input_name=''):
try:
value = float(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires double or float value')
return value
def gds_format_double_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(
node, 'Requires sequence of double or float values')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_parse_boolean(self, input_data, node=None, input_name=''):
input_data = input_data.strip()
if input_data in ('true', '1'):
bval = True
elif input_data in ('false', '0'):
bval = False
else:
raise_parse_error(node, 'Requires boolean value')
return bval
def gds_validate_boolean(self, input_data, node=None, input_name=''):
if input_data not in (True, 1, False, 0, ):
raise_parse_error(
node,
'Requires boolean value '
'(one of True, 1, False, 0)')
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
value = self.gds_parse_boolean(value, node, input_name)
if value not in (True, 1, False, 0, ):
raise_parse_error(
node,
'Requires sequence of boolean values '
'(one of True, 1, False, 0)')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (
time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns.
# The target value must match at least one of the patterns
# in order for the test to succeed.
found1 = True
target = str(target)
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_check_cardinality_(
self, value, input_name,
min_occurs=0, max_occurs=1, required=None):
if value is None:
length = 0
elif isinstance(value, list):
length = len(value)
else:
length = 1
if required is not None :
if required and length < 1:
self.gds_collector_.add_message(
"Required value {}{} is missing".format(
input_name, self.gds_get_node_lineno_()))
if length < min_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is below "
"the minimum allowed, "
"expected at least {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
min_occurs, length))
elif length > max_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is above "
"the maximum allowed, "
"expected at most {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
max_occurs, length))
def gds_validate_builtin_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value, input_name=input_name)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_validate_defined_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
# provide default value in case option --disable-xml is used.
content = ""
content = etree_.tostring(node, encoding="unicode")
return content
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
def excl_select_objs_(obj):
return (obj[0] != 'parent_object_' and
obj[0] != 'gds_collector_')
if type(self) != type(other):
return False
return all(x == y for x, y in zip_longest(
filter(excl_select_objs_, self.__dict__.items()),
filter(excl_select_objs_, other.__dict__.items())))
def __ne__(self, other):
return not self.__eq__(other)
# Django ETL transform hooks.
def gds_djo_etl_transform(self):
pass
def gds_djo_etl_transform_db_obj(self, dbobj):
pass
# SQLAlchemy ETL transform hooks.
def gds_sqa_etl_transform(self):
return 0, None
def gds_sqa_etl_transform_db_obj(self, dbobj):
pass
def gds_get_node_lineno_(self):
if (hasattr(self, "gds_elementtree_node_") and
self.gds_elementtree_node_ is not None):
return ' near line {}'.format(
self.gds_elementtree_node_.sourceline)
else:
return ""
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = ''
# Set this to false in order to deactivate during export, the use of
# name space prefixes captured from the input document.
UseCapturedNS_ = True
CapturedNsmap_ = {}
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
s1 = inStr.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
s1 = s1.replace('\n', ' ')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
if prefix == 'xml':
namespace = 'http://www.w3.org/XML/1998/namespace'
else:
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
def encode_str_2_3(instr):
return instr
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if node is not None:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name_=name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element, mapping_=None, reverse_mapping_=None, nsmap_=None):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self, mapping_=None, reverse_mapping_=None, nsmap_=None):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class SDCGetLocationsResponse(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Release=None, MailClass=None, OriginZIP=None, OriginCity=None, OriginState=None, DestZIP=None, DestCity=None, DestState=None, AcceptDate=None, AcceptTime=None, Expedited=None, NonExpedited=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.Release = Release
self.Release_nsprefix_ = None
self.MailClass = MailClass
self.MailClass_nsprefix_ = None
self.OriginZIP = OriginZIP
self.OriginZIP_nsprefix_ = None
self.OriginCity = OriginCity
self.OriginCity_nsprefix_ = None
self.OriginState = OriginState
self.OriginState_nsprefix_ = None
self.DestZIP = DestZIP
self.DestZIP_nsprefix_ = None
self.DestCity = DestCity
self.DestCity_nsprefix_ = None
self.DestState = DestState
self.DestState_nsprefix_ = None
if isinstance(AcceptDate, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(AcceptDate, '%Y-%m-%d').date()
else:
initvalue_ = AcceptDate
self.AcceptDate = initvalue_
self.AcceptDate_nsprefix_ = None
self.AcceptTime = AcceptTime
self.AcceptTime_nsprefix_ = None
self.Expedited = Expedited
self.Expedited_nsprefix_ = None
if NonExpedited is None:
self.NonExpedited = []
else:
self.NonExpedited = NonExpedited
self.NonExpedited_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SDCGetLocationsResponse)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SDCGetLocationsResponse.subclass:
return SDCGetLocationsResponse.subclass(*args_, **kwargs_)
else:
return SDCGetLocationsResponse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Release(self):
return self.Release
def set_Release(self, Release):
self.Release = Release
def get_MailClass(self):
return self.MailClass
def set_MailClass(self, MailClass):
self.MailClass = MailClass
def get_OriginZIP(self):
return self.OriginZIP
def set_OriginZIP(self, OriginZIP):
self.OriginZIP = OriginZIP
def get_OriginCity(self):
return self.OriginCity
def set_OriginCity(self, OriginCity):
self.OriginCity = OriginCity
def get_OriginState(self):
return self.OriginState
def set_OriginState(self, OriginState):
self.OriginState = OriginState
def get_DestZIP(self):
return self.DestZIP
def set_DestZIP(self, DestZIP):
self.DestZIP = DestZIP
def get_DestCity(self):
return self.DestCity
def set_DestCity(self, DestCity):
self.DestCity = DestCity
def get_DestState(self):
return self.DestState
def set_DestState(self, DestState):
self.DestState = DestState
def get_AcceptDate(self):
return self.AcceptDate
def set_AcceptDate(self, AcceptDate):
self.AcceptDate = AcceptDate
def get_AcceptTime(self):
return self.AcceptTime
def set_AcceptTime(self, AcceptTime):
self.AcceptTime = AcceptTime
def get_Expedited(self):
return self.Expedited
def set_Expedited(self, Expedited):
self.Expedited = Expedited
def get_NonExpedited(self):
return self.NonExpedited
def set_NonExpedited(self, NonExpedited):
self.NonExpedited = NonExpedited
def add_NonExpedited(self, value):
self.NonExpedited.append(value)
def insert_NonExpedited_at(self, index, value):
self.NonExpedited.insert(index, value)
def replace_NonExpedited_at(self, index, value):
self.NonExpedited[index] = value
def _hasContent(self):
if (
self.Release is not None or
self.MailClass is not None or
self.OriginZIP is not None or
self.OriginCity is not None or
self.OriginState is not None or
self.DestZIP is not None or
self.DestCity is not None or
self.DestState is not None or
self.AcceptDate is not None or
self.AcceptTime is not None or
self.Expedited is not None or
self.NonExpedited
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='SDCGetLocationsResponse', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SDCGetLocationsResponse')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'SDCGetLocationsResponse':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SDCGetLocationsResponse')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='SDCGetLocationsResponse', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SDCGetLocationsResponse'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='SDCGetLocationsResponse', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Release is not None:
namespaceprefix_ = self.Release_nsprefix_ + ':' if (UseCapturedNS_ and self.Release_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sRelease>%s</%sRelease>%s' % (namespaceprefix_ , self.gds_format_integer(self.Release, input_name='Release'), namespaceprefix_ , eol_))
if self.MailClass is not None:
namespaceprefix_ = self.MailClass_nsprefix_ + ':' if (UseCapturedNS_ and self.MailClass_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sMailClass>%s</%sMailClass>%s' % (namespaceprefix_ , self.gds_format_integer(self.MailClass, input_name='MailClass'), namespaceprefix_ , eol_))
if self.OriginZIP is not None:
namespaceprefix_ = self.OriginZIP_nsprefix_ + ':' if (UseCapturedNS_ and self.OriginZIP_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sOriginZIP>%s</%sOriginZIP>%s' % (namespaceprefix_ , self.gds_format_integer(self.OriginZIP, input_name='OriginZIP'), namespaceprefix_ , eol_))
if self.OriginCity is not None:
namespaceprefix_ = self.OriginCity_nsprefix_ + ':' if (UseCapturedNS_ and self.OriginCity_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sOriginCity>%s</%sOriginCity>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.OriginCity), input_name='OriginCity')), namespaceprefix_ , eol_))
if self.OriginState is not None:
namespaceprefix_ = self.OriginState_nsprefix_ + ':' if (UseCapturedNS_ and self.OriginState_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sOriginState>%s</%sOriginState>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.OriginState), input_name='OriginState')), namespaceprefix_ , eol_))
if self.DestZIP is not None:
namespaceprefix_ = self.DestZIP_nsprefix_ + ':' if (UseCapturedNS_ and self.DestZIP_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sDestZIP>%s</%sDestZIP>%s' % (namespaceprefix_ , self.gds_format_integer(self.DestZIP, input_name='DestZIP'), namespaceprefix_ , eol_))
if self.DestCity is not None:
namespaceprefix_ = self.DestCity_nsprefix_ + ':' if (UseCapturedNS_ and self.DestCity_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sDestCity>%s</%sDestCity>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.DestCity), input_name='DestCity')), namespaceprefix_ , eol_))
if self.DestState is not None:
namespaceprefix_ = self.DestState_nsprefix_ + ':' if (UseCapturedNS_ and self.DestState_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sDestState>%s</%sDestState>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.DestState), input_name='DestState')), namespaceprefix_ , eol_))
if self.AcceptDate is not None:
namespaceprefix_ = self.AcceptDate_nsprefix_ + ':' if (UseCapturedNS_ and self.AcceptDate_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sAcceptDate>%s</%sAcceptDate>%s' % (namespaceprefix_ , self.gds_format_date(self.AcceptDate, input_name='AcceptDate'), namespaceprefix_ , eol_))
if self.AcceptTime is not None:
namespaceprefix_ = self.AcceptTime_nsprefix_ + ':' if (UseCapturedNS_ and self.AcceptTime_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sAcceptTime>%s</%sAcceptTime>%s' % (namespaceprefix_ , self.gds_format_integer(self.AcceptTime, input_name='AcceptTime'), namespaceprefix_ , eol_))
if self.Expedited is not None:
namespaceprefix_ = self.Expedited_nsprefix_ + ':' if (UseCapturedNS_ and self.Expedited_nsprefix_) else ''
self.Expedited.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Expedited', pretty_print=pretty_print)
for NonExpedited_ in self.NonExpedited:
namespaceprefix_ = self.NonExpedited_nsprefix_ + ':' if (UseCapturedNS_ and self.NonExpedited_nsprefix_) else ''
NonExpedited_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='NonExpedited', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Release' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'Release')
ival_ = self.gds_validate_integer(ival_, node, 'Release')
self.Release = ival_
self.Release_nsprefix_ = child_.prefix
elif nodeName_ == 'MailClass' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'MailClass')
ival_ = self.gds_validate_integer(ival_, node, 'MailClass')
self.MailClass = ival_
self.MailClass_nsprefix_ = child_.prefix
elif nodeName_ == 'OriginZIP' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'OriginZIP')
ival_ = self.gds_validate_integer(ival_, node, 'OriginZIP')
self.OriginZIP = ival_
self.OriginZIP_nsprefix_ = child_.prefix
elif nodeName_ == 'OriginCity':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'OriginCity')
value_ = self.gds_validate_string(value_, node, 'OriginCity')
self.OriginCity = value_
self.OriginCity_nsprefix_ = child_.prefix
elif nodeName_ == 'OriginState':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'OriginState')
value_ = self.gds_validate_string(value_, node, 'OriginState')
self.OriginState = value_
self.OriginState_nsprefix_ = child_.prefix
elif nodeName_ == 'DestZIP' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'DestZIP')
ival_ = self.gds_validate_integer(ival_, node, 'DestZIP')
self.DestZIP = ival_
self.DestZIP_nsprefix_ = child_.prefix
elif nodeName_ == 'DestCity':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'DestCity')
value_ = self.gds_validate_string(value_, node, 'DestCity')
self.DestCity = value_
self.DestCity_nsprefix_ = child_.prefix
elif nodeName_ == 'DestState':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'DestState')
value_ = self.gds_validate_string(value_, node, 'DestState')
self.DestState = value_
self.DestState_nsprefix_ = child_.prefix
        elif nodeName_ == 'AcceptDate' and child_.text:  # skip empty date elements; gds_parse_date does not handle None
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.AcceptDate = dval_
self.AcceptDate_nsprefix_ = child_.prefix
elif nodeName_ == 'AcceptTime' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'AcceptTime')
ival_ = self.gds_validate_integer(ival_, node, 'AcceptTime')
self.AcceptTime = ival_
self.AcceptTime_nsprefix_ = child_.prefix
elif nodeName_ == 'Expedited':
obj_ = ExpeditedType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Expedited = obj_
obj_.original_tagname_ = 'Expedited'
elif nodeName_ == 'NonExpedited':
obj_ = NonExpeditedType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.NonExpedited.append(obj_)
obj_.original_tagname_ = 'NonExpedited'
# end class SDCGetLocationsResponse
class ExpeditedType(GeneratedsSuper):
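    """Binding for the <Expedited> child of an SDCGetLocationsResponse:
    an EAD element parsed as a date plus repeated Commitment entries.
    EAD may be passed to __init__ as a datetime.date or a 'YYYY-MM-DD'
    string; the reading of EAD as "earliest acceptance date" is an
    inference from USPS SDC usage, not from schema annotations."""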
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, EAD=None, Commitment=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
if isinstance(EAD, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(EAD, '%Y-%m-%d').date()
else:
initvalue_ = EAD
self.EAD = initvalue_
self.EAD_nsprefix_ = None
if Commitment is None:
self.Commitment = []
else:
self.Commitment = Commitment
self.Commitment_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ExpeditedType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ExpeditedType.subclass:
return ExpeditedType.subclass(*args_, **kwargs_)
else:
return ExpeditedType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_EAD(self):
return self.EAD
def set_EAD(self, EAD):
self.EAD = EAD
def get_Commitment(self):
return self.Commitment
def set_Commitment(self, Commitment):
self.Commitment = Commitment
def add_Commitment(self, value):
self.Commitment.append(value)
def insert_Commitment_at(self, index, value):
self.Commitment.insert(index, value)
def replace_Commitment_at(self, index, value):
self.Commitment[index] = value
def _hasContent(self):
if (
self.EAD is not None or
self.Commitment
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ExpeditedType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ExpeditedType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ExpeditedType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ExpeditedType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ExpeditedType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ExpeditedType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ExpeditedType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.EAD is not None:
namespaceprefix_ = self.EAD_nsprefix_ + ':' if (UseCapturedNS_ and self.EAD_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sEAD>%s</%sEAD>%s' % (namespaceprefix_ , self.gds_format_date(self.EAD, input_name='EAD'), namespaceprefix_ , eol_))
for Commitment_ in self.Commitment:
namespaceprefix_ = self.Commitment_nsprefix_ + ':' if (UseCapturedNS_ and self.Commitment_nsprefix_) else ''
Commitment_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Commitment', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == 'EAD' and child_.text:
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.EAD = dval_
self.EAD_nsprefix_ = child_.prefix
elif nodeName_ == 'Commitment':
obj_ = CommitmentType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Commitment.append(obj_)
obj_.original_tagname_ = 'Commitment'
# end class ExpeditedType
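# Usage sketch (hand-written; not generator output). The bindings above are
# populated from parsed XML via build(). lxml.etree is assumed here because
# build() reads node.prefix, which xml.etree.ElementTree elements lack.
#
#   from lxml import etree
#   node = etree.fromstring(b'<Expedited><EAD>2024-01-02</EAD></Expedited>')
#   obj = ExpeditedType.factory()
#   obj.build(node)
#   obj.get_EAD()   # -> datetime.date(2024, 1, 2)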
class CommitmentType(GeneratedsSuper):
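    """Binding for a <Commitment> entry under <Expedited>: an integer
    MailClass, CommitmentName/CommitmentTime/CommitmentSeq details, and
    the repeated Location elements associated with the commitment (the
    role of Location is inferred from context)."""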
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, MailClass=None, CommitmentName=None, CommitmentTime=None, CommitmentSeq=None, Location=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.MailClass = MailClass
self.MailClass_nsprefix_ = None
self.CommitmentName = CommitmentName
self.CommitmentName_nsprefix_ = None
self.CommitmentTime = CommitmentTime
self.CommitmentTime_nsprefix_ = None
self.CommitmentSeq = CommitmentSeq
self.CommitmentSeq_nsprefix_ = None
if Location is None:
self.Location = []
else:
self.Location = Location
self.Location_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, CommitmentType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if CommitmentType.subclass:
return CommitmentType.subclass(*args_, **kwargs_)
else:
return CommitmentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_MailClass(self):
return self.MailClass
def set_MailClass(self, MailClass):
self.MailClass = MailClass
def get_CommitmentName(self):
return self.CommitmentName
def set_CommitmentName(self, CommitmentName):
self.CommitmentName = CommitmentName
def get_CommitmentTime(self):
return self.CommitmentTime
def set_CommitmentTime(self, CommitmentTime):
self.CommitmentTime = CommitmentTime
def get_CommitmentSeq(self):
return self.CommitmentSeq
def set_CommitmentSeq(self, CommitmentSeq):
self.CommitmentSeq = CommitmentSeq
def get_Location(self):
return self.Location
def set_Location(self, Location):
self.Location = Location
def add_Location(self, value):
self.Location.append(value)
def insert_Location_at(self, index, value):
self.Location.insert(index, value)
def replace_Location_at(self, index, value):
self.Location[index] = value
def _hasContent(self):
if (
self.MailClass is not None or
self.CommitmentName is not None or
self.CommitmentTime is not None or
self.CommitmentSeq is not None or
self.Location
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='CommitmentType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('CommitmentType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'CommitmentType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CommitmentType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='CommitmentType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CommitmentType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='CommitmentType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.MailClass is not None:
namespaceprefix_ = self.MailClass_nsprefix_ + ':' if (UseCapturedNS_ and self.MailClass_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sMailClass>%s</%sMailClass>%s' % (namespaceprefix_ , self.gds_format_integer(self.MailClass, input_name='MailClass'), namespaceprefix_ , eol_))
if self.CommitmentName is not None:
namespaceprefix_ = self.CommitmentName_nsprefix_ + ':' if (UseCapturedNS_ and self.CommitmentName_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCommitmentName>%s</%sCommitmentName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.CommitmentName), input_name='CommitmentName')), namespaceprefix_ , eol_))
if self.CommitmentTime is not None:
namespaceprefix_ = self.CommitmentTime_nsprefix_ + ':' if (UseCapturedNS_ and self.CommitmentTime_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCommitmentTime>%s</%sCommitmentTime>%s' % (namespaceprefix_ , self.gds_format_integer(self.CommitmentTime, input_name='CommitmentTime'), namespaceprefix_ , eol_))
if self.CommitmentSeq is not None:
namespaceprefix_ = self.CommitmentSeq_nsprefix_ + ':' if (UseCapturedNS_ and self.CommitmentSeq_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCommitmentSeq>%s</%sCommitmentSeq>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.CommitmentSeq), input_name='CommitmentSeq')), namespaceprefix_ , eol_))
for Location_ in self.Location:
namespaceprefix_ = self.Location_nsprefix_ + ':' if (UseCapturedNS_ and self.Location_nsprefix_) else ''
Location_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Location', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'MailClass' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'MailClass')
ival_ = self.gds_validate_integer(ival_, node, 'MailClass')
self.MailClass = ival_
self.MailClass_nsprefix_ = child_.prefix
elif nodeName_ == 'CommitmentName':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'CommitmentName')
value_ = self.gds_validate_string(value_, node, 'CommitmentName')
self.CommitmentName = value_
self.CommitmentName_nsprefix_ = child_.prefix
elif nodeName_ == 'CommitmentTime' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'CommitmentTime')
ival_ = self.gds_validate_integer(ival_, node, 'CommitmentTime')
self.CommitmentTime = ival_
self.CommitmentTime_nsprefix_ = child_.prefix
elif nodeName_ == 'CommitmentSeq':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'CommitmentSeq')
value_ = self.gds_validate_string(value_, node, 'CommitmentSeq')
self.CommitmentSeq = value_
self.CommitmentSeq_nsprefix_ = child_.prefix
elif nodeName_ == 'Location':
obj_ = LocationType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Location.append(obj_)
obj_.original_tagname_ = 'Location'
# end class CommitmentType
class LocationType(GeneratedsSuper):
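    """Binding for a <Location> under <Commitment>: an SDD date, integer
    COT, facility type, street address fields and an IsGuaranteed flag.
    Reading SDD/COT as "scheduled delivery date"/"cut-off time" is an
    inference from USPS terminology, not from this file."""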
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, SDD=None, COT=None, FacType=None, Street=None, City=None, State=None, ZIP=None, IsGuaranteed=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
if isinstance(SDD, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(SDD, '%Y-%m-%d').date()
else:
initvalue_ = SDD
self.SDD = initvalue_
self.SDD_nsprefix_ = None
self.COT = COT
self.COT_nsprefix_ = None
self.FacType = FacType
self.FacType_nsprefix_ = None
self.Street = Street
self.Street_nsprefix_ = None
self.City = City
self.City_nsprefix_ = None
self.State = State
self.State_nsprefix_ = None
self.ZIP = ZIP
self.ZIP_nsprefix_ = None
self.IsGuaranteed = IsGuaranteed
self.IsGuaranteed_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, LocationType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if LocationType.subclass:
return LocationType.subclass(*args_, **kwargs_)
else:
return LocationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_SDD(self):
return self.SDD
def set_SDD(self, SDD):
self.SDD = SDD
def get_COT(self):
return self.COT
def set_COT(self, COT):
self.COT = COT
def get_FacType(self):
return self.FacType
def set_FacType(self, FacType):
self.FacType = FacType
def get_Street(self):
return self.Street
def set_Street(self, Street):
self.Street = Street
def get_City(self):
return self.City
def set_City(self, City):
self.City = City
def get_State(self):
return self.State
def set_State(self, State):
self.State = State
def get_ZIP(self):
return self.ZIP
def set_ZIP(self, ZIP):
self.ZIP = ZIP
def get_IsGuaranteed(self):
return self.IsGuaranteed
def set_IsGuaranteed(self, IsGuaranteed):
self.IsGuaranteed = IsGuaranteed
def _hasContent(self):
if (
self.SDD is not None or
self.COT is not None or
self.FacType is not None or
self.Street is not None or
self.City is not None or
self.State is not None or
self.ZIP is not None or
self.IsGuaranteed is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='LocationType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('LocationType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'LocationType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='LocationType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='LocationType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='LocationType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='LocationType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.SDD is not None:
namespaceprefix_ = self.SDD_nsprefix_ + ':' if (UseCapturedNS_ and self.SDD_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSDD>%s</%sSDD>%s' % (namespaceprefix_ , self.gds_format_date(self.SDD, input_name='SDD'), namespaceprefix_ , eol_))
if self.COT is not None:
namespaceprefix_ = self.COT_nsprefix_ + ':' if (UseCapturedNS_ and self.COT_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCOT>%s</%sCOT>%s' % (namespaceprefix_ , self.gds_format_integer(self.COT, input_name='COT'), namespaceprefix_ , eol_))
if self.FacType is not None:
namespaceprefix_ = self.FacType_nsprefix_ + ':' if (UseCapturedNS_ and self.FacType_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sFacType>%s</%sFacType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.FacType), input_name='FacType')), namespaceprefix_ , eol_))
if self.Street is not None:
namespaceprefix_ = self.Street_nsprefix_ + ':' if (UseCapturedNS_ and self.Street_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sStreet>%s</%sStreet>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Street), input_name='Street')), namespaceprefix_ , eol_))
if self.City is not None:
namespaceprefix_ = self.City_nsprefix_ + ':' if (UseCapturedNS_ and self.City_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCity>%s</%sCity>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.City), input_name='City')), namespaceprefix_ , eol_))
if self.State is not None:
namespaceprefix_ = self.State_nsprefix_ + ':' if (UseCapturedNS_ and self.State_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sState>%s</%sState>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.State), input_name='State')), namespaceprefix_ , eol_))
if self.ZIP is not None:
namespaceprefix_ = self.ZIP_nsprefix_ + ':' if (UseCapturedNS_ and self.ZIP_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sZIP>%s</%sZIP>%s' % (namespaceprefix_ , self.gds_format_integer(self.ZIP, input_name='ZIP'), namespaceprefix_ , eol_))
if self.IsGuaranteed is not None:
namespaceprefix_ = self.IsGuaranteed_nsprefix_ + ':' if (UseCapturedNS_ and self.IsGuaranteed_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sIsGuaranteed>%s</%sIsGuaranteed>%s' % (namespaceprefix_ , self.gds_format_integer(self.IsGuaranteed, input_name='IsGuaranteed'), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == 'SDD' and child_.text:
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.SDD = dval_
self.SDD_nsprefix_ = child_.prefix
elif nodeName_ == 'COT' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'COT')
ival_ = self.gds_validate_integer(ival_, node, 'COT')
self.COT = ival_
self.COT_nsprefix_ = child_.prefix
elif nodeName_ == 'FacType':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'FacType')
value_ = self.gds_validate_string(value_, node, 'FacType')
self.FacType = value_
self.FacType_nsprefix_ = child_.prefix
elif nodeName_ == 'Street':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Street')
value_ = self.gds_validate_string(value_, node, 'Street')
self.Street = value_
self.Street_nsprefix_ = child_.prefix
elif nodeName_ == 'City':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'City')
value_ = self.gds_validate_string(value_, node, 'City')
self.City = value_
self.City_nsprefix_ = child_.prefix
elif nodeName_ == 'State':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'State')
value_ = self.gds_validate_string(value_, node, 'State')
self.State = value_
self.State_nsprefix_ = child_.prefix
elif nodeName_ == 'ZIP' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'ZIP')
ival_ = self.gds_validate_integer(ival_, node, 'ZIP')
self.ZIP = ival_
self.ZIP_nsprefix_ = child_.prefix
elif nodeName_ == 'IsGuaranteed' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'IsGuaranteed')
ival_ = self.gds_validate_integer(ival_, node, 'IsGuaranteed')
self.IsGuaranteed = ival_
self.IsGuaranteed_nsprefix_ = child_.prefix
# end class LocationType
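# Export sketch (hand-written; field values are illustrative only):
#
#   import sys
#   loc = LocationType(City='MEMPHIS', State='TN', ZIP=38101)
#   loc.export(sys.stdout, 0, name_='Location')
#   # writes an indented <Location> element with City, State and ZIP children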
class NonExpeditedType(GeneratedsSuper):
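    """Binding for a <NonExpedited> child of an SDCGetLocationsResponse:
    service-standard data for non-expedited mail (SvcStdMsg, SvcStdDays,
    TotDaysDeliver, SchedDlvryDate, NonDlvryDays) plus optional
    NonExpeditedExceptions and HFPU sub-elements. EAD and SchedDlvryDate
    accept a date or a 'YYYY-MM-DD' string in __init__."""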
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, MailClass=None, NonExpeditedDestType=None, EAD=None, COT=None, SvcStdMsg=None, SvcStdDays=None, TotDaysDeliver=None, SchedDlvryDate=None, NonDlvryDays=None, NonExpeditedExceptions=None, HFPU=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.MailClass = MailClass
self.MailClass_nsprefix_ = None
self.NonExpeditedDestType = NonExpeditedDestType
self.NonExpeditedDestType_nsprefix_ = None
if isinstance(EAD, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(EAD, '%Y-%m-%d').date()
else:
initvalue_ = EAD
self.EAD = initvalue_
self.EAD_nsprefix_ = None
self.COT = COT
self.COT_nsprefix_ = None
self.SvcStdMsg = SvcStdMsg
self.SvcStdMsg_nsprefix_ = None
self.SvcStdDays = SvcStdDays
self.SvcStdDays_nsprefix_ = None
self.TotDaysDeliver = TotDaysDeliver
self.TotDaysDeliver_nsprefix_ = None
if isinstance(SchedDlvryDate, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(SchedDlvryDate, '%Y-%m-%d').date()
else:
initvalue_ = SchedDlvryDate
self.SchedDlvryDate = initvalue_
self.SchedDlvryDate_nsprefix_ = None
self.NonDlvryDays = NonDlvryDays
self.NonDlvryDays_nsprefix_ = None
self.NonExpeditedExceptions = NonExpeditedExceptions
self.NonExpeditedExceptions_nsprefix_ = None
self.HFPU = HFPU
self.HFPU_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, NonExpeditedType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if NonExpeditedType.subclass:
return NonExpeditedType.subclass(*args_, **kwargs_)
else:
return NonExpeditedType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_MailClass(self):
return self.MailClass
def set_MailClass(self, MailClass):
self.MailClass = MailClass
def get_NonExpeditedDestType(self):
return self.NonExpeditedDestType
def set_NonExpeditedDestType(self, NonExpeditedDestType):
self.NonExpeditedDestType = NonExpeditedDestType
def get_EAD(self):
return self.EAD
def set_EAD(self, EAD):
self.EAD = EAD
def get_COT(self):
return self.COT
def set_COT(self, COT):
self.COT = COT
def get_SvcStdMsg(self):
return self.SvcStdMsg
def set_SvcStdMsg(self, SvcStdMsg):
self.SvcStdMsg = SvcStdMsg
def get_SvcStdDays(self):
return self.SvcStdDays
def set_SvcStdDays(self, SvcStdDays):
self.SvcStdDays = SvcStdDays
def get_TotDaysDeliver(self):
return self.TotDaysDeliver
def set_TotDaysDeliver(self, TotDaysDeliver):
self.TotDaysDeliver = TotDaysDeliver
def get_SchedDlvryDate(self):
return self.SchedDlvryDate
def set_SchedDlvryDate(self, SchedDlvryDate):
self.SchedDlvryDate = SchedDlvryDate
def get_NonDlvryDays(self):
return self.NonDlvryDays
def set_NonDlvryDays(self, NonDlvryDays):
self.NonDlvryDays = NonDlvryDays
def get_NonExpeditedExceptions(self):
return self.NonExpeditedExceptions
def set_NonExpeditedExceptions(self, NonExpeditedExceptions):
self.NonExpeditedExceptions = NonExpeditedExceptions
def get_HFPU(self):
return self.HFPU
def set_HFPU(self, HFPU):
self.HFPU = HFPU
def _hasContent(self):
if (
self.MailClass is not None or
self.NonExpeditedDestType is not None or
self.EAD is not None or
self.COT is not None or
self.SvcStdMsg is not None or
self.SvcStdDays is not None or
self.TotDaysDeliver is not None or
self.SchedDlvryDate is not None or
self.NonDlvryDays is not None or
self.NonExpeditedExceptions is not None or
self.HFPU is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NonExpeditedType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('NonExpeditedType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'NonExpeditedType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='NonExpeditedType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='NonExpeditedType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='NonExpeditedType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NonExpeditedType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.MailClass is not None:
namespaceprefix_ = self.MailClass_nsprefix_ + ':' if (UseCapturedNS_ and self.MailClass_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sMailClass>%s</%sMailClass>%s' % (namespaceprefix_ , self.gds_format_integer(self.MailClass, input_name='MailClass'), namespaceprefix_ , eol_))
if self.NonExpeditedDestType is not None:
namespaceprefix_ = self.NonExpeditedDestType_nsprefix_ + ':' if (UseCapturedNS_ and self.NonExpeditedDestType_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sNonExpeditedDestType>%s</%sNonExpeditedDestType>%s' % (namespaceprefix_ , self.gds_format_integer(self.NonExpeditedDestType, input_name='NonExpeditedDestType'), namespaceprefix_ , eol_))
if self.EAD is not None:
namespaceprefix_ = self.EAD_nsprefix_ + ':' if (UseCapturedNS_ and self.EAD_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sEAD>%s</%sEAD>%s' % (namespaceprefix_ , self.gds_format_date(self.EAD, input_name='EAD'), namespaceprefix_ , eol_))
if self.COT is not None:
namespaceprefix_ = self.COT_nsprefix_ + ':' if (UseCapturedNS_ and self.COT_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCOT>%s</%sCOT>%s' % (namespaceprefix_ , self.gds_format_integer(self.COT, input_name='COT'), namespaceprefix_ , eol_))
if self.SvcStdMsg is not None:
namespaceprefix_ = self.SvcStdMsg_nsprefix_ + ':' if (UseCapturedNS_ and self.SvcStdMsg_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSvcStdMsg>%s</%sSvcStdMsg>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.SvcStdMsg), input_name='SvcStdMsg')), namespaceprefix_ , eol_))
if self.SvcStdDays is not None:
namespaceprefix_ = self.SvcStdDays_nsprefix_ + ':' if (UseCapturedNS_ and self.SvcStdDays_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSvcStdDays>%s</%sSvcStdDays>%s' % (namespaceprefix_ , self.gds_format_integer(self.SvcStdDays, input_name='SvcStdDays'), namespaceprefix_ , eol_))
if self.TotDaysDeliver is not None:
namespaceprefix_ = self.TotDaysDeliver_nsprefix_ + ':' if (UseCapturedNS_ and self.TotDaysDeliver_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sTotDaysDeliver>%s</%sTotDaysDeliver>%s' % (namespaceprefix_ , self.gds_format_integer(self.TotDaysDeliver, input_name='TotDaysDeliver'), namespaceprefix_ , eol_))
if self.SchedDlvryDate is not None:
namespaceprefix_ = self.SchedDlvryDate_nsprefix_ + ':' if (UseCapturedNS_ and self.SchedDlvryDate_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSchedDlvryDate>%s</%sSchedDlvryDate>%s' % (namespaceprefix_ , self.gds_format_date(self.SchedDlvryDate, input_name='SchedDlvryDate'), namespaceprefix_ , eol_))
if self.NonDlvryDays is not None:
namespaceprefix_ = self.NonDlvryDays_nsprefix_ + ':' if (UseCapturedNS_ and self.NonDlvryDays_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sNonDlvryDays>%s</%sNonDlvryDays>%s' % (namespaceprefix_ , self.gds_format_integer(self.NonDlvryDays, input_name='NonDlvryDays'), namespaceprefix_ , eol_))
if self.NonExpeditedExceptions is not None:
namespaceprefix_ = self.NonExpeditedExceptions_nsprefix_ + ':' if (UseCapturedNS_ and self.NonExpeditedExceptions_nsprefix_) else ''
self.NonExpeditedExceptions.export(outfile, level, namespaceprefix_, namespacedef_='', name_='NonExpeditedExceptions', pretty_print=pretty_print)
if self.HFPU is not None:
namespaceprefix_ = self.HFPU_nsprefix_ + ':' if (UseCapturedNS_ and self.HFPU_nsprefix_) else ''
self.HFPU.export(outfile, level, namespaceprefix_, namespacedef_='', name_='HFPU', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'MailClass' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'MailClass')
ival_ = self.gds_validate_integer(ival_, node, 'MailClass')
self.MailClass = ival_
self.MailClass_nsprefix_ = child_.prefix
elif nodeName_ == 'NonExpeditedDestType' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'NonExpeditedDestType')
ival_ = self.gds_validate_integer(ival_, node, 'NonExpeditedDestType')
self.NonExpeditedDestType = ival_
self.NonExpeditedDestType_nsprefix_ = child_.prefix
        elif nodeName_ == 'EAD' and child_.text:
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.EAD = dval_
self.EAD_nsprefix_ = child_.prefix
elif nodeName_ == 'COT' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'COT')
ival_ = self.gds_validate_integer(ival_, node, 'COT')
self.COT = ival_
self.COT_nsprefix_ = child_.prefix
elif nodeName_ == 'SvcStdMsg':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'SvcStdMsg')
value_ = self.gds_validate_string(value_, node, 'SvcStdMsg')
self.SvcStdMsg = value_
self.SvcStdMsg_nsprefix_ = child_.prefix
elif nodeName_ == 'SvcStdDays' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'SvcStdDays')
ival_ = self.gds_validate_integer(ival_, node, 'SvcStdDays')
self.SvcStdDays = ival_
self.SvcStdDays_nsprefix_ = child_.prefix
elif nodeName_ == 'TotDaysDeliver' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'TotDaysDeliver')
ival_ = self.gds_validate_integer(ival_, node, 'TotDaysDeliver')
self.TotDaysDeliver = ival_
self.TotDaysDeliver_nsprefix_ = child_.prefix
        elif nodeName_ == 'SchedDlvryDate' and child_.text:
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.SchedDlvryDate = dval_
self.SchedDlvryDate_nsprefix_ = child_.prefix
elif nodeName_ == 'NonDlvryDays' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'NonDlvryDays')
ival_ = self.gds_validate_integer(ival_, node, 'NonDlvryDays')
self.NonDlvryDays = ival_
self.NonDlvryDays_nsprefix_ = child_.prefix
elif nodeName_ == 'NonExpeditedExceptions':
obj_ = NonExpeditedExceptionsType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.NonExpeditedExceptions = obj_
obj_.original_tagname_ = 'NonExpeditedExceptions'
elif nodeName_ == 'HFPU':
obj_ = HFPUType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.HFPU = obj_
obj_.original_tagname_ = 'HFPU'
# end class NonExpeditedType
class NonExpeditedExceptionsType(GeneratedsSuper):
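    """Binding for <NonExpeditedExceptions>: a single integer SunHol
    field, presumably a Sunday/holiday exception flag (the expansion of
    SunHol is inferred, not documented here)."""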
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, SunHol=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.SunHol = SunHol
self.SunHol_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, NonExpeditedExceptionsType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if NonExpeditedExceptionsType.subclass:
return NonExpeditedExceptionsType.subclass(*args_, **kwargs_)
else:
return NonExpeditedExceptionsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_SunHol(self):
return self.SunHol
def set_SunHol(self, SunHol):
self.SunHol = SunHol
def _hasContent(self):
if (
self.SunHol is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NonExpeditedExceptionsType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('NonExpeditedExceptionsType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'NonExpeditedExceptionsType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='NonExpeditedExceptionsType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='NonExpeditedExceptionsType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='NonExpeditedExceptionsType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NonExpeditedExceptionsType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.SunHol is not None:
namespaceprefix_ = self.SunHol_nsprefix_ + ':' if (UseCapturedNS_ and self.SunHol_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSunHol>%s</%sSunHol>%s' % (namespaceprefix_ , self.gds_format_integer(self.SunHol, input_name='SunHol'), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'SunHol' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'SunHol')
ival_ = self.gds_validate_integer(ival_, node, 'SunHol')
self.SunHol = ival_
self.SunHol_nsprefix_ = child_.prefix
# end class NonExpeditedExceptionsType
class HFPUType(GeneratedsSuper):
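    """Binding for an <HFPU> element: an EAD date, an integer COT and a
    nested ServiceStandard. "HFPU" is read here as hold-for-pickup,
    which is an inference from USPS terminology."""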
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, EAD=None, COT=None, ServiceStandard=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
if isinstance(EAD, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(EAD, '%Y-%m-%d').date()
else:
initvalue_ = EAD
self.EAD = initvalue_
self.EAD_nsprefix_ = None
self.COT = COT
self.COT_nsprefix_ = None
self.ServiceStandard = ServiceStandard
self.ServiceStandard_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, HFPUType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if HFPUType.subclass:
return HFPUType.subclass(*args_, **kwargs_)
else:
return HFPUType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_EAD(self):
return self.EAD
def set_EAD(self, EAD):
self.EAD = EAD
def get_COT(self):
return self.COT
def set_COT(self, COT):
self.COT = COT
def get_ServiceStandard(self):
return self.ServiceStandard
def set_ServiceStandard(self, ServiceStandard):
self.ServiceStandard = ServiceStandard
def _hasContent(self):
if (
self.EAD is not None or
self.COT is not None or
self.ServiceStandard is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='HFPUType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('HFPUType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'HFPUType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='HFPUType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='HFPUType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='HFPUType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='HFPUType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.EAD is not None:
namespaceprefix_ = self.EAD_nsprefix_ + ':' if (UseCapturedNS_ and self.EAD_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sEAD>%s</%sEAD>%s' % (namespaceprefix_ , self.gds_format_date(self.EAD, input_name='EAD'), namespaceprefix_ , eol_))
if self.COT is not None:
namespaceprefix_ = self.COT_nsprefix_ + ':' if (UseCapturedNS_ and self.COT_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCOT>%s</%sCOT>%s' % (namespaceprefix_ , self.gds_format_integer(self.COT, input_name='COT'), namespaceprefix_ , eol_))
if self.ServiceStandard is not None:
namespaceprefix_ = self.ServiceStandard_nsprefix_ + ':' if (UseCapturedNS_ and self.ServiceStandard_nsprefix_) else ''
self.ServiceStandard.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ServiceStandard', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == 'EAD' and child_.text:
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.EAD = dval_
self.EAD_nsprefix_ = child_.prefix
elif nodeName_ == 'COT' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'COT')
ival_ = self.gds_validate_integer(ival_, node, 'COT')
self.COT = ival_
self.COT_nsprefix_ = child_.prefix
elif nodeName_ == 'ServiceStandard':
obj_ = ServiceStandardType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.ServiceStandard = obj_
obj_.original_tagname_ = 'ServiceStandard'
# end class HFPUType
class ServiceStandardType(GeneratedsSuper):
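    """Binding for <ServiceStandard> under <HFPU>: a service-standard
    message (SvcStdMsg), a day count (SvcStdDays) and a single nested
    Location of type LocationType1."""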
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, SvcStdMsg=None, SvcStdDays=None, Location=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.SvcStdMsg = SvcStdMsg
self.SvcStdMsg_nsprefix_ = None
self.SvcStdDays = SvcStdDays
self.SvcStdDays_nsprefix_ = None
self.Location = Location
self.Location_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ServiceStandardType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ServiceStandardType.subclass:
return ServiceStandardType.subclass(*args_, **kwargs_)
else:
return ServiceStandardType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_SvcStdMsg(self):
return self.SvcStdMsg
def set_SvcStdMsg(self, SvcStdMsg):
self.SvcStdMsg = SvcStdMsg
def get_SvcStdDays(self):
return self.SvcStdDays
def set_SvcStdDays(self, SvcStdDays):
self.SvcStdDays = SvcStdDays
def get_Location(self):
return self.Location
def set_Location(self, Location):
self.Location = Location
def _hasContent(self):
if (
self.SvcStdMsg is not None or
self.SvcStdDays is not None or
self.Location is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ServiceStandardType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ServiceStandardType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ServiceStandardType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ServiceStandardType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ServiceStandardType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ServiceStandardType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ServiceStandardType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.SvcStdMsg is not None:
namespaceprefix_ = self.SvcStdMsg_nsprefix_ + ':' if (UseCapturedNS_ and self.SvcStdMsg_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSvcStdMsg>%s</%sSvcStdMsg>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.SvcStdMsg), input_name='SvcStdMsg')), namespaceprefix_ , eol_))
if self.SvcStdDays is not None:
namespaceprefix_ = self.SvcStdDays_nsprefix_ + ':' if (UseCapturedNS_ and self.SvcStdDays_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSvcStdDays>%s</%sSvcStdDays>%s' % (namespaceprefix_ , self.gds_format_integer(self.SvcStdDays, input_name='SvcStdDays'), namespaceprefix_ , eol_))
if self.Location is not None:
namespaceprefix_ = self.Location_nsprefix_ + ':' if (UseCapturedNS_ and self.Location_nsprefix_) else ''
self.Location.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Location', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'SvcStdMsg':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'SvcStdMsg')
value_ = self.gds_validate_string(value_, node, 'SvcStdMsg')
self.SvcStdMsg = value_
self.SvcStdMsg_nsprefix_ = child_.prefix
elif nodeName_ == 'SvcStdDays' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'SvcStdDays')
ival_ = self.gds_validate_integer(ival_, node, 'SvcStdDays')
self.SvcStdDays = ival_
self.SvcStdDays_nsprefix_ = child_.prefix
elif nodeName_ == 'Location':
obj_ = LocationType1.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Location = obj_
obj_.original_tagname_ = 'Location'
# end class ServiceStandardType
class LocationType1(GeneratedsSuper):
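    """Binding for the <Location> element under <ServiceStandard>. The
    generator appears to have suffixed the class name with "1" to keep
    it distinct from LocationType; this variant adds RAUName, CloseTimes,
    NonExpeditedExceptions and delivery-day fields instead of the
    facility/guarantee fields on LocationType."""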
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, TotDaysDeliver=None, SchedDlvryDate=None, NonDlvryDays=None, RAUName=None, Street=None, ZIP=None, CloseTimes=None, NonExpeditedExceptions=None, City=None, State=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.TotDaysDeliver = TotDaysDeliver
self.TotDaysDeliver_nsprefix_ = None
if isinstance(SchedDlvryDate, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(SchedDlvryDate, '%Y-%m-%d').date()
else:
initvalue_ = SchedDlvryDate
self.SchedDlvryDate = initvalue_
self.SchedDlvryDate_nsprefix_ = None
self.NonDlvryDays = NonDlvryDays
self.NonDlvryDays_nsprefix_ = None
self.RAUName = RAUName
self.RAUName_nsprefix_ = None
self.Street = Street
self.Street_nsprefix_ = None
self.ZIP = ZIP
self.ZIP_nsprefix_ = None
self.CloseTimes = CloseTimes
self.CloseTimes_nsprefix_ = None
self.NonExpeditedExceptions = NonExpeditedExceptions
self.NonExpeditedExceptions_nsprefix_ = None
self.City = City
self.City_nsprefix_ = None
self.State = State
self.State_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, LocationType1)
if subclass is not None:
return subclass(*args_, **kwargs_)
if LocationType1.subclass:
return LocationType1.subclass(*args_, **kwargs_)
else:
return LocationType1(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_TotDaysDeliver(self):
return self.TotDaysDeliver
def set_TotDaysDeliver(self, TotDaysDeliver):
self.TotDaysDeliver = TotDaysDeliver
def get_SchedDlvryDate(self):
return self.SchedDlvryDate
def set_SchedDlvryDate(self, SchedDlvryDate):
self.SchedDlvryDate = SchedDlvryDate
def get_NonDlvryDays(self):
return self.NonDlvryDays
def set_NonDlvryDays(self, NonDlvryDays):
self.NonDlvryDays = NonDlvryDays
def get_RAUName(self):
return self.RAUName
def set_RAUName(self, RAUName):
self.RAUName = RAUName
def get_Street(self):
return self.Street
def set_Street(self, Street):
self.Street = Street
def get_ZIP(self):
return self.ZIP
def set_ZIP(self, ZIP):
self.ZIP = ZIP
def get_CloseTimes(self):
return self.CloseTimes
def set_CloseTimes(self, CloseTimes):
self.CloseTimes = CloseTimes
def get_NonExpeditedExceptions(self):
return self.NonExpeditedExceptions
def set_NonExpeditedExceptions(self, NonExpeditedExceptions):
self.NonExpeditedExceptions = NonExpeditedExceptions
def get_City(self):
return self.City
def set_City(self, City):
self.City = City
def get_State(self):
return self.State
def set_State(self, State):
self.State = State
def _hasContent(self):
if (
self.TotDaysDeliver is not None or
self.SchedDlvryDate is not None or
self.NonDlvryDays is not None or
self.RAUName is not None or
self.Street is not None or
self.ZIP is not None or
self.CloseTimes is not None or
self.NonExpeditedExceptions is not None or
self.City is not None or
self.State is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='LocationType1', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('LocationType1')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'LocationType1':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='LocationType1')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='LocationType1', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='LocationType1'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='LocationType1', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.TotDaysDeliver is not None:
namespaceprefix_ = self.TotDaysDeliver_nsprefix_ + ':' if (UseCapturedNS_ and self.TotDaysDeliver_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sTotDaysDeliver>%s</%sTotDaysDeliver>%s' % (namespaceprefix_ , self.gds_format_integer(self.TotDaysDeliver, input_name='TotDaysDeliver'), namespaceprefix_ , eol_))
if self.SchedDlvryDate is not None:
namespaceprefix_ = self.SchedDlvryDate_nsprefix_ + ':' if (UseCapturedNS_ and self.SchedDlvryDate_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSchedDlvryDate>%s</%sSchedDlvryDate>%s' % (namespaceprefix_ , self.gds_format_date(self.SchedDlvryDate, input_name='SchedDlvryDate'), namespaceprefix_ , eol_))
if self.NonDlvryDays is not None:
namespaceprefix_ = self.NonDlvryDays_nsprefix_ + ':' if (UseCapturedNS_ and self.NonDlvryDays_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sNonDlvryDays>%s</%sNonDlvryDays>%s' % (namespaceprefix_ , self.gds_format_integer(self.NonDlvryDays, input_name='NonDlvryDays'), namespaceprefix_ , eol_))
if self.RAUName is not None:
namespaceprefix_ = self.RAUName_nsprefix_ + ':' if (UseCapturedNS_ and self.RAUName_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sRAUName>%s</%sRAUName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.RAUName), input_name='RAUName')), namespaceprefix_ , eol_))
if self.Street is not None:
namespaceprefix_ = self.Street_nsprefix_ + ':' if (UseCapturedNS_ and self.Street_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sStreet>%s</%sStreet>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Street), input_name='Street')), namespaceprefix_ , eol_))
if self.ZIP is not None:
namespaceprefix_ = self.ZIP_nsprefix_ + ':' if (UseCapturedNS_ and self.ZIP_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sZIP>%s</%sZIP>%s' % (namespaceprefix_ , self.gds_format_integer(self.ZIP, input_name='ZIP'), namespaceprefix_ , eol_))
if self.CloseTimes is not None:
namespaceprefix_ = self.CloseTimes_nsprefix_ + ':' if (UseCapturedNS_ and self.CloseTimes_nsprefix_) else ''
self.CloseTimes.export(outfile, level, namespaceprefix_, namespacedef_='', name_='CloseTimes', pretty_print=pretty_print)
if self.NonExpeditedExceptions is not None:
namespaceprefix_ = self.NonExpeditedExceptions_nsprefix_ + ':' if (UseCapturedNS_ and self.NonExpeditedExceptions_nsprefix_) else ''
self.NonExpeditedExceptions.export(outfile, level, namespaceprefix_, namespacedef_='', name_='NonExpeditedExceptions', pretty_print=pretty_print)
if self.City is not None:
namespaceprefix_ = self.City_nsprefix_ + ':' if (UseCapturedNS_ and self.City_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCity>%s</%sCity>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.City), input_name='City')), namespaceprefix_ , eol_))
if self.State is not None:
namespaceprefix_ = self.State_nsprefix_ + ':' if (UseCapturedNS_ and self.State_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sState>%s</%sState>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.State), input_name='State')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'TotDaysDeliver' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'TotDaysDeliver')
ival_ = self.gds_validate_integer(ival_, node, 'TotDaysDeliver')
self.TotDaysDeliver = ival_
self.TotDaysDeliver_nsprefix_ = child_.prefix
elif nodeName_ == 'SchedDlvryDate':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.SchedDlvryDate = dval_
self.SchedDlvryDate_nsprefix_ = child_.prefix
elif nodeName_ == 'NonDlvryDays' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'NonDlvryDays')
ival_ = self.gds_validate_integer(ival_, node, 'NonDlvryDays')
self.NonDlvryDays = ival_
self.NonDlvryDays_nsprefix_ = child_.prefix
elif nodeName_ == 'RAUName':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'RAUName')
value_ = self.gds_validate_string(value_, node, 'RAUName')
self.RAUName = value_
self.RAUName_nsprefix_ = child_.prefix
elif nodeName_ == 'Street':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Street')
value_ = self.gds_validate_string(value_, node, 'Street')
self.Street = value_
self.Street_nsprefix_ = child_.prefix
elif nodeName_ == 'ZIP' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'ZIP')
ival_ = self.gds_validate_integer(ival_, node, 'ZIP')
self.ZIP = ival_
self.ZIP_nsprefix_ = child_.prefix
elif nodeName_ == 'CloseTimes':
obj_ = CloseTimesType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.CloseTimes = obj_
obj_.original_tagname_ = 'CloseTimes'
elif nodeName_ == 'NonExpeditedExceptions':
obj_ = NonExpeditedExceptionsType2.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.NonExpeditedExceptions = obj_
obj_.original_tagname_ = 'NonExpeditedExceptions'
elif nodeName_ == 'City':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'City')
value_ = self.gds_validate_string(value_, node, 'City')
self.City = value_
self.City_nsprefix_ = child_.prefix
elif nodeName_ == 'State':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'State')
value_ = self.gds_validate_string(value_, node, 'State')
self.State = value_
self.State_nsprefix_ = child_.prefix
# end class LocationType1
class CloseTimesType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, M=None, Tu=None, W=None, Th=None, F=None, Sa=None, Su=None, H=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.M = M
self.M_nsprefix_ = None
self.Tu = Tu
self.Tu_nsprefix_ = None
self.W = W
self.W_nsprefix_ = None
self.Th = Th
self.Th_nsprefix_ = None
self.F = F
self.F_nsprefix_ = None
self.Sa = Sa
self.Sa_nsprefix_ = None
self.Su = Su
self.Su_nsprefix_ = None
self.H = H
self.H_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, CloseTimesType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if CloseTimesType.subclass:
return CloseTimesType.subclass(*args_, **kwargs_)
else:
return CloseTimesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_M(self):
return self.M
def set_M(self, M):
self.M = M
def get_Tu(self):
return self.Tu
def set_Tu(self, Tu):
self.Tu = Tu
def get_W(self):
return self.W
def set_W(self, W):
self.W = W
def get_Th(self):
return self.Th
def set_Th(self, Th):
self.Th = Th
def get_F(self):
return self.F
def set_F(self, F):
self.F = F
def get_Sa(self):
return self.Sa
def set_Sa(self, Sa):
self.Sa = Sa
def get_Su(self):
return self.Su
def set_Su(self, Su):
self.Su = Su
def get_H(self):
return self.H
def set_H(self, H):
self.H = H
def _hasContent(self):
if (
self.M is not None or
self.Tu is not None or
self.W is not None or
self.Th is not None or
self.F is not None or
self.Sa is not None or
self.Su is not None or
self.H is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='CloseTimesType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('CloseTimesType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'CloseTimesType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CloseTimesType')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='CloseTimesType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CloseTimesType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='CloseTimesType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.M is not None:
namespaceprefix_ = self.M_nsprefix_ + ':' if (UseCapturedNS_ and self.M_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sM>%s</%sM>%s' % (namespaceprefix_ , self.gds_format_integer(self.M, input_name='M'), namespaceprefix_ , eol_))
if self.Tu is not None:
namespaceprefix_ = self.Tu_nsprefix_ + ':' if (UseCapturedNS_ and self.Tu_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sTu>%s</%sTu>%s' % (namespaceprefix_ , self.gds_format_integer(self.Tu, input_name='Tu'), namespaceprefix_ , eol_))
if self.W is not None:
namespaceprefix_ = self.W_nsprefix_ + ':' if (UseCapturedNS_ and self.W_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sW>%s</%sW>%s' % (namespaceprefix_ , self.gds_format_integer(self.W, input_name='W'), namespaceprefix_ , eol_))
if self.Th is not None:
namespaceprefix_ = self.Th_nsprefix_ + ':' if (UseCapturedNS_ and self.Th_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sTh>%s</%sTh>%s' % (namespaceprefix_ , self.gds_format_integer(self.Th, input_name='Th'), namespaceprefix_ , eol_))
if self.F is not None:
namespaceprefix_ = self.F_nsprefix_ + ':' if (UseCapturedNS_ and self.F_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sF>%s</%sF>%s' % (namespaceprefix_ , self.gds_format_integer(self.F, input_name='F'), namespaceprefix_ , eol_))
if self.Sa is not None:
namespaceprefix_ = self.Sa_nsprefix_ + ':' if (UseCapturedNS_ and self.Sa_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSa>%s</%sSa>%s' % (namespaceprefix_ , self.gds_format_integer(self.Sa, input_name='Sa'), namespaceprefix_ , eol_))
if self.Su is not None:
namespaceprefix_ = self.Su_nsprefix_ + ':' if (UseCapturedNS_ and self.Su_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSu>%s</%sSu>%s' % (namespaceprefix_ , self.gds_format_integer(self.Su, input_name='Su'), namespaceprefix_ , eol_))
if self.H is not None:
namespaceprefix_ = self.H_nsprefix_ + ':' if (UseCapturedNS_ and self.H_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sH>%s</%sH>%s' % (namespaceprefix_ , self.gds_format_integer(self.H, input_name='H'), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'M' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'M')
ival_ = self.gds_validate_integer(ival_, node, 'M')
self.M = ival_
self.M_nsprefix_ = child_.prefix
elif nodeName_ == 'Tu' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'Tu')
ival_ = self.gds_validate_integer(ival_, node, 'Tu')
self.Tu = ival_
self.Tu_nsprefix_ = child_.prefix
elif nodeName_ == 'W' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'W')
ival_ = self.gds_validate_integer(ival_, node, 'W')
self.W = ival_
self.W_nsprefix_ = child_.prefix
elif nodeName_ == 'Th' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'Th')
ival_ = self.gds_validate_integer(ival_, node, 'Th')
self.Th = ival_
self.Th_nsprefix_ = child_.prefix
elif nodeName_ == 'F' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'F')
ival_ = self.gds_validate_integer(ival_, node, 'F')
self.F = ival_
self.F_nsprefix_ = child_.prefix
elif nodeName_ == 'Sa' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'Sa')
ival_ = self.gds_validate_integer(ival_, node, 'Sa')
self.Sa = ival_
self.Sa_nsprefix_ = child_.prefix
elif nodeName_ == 'Su' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'Su')
ival_ = self.gds_validate_integer(ival_, node, 'Su')
self.Su = ival_
self.Su_nsprefix_ = child_.prefix
elif nodeName_ == 'H' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'H')
ival_ = self.gds_validate_integer(ival_, node, 'H')
self.H = ival_
self.H_nsprefix_ = child_.prefix
# end class CloseTimesType
class NonExpeditedExceptionsType2(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, SunHol=None, Closed=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.SunHol = SunHol
self.SunHol_nsprefix_ = None
self.Closed = Closed
self.Closed_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, NonExpeditedExceptionsType2)
if subclass is not None:
return subclass(*args_, **kwargs_)
if NonExpeditedExceptionsType2.subclass:
return NonExpeditedExceptionsType2.subclass(*args_, **kwargs_)
else:
return NonExpeditedExceptionsType2(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_SunHol(self):
return self.SunHol
def set_SunHol(self, SunHol):
self.SunHol = SunHol
def get_Closed(self):
return self.Closed
def set_Closed(self, Closed):
self.Closed = Closed
def _hasContent(self):
if (
self.SunHol is not None or
self.Closed is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NonExpeditedExceptionsType2', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('NonExpeditedExceptionsType2')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'NonExpeditedExceptionsType2':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='NonExpeditedExceptionsType2')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='NonExpeditedExceptionsType2', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='NonExpeditedExceptionsType2'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NonExpeditedExceptionsType2', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.SunHol is not None:
namespaceprefix_ = self.SunHol_nsprefix_ + ':' if (UseCapturedNS_ and self.SunHol_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSunHol>%s</%sSunHol>%s' % (namespaceprefix_ , self.gds_format_integer(self.SunHol, input_name='SunHol'), namespaceprefix_ , eol_))
if self.Closed is not None:
namespaceprefix_ = self.Closed_nsprefix_ + ':' if (UseCapturedNS_ and self.Closed_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sClosed>%s</%sClosed>%s' % (namespaceprefix_ , self.gds_format_integer(self.Closed, input_name='Closed'), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'SunHol' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'SunHol')
ival_ = self.gds_validate_integer(ival_, node, 'SunHol')
self.SunHol = ival_
self.SunHol_nsprefix_ = child_.prefix
elif nodeName_ == 'Closed' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'Closed')
ival_ = self.gds_validate_integer(ival_, node, 'Closed')
self.Closed = ival_
self.Closed_nsprefix_ = child_.prefix
# end class NonExpeditedExceptionsType2
GDSClassesMapping = {
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
prefix_tag = TagNamePrefix + tag
rootClass = GDSClassesMapping.get(prefix_tag)
if rootClass is None:
rootClass = globals().get(prefix_tag)
return tag, rootClass
def get_required_ns_prefix_defs(rootNode):
'''Get all name space prefix definitions required in this XML doc.
Return a dictionary of definitions and a char string of definitions.
'''
nsmap = {
prefix: uri
for node in rootNode.iter()
for (prefix, uri) in node.nsmap.items()
if prefix is not None
}
namespacedefs = ' '.join([
'xmlns:{}="{}"'.format(prefix, uri)
for prefix, uri in nsmap.items()
])
return nsmap, namespacedefs
def parse(inFileName, silence=False, print_warnings=True):
global CapturedNsmap_
gds_collector = GdsCollector_()
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'SDCGetLocationsResponse'
rootClass = SDCGetLocationsResponse
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
CapturedNsmap_, namespacedefs = get_required_ns_prefix_defs(rootNode)
if not SaveElementTreeNode:
doc = None
rootNode = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_=namespacedefs,
pretty_print=True)
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj
def parseEtree(inFileName, silence=False, print_warnings=True,
mapping=None, reverse_mapping=None, nsmap=None):
parser = None
doc = parsexml_(inFileName, parser)
gds_collector = GdsCollector_()
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'SDCGetLocationsResponse'
rootClass = SDCGetLocationsResponse
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
if mapping is None:
mapping = {}
if reverse_mapping is None:
reverse_mapping = {}
rootElement = rootObj.to_etree(
None, name_=rootTag, mapping_=mapping,
reverse_mapping_=reverse_mapping, nsmap_=nsmap)
reverse_node_mapping = rootObj.gds_reverse_node_mapping(mapping)
# Enable Python to collect the space used by the DOM.
if not SaveElementTreeNode:
doc = None
rootNode = None
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(str(content))
sys.stdout.write('\n')
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj, rootElement, mapping, reverse_node_mapping
def parseString(inString, silence=False, print_warnings=True):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
    rootNode = parsexmlstring_(inString, parser)
gds_collector = GdsCollector_()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'SDCGetLocationsResponse'
rootClass = SDCGetLocationsResponse
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
if not SaveElementTreeNode:
rootNode = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj
def parseLiteral(inFileName, silence=False, print_warnings=True):
parser = None
doc = parsexml_(inFileName, parser)
gds_collector = GdsCollector_()
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'SDCGetLocationsResponse'
rootClass = SDCGetLocationsResponse
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
# Enable Python to collect the space used by the DOM.
if not SaveElementTreeNode:
doc = None
rootNode = None
if not silence:
sys.stdout.write('#from sdc_get_locations_response import *\n\n')
sys.stdout.write('import sdc_get_locations_response as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
RenameMappings_ = {
}
#
# Mapping of namespaces to types defined in them
# and the file in which each is defined.
# simpleTypes are marked "ST" and complexTypes "CT".
NamespaceToDefMappings_ = {}
__all__ = [
"CloseTimesType",
"CommitmentType",
"ExpeditedType",
"HFPUType",
"LocationType",
"LocationType1",
"NonExpeditedExceptionsType",
"NonExpeditedExceptionsType2",
"NonExpeditedType",
"SDCGetLocationsResponse",
"ServiceStandardType"
]
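# Usage sketch (illustrative, not part of the generated module): parseString()
# above builds the object tree from raw XML. An empty root element is enough
# to exercise the round trip; any richer payload shape is an assumption.
#
#     root = parseString('<SDCGetLocationsResponse/>', silence=True)
#     # root is an SDCGetLocationsResponse instance with all fields unset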
| UTF-8 | Python | false | false | 145,896 | py | 1,312 | sdc_get_locations_response.py | 1,108 | 0.590057 | 0.586712 | 0 | 3,206 | 44.507174 | 252 |
DLLCODERA/linearregression | 7,370,163,916,146 | bf63846b403631099b773cc210f1a90ee3cffa8e | 7c732c3fa6677394c85ac578ff3a6ccbc520de80 | /dll_LR_multivariant.py | 42e2271f1ebd29b80557d979504ef64aaa45c7ea | []
| no_license | https://github.com/DLLCODERA/linearregression | 5c6b1069586ae8f6e474e41aa7062c7adaa8da97 | f7a6a441b46527fa48c0cd8c3b5445c368796aee | refs/heads/master | 2023-08-29T20:59:26.263333 | 2021-11-02T15:18:10 | 2021-11-02T15:18:10 | 423,894,228 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import seaborn as sns
def incoming_parameters():
data = pd.read_csv("./text1", names=["area", "num_bedroom", "price"])
x = data[["area", "num_bedroom"]].values
y = data[["price"]].values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=0)
data_dir = {"x_train": x_train, "y_train": y_train, "x_test": x_test, "y_test": y_test}
tup = (data_dir, data)
return tup
def train_model(x, y):
regression = LinearRegression()
regression.fit(x, y)
y_prediction = regression.predict(x)
m_s_e = mean_squared_error(y, y_prediction)
r_2 = r2_score(y, y_prediction)
coefficient = regression.coef_
intercept = regression.intercept_
print("coefficient:", end="")
print(coefficient)
print("intercept:", end="")
print(intercept)
print("")
print("均方差:", end="")
print(m_s_e)
print("相关系数:", end="")
print(r_2)
print("")
print(y_prediction)
return y_prediction
def configure(data, data_all):
features_train = data["x_train"]
label_train = data["y_train"]
features_test = data["x_test"]
label_test = data["y_test"]
print("=" * 60)
print("parameters of train")
pred_train = train_model(features_train, label_train)
print("=" * 60)
print("parameters of test")
pred_test = train_model(features_test, label_test)
print("=" * 60)
visualization(features_train,
label_train,
features_test,
label_test,
data_all,
pred_train,
pred_test
)
def visualization(x_train, y_train, x_test, y_test, data, y_pred, y_pred_t):
fig_1 = plt.figure(figsize=(10, 10), dpi=80)
ax1 = fig_1.add_subplot(221, projection="3d")
ax2 = fig_1.add_subplot(222, projection="3d")
ax1.scatter(x_train[:, :1], x_train[:, 1:], y_train, color="r", marker="x", alpha=0.5)
ax1.legend(labels=["Test data"], loc="lower right")
ax1.set_title("train diagram")
ax1.set_xlabel("area")
ax1.set_ylabel("num_bedroom")
ax1.set_zlabel("price")
ax2.scatter(x_test[:, :1], x_test[:, 1:], y_test, color="g", marker="x", alpha=0.5)
ax2.legend(labels=["Test data"], loc="lower right")
ax2.set_title("test diagram")
ax2.set_xlabel("area")
ax2.set_ylabel("num_bedroom")
ax2.set_zlabel("price")
sns.pairplot(data, x_vars=["area", "num_bedroom"], y_vars=["price"], size=6, aspect=0.8, kind="reg")
ax3 = fig_1.add_subplot(223)
ax4 = fig_1.add_subplot(224)
ax3.plot(range(len(y_pred)), y_pred, 'b', label="predict")
ax4.plot(range(len(y_pred_t)), y_pred_t, 'r', label="test")
ax3.set_title("predict the price,train")
ax3.legend(loc="upper right")
ax3.set_xlabel("number")
ax3.set_ylabel("price")
ax4.set_title("predict the price,test")
ax4.legend(loc="upper right")
ax4.set_xlabel("number")
ax4.set_ylabel("price")
plt.show()
if __name__ == "__main__":
plt.rcParams["axes.unicode_minus"] = False
date = incoming_parameters()[0]
date_all = incoming_parameters()[1]
configure(date, date_all)
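# Note (assumption): "./text1" above is read as a headerless CSV whose three
# numeric columns map to area, num_bedroom and price, e.g.:
#
#     2104,3,399900
#     1600,3,329900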
| UTF-8 | Python | false | false | 3,436 | py | 7 | dll_LR_multivariant.py | 7 | 0.605617 | 0.582504 | 0 | 120 | 27.483333 | 104 |
Kimuda/Phillip_Python | 11,836,929,905,341 | 096b78f7c0b41f852d13f89fc6ecb6fa0b406081 | 3f100a1002a1f8ed453c8b81a9b403444d77b4c6 | /conditionals_if/conditionals_4.py | 0b8a4344a1bb31e18aefcd7ccd9abde449024138 | []
| no_license | https://github.com/Kimuda/Phillip_Python | c19c85a43c5a13760239e4e94c08436c99787ebf | 59d56a0d45839656eb15dbe288bdb0d18cb7df2b | refs/heads/master | 2016-09-09T22:19:02.347744 | 2015-05-01T10:56:49 | 2015-05-01T10:56:49 | 32,330,951 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | first_number=int(input("give the first number "))
second_number=int(input("give the second number "))
if second_number!=0:
print(first_number/second_number)
else:
print("You can not divide a number by zero numnuts")
| UTF-8 | Python | false | false | 224 | py | 149 | conditionals_4.py | 146 | 0.727679 | 0.723214 | 0 | 6 | 36.333333 | 56 |
safu9/elcats | 9,354,438,805,184 | 680ea1f95908661865235a74cf3214b1cc26b66f | 7a8d817d64c7c7c84f82bd31649dfb7373c33fbd | /home/migrations/0001_initial.py | 1073d7423794e7c05dcdf6b5649e62a4c53cb4e5 | []
| no_license | https://github.com/safu9/elcats | 84216e2df2c7ac174af031189609214f3067efcd | 769667d8937483af1cf9b33edaa671ca7e2fef90 | refs/heads/master | 2021-06-20T19:09:53.051365 | 2019-04-29T12:55:01 | 2019-04-29T12:55:01 | 193,738,319 | 0 | 0 | null | false | 2021-03-19T01:39:09 | 2019-06-25T15:48:57 | 2019-06-25T15:49:40 | 2021-03-19T01:39:07 | 166 | 0 | 0 | 2 | Python | false | false | # Generated by Django 2.1 on 2018-12-05 07:10
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
                ('slug', models.SlugField(verbose_name='Slug')),
                ('description', models.TextField(blank=True, max_length=50, verbose_name='Description')),
                ('is_private', models.BooleanField(default=False, verbose_name='Private')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Members')),
],
options={
                'verbose_name': 'Project',
                'verbose_name_plural': 'Projects',
},
),
]
| UTF-8 | Python | false | false | 1,240 | py | 91 | 0001_initial.py | 48 | 0.578098 | 0.562818 | 0 | 32 | 35.8125 | 114 |
dmlnk/weather_app | 15,633,680,962,376 | 962c2c963ad1ed6e461368ab3b0799f63c756309 | a4a8a32d8cfab17a37d5d8b6f93c940f3a03794f | /views/MenuView.py | 3cd3edc3bb2dd1fed7be95406136594c7429eb3c | []
| no_license | https://github.com/dmlnk/weather_app | 91838c3622d6c3752ac729ff7e96e092ba8a8a72 | dae4b256606737dbe34d0a14c654646fe5d214a3 | refs/heads/main | 2023-02-17T22:37:46.394454 | 2021-01-19T10:27:29 | 2021-01-19T10:27:29 | 330,938,266 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import *
from tkinter import ttk
class MenuView:
def __init__(self, master, controller):
self.controller = controller
self.window_fill_color = 'white'
self.font_color = '#707070'
self.font_size = 12
self.btn_font_size = 10
self.btn_width = 12
self.font_style = "Segoe UI"
self.frame_border_color = '#A1A0A0'
self.frame_fill_color = '#F2F2F2'
self.search_bar_frame = Frame(master, background=self.frame_fill_color,
highlightbackground=self.frame_border_color,
highlightthickness=2)
self.search_bar_frame.grid(row=0, column=0, columnspan=3, padx=10, pady=4, ipadx=5, ipady=5, sticky="ew")
self.search_bar_frame.columnconfigure([0, 1, 2], minsize=1, weight=1)
self.search_bar_frame.rowconfigure([0, 1, 2], minsize=1, weight=1)
self.input_label = Label(self.search_bar_frame,
text="Type a city name or choose from recent:",
background='#F2F2F2',
anchor="w",
fg=self.font_color,
font=(self.font_style, self.font_size, "bold"))
self.input_label.grid(row=0, column=0, columnspan=3, padx=12, pady=2,sticky="ew")
self.input_cbox = ttk.Combobox(self.search_bar_frame, width=40)
self.input_cbox.grid(row=1, column=0, columnspan=3, padx=15, ipady=5, sticky="ew")
self.map_btn = Button(master,
text="Open map",
font=(self.font_style, self.btn_font_size, "bold"),
width=self.btn_width,
background=self.window_fill_color,
foreground=self.font_color,
relief=RIDGE,
command=self.controller.show_map)
self.map_btn.grid(row=1, column=0, ipady=5)
self.air_btn = Button(master,
text="Air pollution",
font=(self.font_style, self.btn_font_size, "bold"),
width=self.btn_width,
background=self.window_fill_color,
foreground=self.font_color,
relief=RIDGE,
command=self.controller.show_pollution)
self.air_btn.grid(row=1, column=1, ipady=5)
self.forecast_btn = Button(master,
text="Forecast",
font=(self.font_style, self.btn_font_size, "bold"),
width=self.btn_width,
background=self.window_fill_color,
foreground=self.font_color,
relief=RIDGE,
command=controller.show_forecast)
self.forecast_btn.grid(row=1, column=2, ipady=5)
self.statusbar = Label(master, bd=1, font=(self.font_style, 10, "bold"),
text="Ready", relief=SUNKEN, anchor=W,
background=self.window_fill_color)
self.statusbar.grid(row=2, column=0, columnspan=3, sticky="ew", ipady=2)
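# Usage sketch (assumes a controller object exposing the show_map,
# show_pollution and show_forecast callbacks wired to the buttons above):
#
#     root = Tk()
#     view = MenuView(root, controller)
#     root.mainloop()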
| UTF-8 | Python | false | false | 3,419 | py | 14 | MenuView.py | 12 | 0.486984 | 0.466511 | 0 | 72 | 46.472222 | 113 |
JavierPacheco1601/StructuredPrograming2A | 2,637,109,940,231 | 213f49c7c369027e73bb54f995a009565156811d | 416e9a9af0eb9b4073968e6277e7bc3549b8bf5f | /unit2/project/Variables_2.py | 5e92487b7d048b1a0ec55a843c92b33084c16108 | []
| no_license | https://github.com/JavierPacheco1601/StructuredPrograming2A | cb25b0207bb420b1ad572de56e40cf71c42f9d0d | 11bd0a7466c244146d3373650f47e6515aef3f5d | refs/heads/master | 2023-06-29T20:39:21.610398 | 2021-08-04T23:55:40 | 2021-08-04T23:55:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
Addition = 0
Subtraction = 0
Multiplication = 0
Division = 0
Number1 = int(sys.argv[1])
Number2 = int(sys.argv[2])
if __name__ == "__main__":
    Addition = Number1 + Number2
    print(f'The addition is: {Addition}')
    Subtraction = Number1 - Number2
    print(f'The subtraction is: {Subtraction}')
    Multiplication = Number1 * Number2
    print(f'The multiplication is: {Multiplication}')
    Division = Number1 / Number2
    print(f'The division is: {Division}')
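# Usage sketch (assumes two integer command-line arguments):
#
#     $ python Variables_2.py 6 3
#     The addition is: 9
#     The subtraction is: 3
#     The multiplication is: 18
#     The division is: 2.0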
liangriyu/pytemplate | 16,131,897,209,561 | 620a57b6829f6a14033cf17b419d752f01d02066 | 062c7e6a25889ad4f721fbb25cf22124353c98f2 | /hdcloud/utils/dateutil.py | 1d3b246d7457b386114492d49b4794a78d884ee5 | []
| no_license | https://github.com/liangriyu/pytemplate | 1f79dde298b151be41169de6e2fcb11a28a028c0 | 586906519c7b7a0aee60dbdc32ac628dad1546fc | refs/heads/master | 2022-12-14T05:32:32.126494 | 2020-08-04T09:35:41 | 2020-08-04T09:35:41 | 202,729,382 | 1 | 0 | null | false | 2022-12-08T06:16:50 | 2019-08-16T13:08:32 | 2022-03-04T05:40:15 | 2022-12-08T06:16:50 | 58 | 1 | 0 | 8 | Python | false | false | # -*- coding: utf-8 -*-
# @Time : 2019/8/24 10:47
# @Author : liangriyu
#时间格式
from datetime import datetime,timedelta
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DATETIME_FORMAT2 = "%Y%m%d%H%M%S"
DATETIME_FORMAT_D = "%Y-%m-%d"
DATETIME_FORMAT_D2 = "%Y%m%d"
DATETIME_FORMAT_H = "%Y-%m-%d %H"
DATETIME_FORMAT_H2 = "%Y%m%d%H"
DATETIME_FORMAT_M = "%Y-%m-%d %H:%M"
DATETIME_FORMAT_M2 = "%Y%m%d%H%M%S"
DATETIME_FORMAT_MS = "%Y-%m-%d %H:%M:%S.%f"
DATETIME_FORMAT_MS2 = "%Y%m%d%H%M%S%f"
def format2dtime(str_dtime,format):
return datetime.strptime(str_dtime,format)
def format2str(dtime,format):
return dtime.strftime(format)
def add_timedelta(dtime,days=0,hours=0,minutes=0,seconds=0):
"""
日期时间偏移
:param dtime: 日期时间
:param days: 偏移天数
:param hours: 偏移小时数
:param minutes: 偏移分钟数
:param seconds: 偏移秒数
:return:
"""
if not isinstance(dtime, datetime):
dtime = format2dtime(dtime,format)
dtime = dtime + timedelta(days=days)
dtime = dtime + timedelta(hours=hours)
dtime = dtime + timedelta(minutes=minutes)
dtime = dtime + timedelta(seconds=seconds)
return dtime
def vaild_date(date):
try:
if ":" in date:
datetime.strptime(date,DATETIME_FORMAT)
else:
datetime.strptime(date, DATETIME_FORMAT_D)
return 1
except Exception as e:
print(e)
return 0
| UTF-8 | Python | false | false | 1,448 | py | 25 | dateutil.py | 20 | 0.621387 | 0.602601 | 0 | 52 | 25.596154 | 60 |
sui84/tools | 12,180,527,274,029 | 9023dd521cc07ca031d045f5b35b9c7defd25d40 | 25985aeeee54373d26a164e4cc6a014770e3ebf3 | /windows/w3af/w3af/core/data/searchEngines/.svn/text-base/bing.py.svn-base | cd4f3be8d2104c4d0e010ae51315fde9c0d55284 | []
| no_license | https://github.com/sui84/tools | 4b750dae90940fbe3a226cba72dc071d8fb88b7c | 651cc08eb50199ce1044c684dbf714ea26df6432 | refs/heads/master | 2021-01-22T19:22:26.964580 | 2017-08-20T15:23:38 | 2017-08-20T15:23:38 | 100,774,276 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
bing.py
Copyright 2006 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import urllib
import re
import core.controllers.outputManager as om
from core.data.searchEngines.searchEngine import searchEngine as searchEngine
from core.data.parsers.urlParser import url_object
class bing(searchEngine):
'''
This class is a wrapper for doing bing searches. It allows the user to use pymsn or simply do GET requests
to bing.com.
@author: Andres Riancho ( andres.riancho@gmail.com )
'''
def __init__(self, urlOpener):
searchEngine.__init__(self)
self._urlOpener = urlOpener
def search(self, query, start, count=10):
res = self._metSearch(query, start)
om.out.debug('Bing search for: ' + query + ' returned ' + str(len(res)) + ' results.')
return res
def _metSearch(self, query, start=0):
'''
Search the web with Bing.
This method is based from the msn.py file from the massive enumeration toolset,
coded by pdp and released under GPL v2.
'''
class bingResult:
'''
Dummy class that represents the search result.
'''
def __init__( self, url ):
if not isinstance(url, url_object):
msg = 'The url __init__ parameter of a bingResult object must'
msg += ' be of urlParser.url_object type.'
raise ValueError( msg )
self.URL = url
url = 'http://www.bing.com/search?'
_query = urllib.urlencode({'q':query, 'first':start+1, 'FORM':'PERE'})
url_instance = url_object(url+_query)
response = self._urlOpener.GET( url_instance, headers=self._headers,
useCache=True, grepResult=False)
results = []
# This regex MAY become outdated
urls = re.findall('<h3><a href="(.*?)" onmousedown', response.getBody())
if len(urls) == 11:
urls = urls[:-1]
for url in urls:
if 'www.bing.com' not in url:
results.append(bingResult( url_object(url) ))
return results
| UTF-8 | Python | false | false | 2,767 | 234 | bing.py.svn-base | 205 | 0.633177 | 0.622335 | 0 | 83 | 32.337349 | 110 |
|
mxer/Lyric_ASR | 10,256,381,938,406 | 384f43c26abc331d16db68b100ded8e4a23bb586 | 1a165b84fdbca7a6377e0bad36070dcaf951c8b8 | /progress/check_progress.py | 0b9b7aeb8bc22001936e9070874573b9f0bc7b25 | []
| no_license | https://github.com/mxer/Lyric_ASR | 1464b8fa741598e93f7c5220ed273e115517bf8b | a841a07ba6e4a7c55c8249231eeae4136827b866 | refs/heads/master | 2021-05-17T08:13:36.856940 | 2019-07-17T06:02:54 | 2019-07-17T06:02:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import sys,os
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("logDir", help = 'log directory of nnet2')
parser.add_argument("firstIter", help = 'first iter', type = int)
parser.add_argument("finalIter", help = 'final iter', type = int)
args = parser.parse_args()
def find_acc(content):
content = content.rstrip()
content = content.split()
flag = 0
acc = 0
for word in content:
if flag > 0:
flag += 1
if word == 'accuracy':
flag = 1
if flag == 5 :
acc = float(word)
return acc
X = []
Y_train = []
Y_valid = []
for itr in range(args.firstIter,args.finalIter):
logFileTrain = os.path.join(args.logDir,'compute_prob_train' + '.' +
str(itr) + '.log' )
logFileValid = os.path.join(args.logDir,'compute_prob_valid' + '.' +
str(itr) + '.log' )
with open(logFileTrain,'r') as fp:
content = fp.read()
acc_train = find_acc(content)
with open(logFileValid,'r') as fp:
content = fp.read()
acc_valid = find_acc(content)
X.append(itr)
Y_train.append(acc_train)
Y_valid.append(acc_valid)
plt.plot(X,Y_train,label = 'train',color='b')
plt.plot(X,Y_valid,label = 'valid',color='r')
plt.legend()
plt.show()
| UTF-8 | Python | false | false | 1,372 | py | 136 | check_progress.py | 132 | 0.586006 | 0.580904 | 0 | 45 | 29.488889 | 73 |
leilalu/algorithm | 8,950,711,853,666 | e2dac9c3d8f4a747ad075ae27d8e3423efef5a05 | 4cdc9ba739f90f6ac4bcd6f916ba194ada77d68c | /链表/19.删除链表的倒数第N个结点.py | 3c29856a378921dd9e043eb686ab2b5432ad34ac | []
| no_license | https://github.com/leilalu/algorithm | bee68690daf836cc5807c3112c2c9e6f63bc0a76 | 746d77e9bfbcb3877fefae9a915004b3bfbcc612 | refs/heads/master | 2020-09-30T15:56:28.224945 | 2020-05-30T03:28:39 | 2020-05-30T03:28:39 | 227,313,730 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
给定一个链表,删除链表的倒数第 n 个节点,并且返回链表的头结点。
示例:
给定一个链表: 1->2->3->4->5, 和 n = 2.
当删除了倒数第二个节点后,链表变为 1->2->3->5.
说明:
给定的 n 保证是有效的。
进阶:
你能尝试使用一趟扫描实现吗?
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
if not head or n <= 0:
return None
pre = ListNode(0)
pre.next = head
preNode = pre
slow = fast = head
for i in range(n):
if fast:
fast = fast.next
else:
break
while fast:
slow = slow.next
preNode = preNode.next
fast = fast.next
preNode.next = slow.next
return pre.next
| UTF-8 | Python | false | false | 929 | py | 357 | 19.删除链表的倒数第N个结点.py | 356 | 0.488032 | 0.472074 | 0 | 47 | 14.957447 | 40 |
BeBruceThomas/KilaueaKoaeProject | 1,537,598,301,188 | 95bf803d5d606d8f9ce595a69b75fc0548298a0d | 2f8ca0ed0a32b72b9299087e8add11acb77c3555 | /scripts_okada/setup.py | b821ffd9d7212160c636baedb62a33b2adbcec7a | [
"MIT"
]
| permissive | https://github.com/BeBruceThomas/KilaueaKoaeProject | 39dd3753495b4eb70df953342cc4381b176ce385 | a740ba1a981c7beab631cf694b86701b0fc44ee2 | refs/heads/master | 2021-01-22T19:14:57.898037 | 2017-08-22T01:34:40 | 2017-08-22T01:34:40 | 100,773,271 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from numpy.distutils.core import setup, Extension
# -g compiles with debugging information.
# -O0 means compile with no optimization, try -O3 for blazing speed
compile_args = ['-O3']
ext = []
ext.append(Extension('DC3D',
sources = ['okada_wrapper/DC3D.f',
'okada_wrapper/DC3D.pyf'],
extra_compile_args=compile_args))
setup(
name = "okada_wrapper",
packages = ['okada_wrapper'],
version = '0.1.0',
description = 'Python and MATLAB wrappers for the Okada Green\'s function codes',
author = 'Ben Thompson',
author_email = 't.ben.thompson@gmail.com',
url = 'https://github.com/tbenthompson/okada_wrapper',
keywords = ['okada', 'elastic', 'halfspace'],
classifiers = [],
ext_modules=ext
)
| UTF-8 | Python | false | false | 781 | py | 38 | setup.py | 21 | 0.629962 | 0.618438 | 0 | 23 | 32.956522 | 84 |
TSL-UOB/CAV-Gym | 9,912,784,556,962 | 99e19935bfdf647fb1fd102dae24aa0fa57c633c | 564cff5c2f3677f4c9913968a40860accc2f0378 | /examples/targets.py | c35cb46bd68e7ab66e742d867bdd24cb748721dc | []
| no_license | https://github.com/TSL-UOB/CAV-Gym | 63aac9bdd2239646811fadbfae881bafc91689fb | 5f841160806464620cfcfc1f5e6f1006090ac402 | refs/heads/master | 2021-05-20T08:23:22.818372 | 2021-05-11T12:22:31 | 2021-05-11T12:22:31 | 252,192,085 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
from enum import Enum
class TargetVelocity(Enum):
MIN = 0
MID = 1
MAX = 2
class TargetOrientation(Enum):
NORTH = math.pi * 0.5
NORTH_EAST = math.pi * 0.25
EAST = 0.0
SOUTH_EAST = -(math.pi * 0.25)
SOUTH = -(math.pi * 0.5)
SOUTH_WEST = -(math.pi * 0.75)
WEST = math.pi
NORTH_WEST = math.pi * 0.75
| UTF-8 | Python | false | false | 356 | py | 29 | targets.py | 26 | 0.570225 | 0.511236 | 0 | 19 | 17.736842 | 34 |
schevalier/chronology | 9,895,604,657,076 | 35f3df96bd1f160a230eace55f648ec53118bc1b | 786b1ae976e36f123975613c950ca26d300cf35f | /common/tests/test_json_schema.py | 0cb7d30d037f4e33b89244b3ab67b1c3b860af9f | [
"MIT"
]
| permissive | https://github.com/schevalier/chronology | 0d841d7e9a358f26b24cb0b300480ab0be994f32 | 237c8c1df4bce4636f3bda48c4fd73c979131e1e | refs/heads/master | 2023-05-25T06:28:37.363358 | 2014-12-09T13:56:48 | 2014-12-09T13:56:48 | 27,831,968 | 0 | 0 | MIT | true | 2023-05-23T06:54:54 | 2014-12-10T17:50:57 | 2014-12-10T17:50:58 | 2023-05-23T06:54:54 | 4,053 | 0 | 0 | 5 | Python | false | false | import json
import unittest
from src.json_schema import AnyType
from src.json_schema import ArrayType
from src.json_schema import BooleanType
from src.json_schema import IntegerType
from src.json_schema import NullType
from src.json_schema import NumberType
from src.json_schema import ObjectType
from src.json_schema import StringType
from src.json_schema import get_schema_type
class SchemaTest(unittest.TestCase):
def test_get_schema_type(self):
# Basic parsing.
self.assertEqual(type(get_schema_type(False)), BooleanType)
self.assertEqual(type(get_schema_type(1)), IntegerType)
self.assertEqual(type(get_schema_type(1.6)), NumberType)
self.assertEqual(type(get_schema_type(None)), NullType)
self.assertEqual(type(get_schema_type('lolcat')), StringType)
self.assertEqual(type(get_schema_type([1])), ArrayType)
self.assertEqual(type(get_schema_type({'a': 'b'})), ObjectType)
# Array parsing.
schema = get_schema_type([1, 2, 3])
self.assertEqual(type(schema), ArrayType)
self.assertEqual(type(schema.items), IntegerType)
schema = get_schema_type([1, False, 'hello'])
self.assertEqual(type(schema), ArrayType)
self.assertEqual(type(schema.items), AnyType)
# Object parsing.
schema = get_schema_type({'lol': 'cat', 'int': 10, 'bool': True,
'nested': {'hello': 'world'}})
self.assertEqual(type(schema), ObjectType)
self.assertEqual(len(schema.properties), 4)
self.assertEqual(set(schema.properties), {'lol', 'int', 'bool', 'nested'})
self.assertEqual(len(schema.required), 4)
self.assertEqual(set(schema.required), {'lol', 'int', 'bool', 'nested'})
self.assertEqual(type(schema.properties['lol']), StringType)
self.assertEqual(type(schema.properties['int']), IntegerType)
self.assertEqual(type(schema.properties['bool']), BooleanType)
nested = schema.properties['nested']
self.assertEqual(type(nested), ObjectType)
self.assertEqual(len(nested.properties), 1)
self.assertEqual(set(nested.properties), {'hello'})
self.assertEqual(len(nested.required), 1)
self.assertEqual(set(nested.required), {'hello'})
self.assertEqual(type(nested.properties['hello']), StringType)
def test_combining(self):
a = {
'bool': True,
'string': 'hello',
'int': 1,
'number': 2,
'null': None,
'any': False,
'array_int': [1, 2],
'array_any': [False],
'object': {
'lol': 'cat'
},
'not_required1': 1
}
b = {
'bool': False,
'string': 'world',
'int': 23,
'number': 2.56,
'null': None,
'any': 'yo',
'array_int': [3, 4],
'array_any': ['any'],
'object': {
'lol': 'cat',
'not_required2': 'blah'
},
'null_not_required': None
}
merged = get_schema_type(a).combine(get_schema_type(b))
expected = {
'properties': {
'any': {
'type': 'any'
},
'array_any': {
'items': {
'type': 'any'
},
'type': 'array'
},
'array_int': {
'items': {
'type': 'integer'
},
'type': 'array'
},
'bool': {
'type': 'boolean'
},
'int': {
'type': 'integer'
},
'not_required1': {
'type': 'integer'
},
'null': {
'type': 'null'
},
'number': {
'type': 'number'
},
'object': {
'properties': {
'lol': {
'type': 'string'
},
'not_required2': {
'type': 'string'
}
},
'required': ['lol'],
'type': 'object'
},
'string': {
'type': 'string'
},
'null_not_required': {
'type': 'null'
},
},
'required': sorted(['bool', 'string', 'int', 'number', 'null', 'any',
'array_int', 'array_any', 'object']),
'type': 'object'
}
self.assertEqual(json.dumps(merged.to_dict(), sort_keys=True),
json.dumps(expected, sort_keys=True))
| UTF-8 | Python | false | false | 4,216 | py | 184 | test_json_schema.py | 120 | 0.541983 | 0.534867 | 0 | 140 | 29.114286 | 78 |
MIlenaMontoya/holbertonschool-higher_level_programming | 5,660,766,915,083 | 32bda4ccc21c9f8f2a4937b28ba07a536209c104 | 1261bc255ed3df9ed760e1fa70ad58dbc4e52c30 | /0x04-python-more_data_structures/0-square_matrix_simple.py | 8f65e04f7bb14f17645546dacda8fa257d73defe | []
| no_license | https://github.com/MIlenaMontoya/holbertonschool-higher_level_programming | 77ece3156d9c0490c69090665b79e1c16def02d1 | 9b6942b509bd32cd8f3570d23277404631096e7d | refs/heads/master | 2023-03-07T03:08:27.689032 | 2021-02-10T04:40:48 | 2021-02-10T04:40:48 | 291,843,392 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
def square_matrix_simple(matrix=[]):
nmtx = []
for i in matrix:
nmtx.append(list(map(lambda a: a ** 2, i)))
return nmtx
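# Usage sketch:
#
#     square_matrix_simple([[1, 2], [3, 4]])  # -> [[1, 4], [9, 16]]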
| UTF-8 | Python | false | false | 159 | py | 45 | 0-square_matrix_simple.py | 39 | 0.584906 | 0.572327 | 0 | 6 | 25.5 | 51 |
jorgeaugusto01/PortfolioManager | 386,547,091,287 | 893f9b7ac56bfcc267caa70f689f099f0f300d97 | 5d7a5da289c3796a55cdce7a233b5df1a64e1bdf | /Entidades/TipoArquivo.py | 0d556f35f11a5abab3b5cf7aed9b4bb75106f33f | []
| no_license | https://github.com/jorgeaugusto01/PortfolioManager | e8222e9c20147f233178fd5a3e3a99885578d15a | e89d6d7ce54be3e79c9fc389586cebf33721e952 | refs/heads/master | 2018-10-10T04:29:53.190631 | 2018-10-09T23:39:01 | 2018-10-09T23:39:01 | 131,020,188 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import psycopg2
from PostgreSql import PostgreSql
class TipoArquivo:
def __init__(self, dbCon):
self._cod_tipo_arquivo = None
self._desc_tipo_arquivo = None
self._dbcon = dbCon
def SelecionarTipoArquivo(self, codTipoArquivo):
select_tipo_arquivo = ("SELECT * FROM TB_TIPO_ARQUIVO WHERE _cod_tipo_arquivo = (%s)")
select_data_tipo_arquivo = (str(codTipoArquivo),)
result = self._dbcon.ExecuteSqlSelect(select_tipo_arquivo, select_data_tipo_arquivo)
if (result is not None):
            tipo_arquivo = TipoArquivo(self._dbcon)
tipo_arquivo._cod_tipo_arquivo = result[0][1]
tipo_arquivo._desc_tipo_arquivo = result[0][2]
return tipo_arquivo
else:
            return None
def InserirTipoArquivo(self, codTipoArquivo, descTipoArquivo):
tipo_arquivo = self.SelecionarTipoArquivo(codTipoArquivo)
if (tipo_arquivo is None):
add_tipo_arquivo = ("INSERT INTO TB_TIPO_ARQUIVO "
"(cod_tipo_arquivo, desc_tipo_arquivo)"
"VALUES (%s, %s)")
data_tipo_arquivo = (codTipoArquivo, descTipoArquivo)
if(self._dbcon.ExecuteSqlInsertUpd(add_tipo_arquivo, (data_tipo_arquivo)) == True):
tipo_arquivo = TipoArquivo(None)
tipo_arquivo._cod_tipo_arquivo = codTipoArquivo
tipo_arquivo._desc_tipo_arquivo = descTipoArquivo
return tipo_arquivo
else:
return None
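# Usage sketch (assumes a PostgreSql-style connection wrapper exposing
# ExecuteSqlSelect / ExecuteSqlInsertUpd, as used above):
#
#     repo = TipoArquivo(PostgreSql(...))
#     tipo = repo.InserirTipoArquivo(1, 'CSV')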
| UTF-8 | Python | false | false | 1,538 | py | 31 | TipoArquivo.py | 28 | 0.595579 | 0.592328 | 0 | 40 | 37.45 | 95 |
peterus/python-telegram | 1,056,561,988,263 | 935c8b48e84b358a506ad3ee09e769ab5a60e9c7 | b0828c61038f1a5b45f191eece16495654a69b9e | /telegram/__init__.py | eb11289fc5efa826776480ada61ca8e15b6df52b | [
"MIT"
]
| permissive | https://github.com/peterus/python-telegram | 480f95e6d5d127093d6be7559e6efdecb0a33508 | e551cde71d13d460a1b348168df91f682ffe48eb | refs/heads/master | 2020-05-01T06:38:18.991705 | 2019-03-23T20:09:17 | 2019-03-23T20:09:17 | 177,335,064 | 0 | 0 | MIT | true | 2019-03-23T20:07:14 | 2019-03-23T20:07:13 | 2019-03-18T07:36:28 | 2019-03-19T03:17:58 | 17,774 | 0 | 0 | 0 | null | false | null | __version__ = '0.8.0'
VERSION = __version__
| UTF-8 | Python | false | false | 45 | py | 2 | __init__.py | 1 | 0.533333 | 0.466667 | 0 | 3 | 14 | 21 |
karthikpappu/pyc_source | 1,030,792,168,630 | 945612f336bc0902c88b866a794e2795c73f840e | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pypi_install_script/airport-py-0.1.0.tar/setup.py | af0e82d411ef28aa6c8b1815750e2fcbd878156d | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import sys
from setuptools import setup, find_packages
if sys.platform != 'darwin':
sys.exit('airport-py depends on a specific mac os tool called "airport"')
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='airport-py',
version='0.1.0',
description='Mac OS X airport command result parser',
long_description=readme,
author='Egemen Yildiz',
author_email='egemenyildiz.e@gmail.com',
url='https://github.com/egemenyildiz/airport-py',
license=license,
packages=find_packages(),
python_requires="<=2.7.15",
classifiers=[
'Programming Language :: Python :: 2 :: Only',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
],
)
| UTF-8 | Python | false | false | 819 | py | 114,545 | setup.py | 111,506 | 0.632479 | 0.62149 | 0 | 30 | 26.3 | 77 |
QS-L-1992/DI-drive | 4,827,543,277,619 | bab1a23c50ab4d516c1018925cbe38d2533a9a83 | df13b7f3188e1cb0a9c2cb0a1d16d8a999416f15 | /core/utils/simulator_utils/md_utils/discrete_policy.py | c598c48d3288ddbc0142f012c8d1ff24139597c3 | [
"Apache-2.0"
]
| permissive | https://github.com/QS-L-1992/DI-drive | 5d2d4c387cbed933cc4e89459c262945ce811335 | 8246b09d631826a1db85154ac854808b15f8a5b1 | refs/heads/main | 2023-08-16T11:13:16.261898 | 2022-10-10T06:41:22 | 2022-10-10T06:41:22 | 395,540,647 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import numpy as np
from metadrive.component.vehicle_module.PID_controller import PIDController
from metadrive.policy.base_policy import BasePolicy
from metadrive.policy.manual_control_policy import ManualControlPolicy
from metadrive.utils.math_utils import not_zero, wrap_to_pi, point_distance
from metadrive.utils.scene_utils import is_same_lane_index, is_following_lane_index
from metadrive.engine.core.manual_controller import KeyboardController, SteeringWheelController
from metadrive.utils import clip
from metadrive.examples import expert
from metadrive.policy.env_input_policy import EnvInputPolicy
from direct.controls.InputState import InputState
from metadrive.engine.engine_utils import get_global_config
import gym
from gym import Wrapper
from gym import spaces
# from metadrive.envs.base_env import BaseEnv
# from metadrive.envs.metadrive_env import MetaDriveEnv
from typing import Callable
class ActionType(object):
#def __init__(self, env: 'MetaDriveEnv', **kwargs) -> None:
def __init__(self, env, **kwargs) -> None:
self.env = env
self.__controlled_vehicle = None
def space(self) -> spaces.Space:
raise NotImplementedError
@property
def vehicle_class(self) -> Callable:
raise NotImplementedError
def act(self, action) -> None:
raise NotImplementedError
@property
def controlled_vehicle(self):
return self.__controlled_vehicle or self.env.vehicle
@controlled_vehicle.setter
def controlled_vehicle(self, vehicle):
self.__controlled_vehicle = vehicle
class DiscreteMetaAction(ActionType):
ACTIONS_ALL = {0: 'LANE_LEFT', 1: 'IDLE', 2: 'LANE_RIGHT', 3: 'FASTER', 4: 'SLOWER', 5: 'Holdon'}
    def __init__(self, env=None, **kwargs):
        # Initialize the base class so the controlled_vehicle property resolves.
        super(DiscreteMetaAction, self).__init__(env, **kwargs)
        self.actions = self.ACTIONS_ALL
        self.actions_indexes = {v: k for k, v in self.actions.items()}
def space(self) -> spaces.Space:
return spaces.Discrete(5)
#return spaces.Discrete(len(self.actions))
# @property
# def vehicle_class(self) -> Callable:
# return MDPVehicle
def act(self, action: int) -> None:
self.controlled_vehicle.act(self.actions[action])
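# Usage sketch (illustrative): map a discrete index to a meta-action label.
# Note that space() above exposes Discrete(5), i.e. indices 0-4 only, so
# 'Holdon' (index 5) sits outside the advertised action space.
#
#     action_type = DiscreteMetaAction()
#     print(action_type.actions[3])               # 'FASTER'
#     print(action_type.actions_indexes['IDLE'])  # 1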
| UTF-8 | Python | false | false | 2,185 | py | 231 | discrete_policy.py | 149 | 0.719908 | 0.716705 | 0 | 67 | 31.61194 | 101 |
chrispydych/Projects | 11,355,893,551,115 | eb10c6abf70790fede19ac90a58ed2d1549b26de | 668903cd9436eff8dbf1333691f0577a34eb7bad | /Python/NumberClassifier/Main.py | c4cb4ae1f3394273ad0577bce92d568989e90e91 | []
| no_license | https://github.com/chrispydych/Projects | 8337ec7fd7d272143f9d7a66eb285850c783c079 | ab24c5ebde3d4a161afe419a2b9638a75e4f7b97 | refs/heads/master | 2022-12-23T07:57:39.522160 | 2022-07-12T02:32:39 | 2022-07-12T02:32:39 | 232,688,967 | 1 | 0 | null | false | 2022-12-10T14:18:22 | 2020-01-09T00:41:50 | 2021-10-05T21:23:18 | 2022-12-10T14:18:19 | 465,049 | 1 | 0 | 3 | Jupyter Notebook | false | false | import cv2
import numpy
import Model
import PreProcessor
import Analyzer
import DataLoader
import tensorflow as tf
| UTF-8 | Python | false | false | 116 | py | 383 | Main.py | 65 | 0.853448 | 0.844828 | 0 | 7 | 15.428571 | 23 |
arjuntsaji/Django-crud-operations-miniproject | 18,614,388,284,475 | bb217615e055aa4f891392717c496510f5718dfe | c249f6a46ae5f20669b8db87bf0dc5bae49e6770 | /Brandless/accounts/models.py | 0f087471b6971b0c1baa1c419f729c62ffafa339 | []
| no_license | https://github.com/arjuntsaji/Django-crud-operations-miniproject | 390715fb0f5faed9e26f4cfb9dbf8916e2de4d0d | c1a193e5a80cfb3ac64e7c3ebdd9ae4bd43be833 | refs/heads/master | 2023-05-14T20:29:31.609820 | 2021-06-06T04:42:46 | 2021-06-06T04:42:46 | 348,951,368 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
# from django.core.validators import MinValueValidator,MaxValueValidator
# Create your models here.
class Position(models.Model):
title=models.CharField(max_length=30)
def __str__(self):
return self.title
class Department(models.Model):
department=models.CharField(max_length=30)
def __str__(self):
return self.department
class Typeofwork(models.Model):
type=models.CharField(max_length=20)
def __str__(self):
return self.type
class Employees(models.Model):
first_name=models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
emp_id = models.CharField(max_length=30)
department = models.ForeignKey(Department,on_delete=models.SET_NULL,null=True)
email= models.EmailField(max_length=30)
number = models.BigIntegerField()
position = models.ForeignKey(Position,on_delete=models.SET_NULL,null=True)
typeofwork = models.ForeignKey(Typeofwork,on_delete=models.SET_NULL,null=True)
resume_file=models.FileField(null=True,upload_to='resume',blank=True)
def __str__(self):
return self.first_name
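# Query sketch against these models (assumes migrations have been applied;
# not part of the original file):
#   Employees.objects.filter(department__department='Sales').select_related('position')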
| UTF-8 | Python | false | false | 1,144 | py | 36 | models.py | 23 | 0.716783 | 0.704545 | 0 | 40 | 27.575 | 82 |
DamonZCR/PythonStu | 11,081,015,641,023 | f2afcd777e0d0e22edc673f54e4485e391903833 | caa72788fdae6b05c5ce4c132b45fc00d55bb607 | /47Tkinter/Canvas画布/18-2Canvas画布调整.py | 72a9ff5bfb98d61109c1f78dde004bd9f4cc162d | []
| no_license | https://github.com/DamonZCR/PythonStu | dcc2ba49195f5859fd63227fe0f8f78b36ed46df | 88fec97e3bccff47ba1c5f521f53a69af6ca2b2d | refs/heads/master | 2023-07-05T06:29:53.300920 | 2021-08-13T12:22:30 | 2021-08-13T12:22:30 | 302,256,563 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import *
'''On a canvas, item styles can be adjusted with three methods: coords(), itemconfig(), and move() or delete()'''
root = Tk()
w = Canvas(root, width=200, height=100)
w.pack()
# The first two arguments are the start point, the last two the end point; dash=(4, 4) draws a dashed line
line1 = w.create_line(0, 50, 200, 50, fill='black')
line2 = w.create_line(100, 0, 100, 100, fill='red', dash=(4, 4))
rect1 = w.create_rectangle(50, 25, 150, 75, fill='orange')
# coords() moves/reshapes an item by replacing its coordinates
w.coords(line1, 0, 25, 200, 20)
# itemconfig() changes an item's configuration options
w.itemconfig(rect1, fill='blue')
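# move() (the third method named above) shifts an item by a relative offset
w.move(rect1, 10, 0)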
w.delete(line2)
# ALL is a built-in tag that refers to every object on the canvas.
Button(root, text='清除所有', command=(lambda x=ALL:w.delete(x))).pack()
mainloop() | UTF-8 | Python | false | false | 736 | py | 237 | 18-2Canvas画布调整.py | 230 | 0.685512 | 0.59894 | 0 | 18 | 30.5 | 68 |
ArkCase/arkcase-ce | 11,149,735,108,033 | dab9b94f3f40184aed686bfb6bae32c846e7be66 | d0c835f43df436429177059e290160546bac3c1a | /vagrant/provisioning/roles/arkcase-app/files/fixGroups.py | 3c66b85ff3f5bc653770e0a4533f901770068f94 | []
| no_license | https://github.com/ArkCase/arkcase-ce | 1d92412c680399431880aaa32908b887cc17ce68 | f0d7867bb87a8fa16b637f3407aaf8b74b0cd57c | refs/heads/develop | 2023-08-24T23:09:58.507519 | 2023-08-17T12:33:10 | 2023-08-17T12:33:10 | 171,937,101 | 19 | 15 | null | false | 2023-09-13T14:56:53 | 2019-02-21T20:04:13 | 2023-06-13T13:03:26 | 2023-09-13T14:56:53 | 73,385 | 14 | 14 | 26 | JavaScript | false | false | #!/usr/bin/env python
import sys
import openpyxl
if len(sys.argv) < 4:
print ("Must provide Excel file name, and at least one old_string,new_string pair")
sys.exit(1)
excel_file = sys.argv[1] + ".xlsx"
new_excel_file = sys.argv[1] + "-updated.xlsx"
wb = openpyxl.load_workbook(excel_file)
sheet = wb.get_sheet_by_name('Sheet1')
updated = False
for r in range(1, sheet.max_row + 1):
for c in range(1, sheet.max_column + 1):
rowcol_index = openpyxl.utils.cell.get_column_letter(c) + str(r)
v = sheet[rowcol_index].value
for p in range(2, len(sys.argv), 2):
old_string = sys.argv[p]
new_string = sys.argv[p + 1]
if old_string != new_string and hasattr(v, 'strip') and v.strip() == old_string:
print ("replacing " + v)
sheet[rowcol_index] = new_string
updated = True
if updated:
wb.save(filename = new_excel_file)
wb.close()
| UTF-8 | Python | false | false | 956 | py | 217 | fixGroups.py | 13 | 0.59728 | 0.584728 | 0 | 32 | 28.875 | 92 |
chaneylc/SvgLayerParser | 6,279,242,229,169 | 403da10b279d40ce90ba64062fc6d4a8f7084327 | ed9fc9b61083ac32cef239336699f92c5e4796f5 | /SvgParser.py | b8eb5bf70949f55d38bab996e8198667c95b3009 | []
| no_license | https://github.com/chaneylc/SvgLayerParser | 728a9fb54f5923b3e3da2952d9cd78bfc6b51553 | b00a6e4343e065bd230f36663f5a14e498e251e4 | refs/heads/master | 2021-01-23T05:30:14.721468 | 2017-06-08T21:17:56 | 2017-06-08T21:17:56 | 92,974,029 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from subprocess import Popen, PIPE
from lxml import etree
from svglib.svglib import svg2rlg
from reportlab.graphics import renderPM
import os
SLIC3R_HOME=None
if "SLIC3R_HOME" in os.environ:
SLIC3R_HOME = os.environ["SLIC3R_HOME"]
else :
SLIC3R_HOME = os.getcwd()
SLIC3R_SCRIPT = os.path.join(SLIC3R_HOME, "slic3r.pl")
print("Slic3r home: {}".format(SLIC3R_HOME))
print("Slic3r script: {}".format(SLIC3R_SCRIPT))
#function takes .svg filename as input and number of layers to be built
#used slic3r.pl --export-svg as reference .svg
def build(filename, numLayers):
#namespaces
z = '{http://slic3r.org/namespaces/slic3r}z' #namespace for z layer in attributes
g = '{http://www.w3.org/2000/svg}g' #namespace for g tag in svg element
svg = '{http://www.w3.org/2000/svg}svg' #namespace for svg tag in root
#need this header line for correctly formatted svg file
header = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">'''.replace('\n', '')
layers = [] #list of elements (g) to create svg from
tree = etree.parse(filename)
    for elem in tree.iter():  # iter() replaces the deprecated getiterator()
        if elem.tag is not etree.Comment:
if svg in elem.tag: #found svg header
                header += etree.tostring(elem, encoding='unicode').split("<g")[0]
if g in elem.tag: #found layer
layer = elem.attrib.get('id')
if int(layer.split("layer")[1]) <= numLayers:
                    layers.append(etree.tostring(elem, encoding='unicode').replace('\n', ''))
else :
break
return header + "".join(layers) + "</svg>"
def createIdeal(layer, filePrefix):
#valid svg file string
rawString = build("{}.svg".format(filePrefix), layer)
#create temp .svg and write rawstring to file
f = open("tempSvg.svg", 'w')
f.write(rawString)
f.close()
#use svg2rlg lib to create .png
drawing = svg2rlg("tempSvg.svg")
#ideal files will be named e.g bikiniIdeal_1.png for layer 1
renderPM.drawToFile(drawing, "{}Ideal_{}.png".format(filePrefix, layer))
#implement camera object capture
def captureActual(layer):
#filename should be "{}Actual_{}.png" s.a: bikiniActual_1.png for layer 1
pass
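    # Sketch of one possible implementation via OpenCV (an assumption -- cv2 and
    # a filePrefix argument are not part of the original interface):
    #   ok, frame = cv2.VideoCapture(0).read()
    #   if ok:
    #       cv2.imwrite("{}Actual_{}.png".format(filePrefix, layer), frame)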
#utilize printrun library to call this list of commands
def printCode(code):
commandString = "".join(code)
#use printrun lib to call must be a blocking command
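    # A possible blocking sketch with printrun's printcore (API details are
    # assumptions; verify against the installed printrun version):
    #   from printrun.printcore import printcore
    #   p = printcore('/dev/ttyUSB0', 115200)  # port and baud rate are assumptions
    #   p.startprint(commandString.splitlines())
    #   while p.printing: time.sleep(1)
    #   p.disconnect()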
#our image analysis algorithm goes here
def analyzeActualAndIdeal(layer, filePrefix):
ideal = "{}Ideal_{}.png".format(filePrefix, layer)
actual = "{}Actual_{}.png".format(filePrefix, layer)
if os.path.isfile(ideal) and os.path.isfile(actual):
try:
            idealImg = open(ideal, 'r')
            actualImg = open(actual, 'r')
            #image analysis code
            idealImg.close()
            actualImg.close()
        except Exception as e:
            print(e)
#the main function to run the experiment
def run(filePrefix):
#initialize filenames and check if they exist before continuing
gcodeFilename = filePrefix + ".gcode"
svgFilename = filePrefix + ".svg"
if os.path.isfile(gcodeFilename) and os.path.isfile(svgFilename):
try :
#load .gcode string into a list
gcode = open(gcodeFilename).readlines()
#layerCode holds the commands for a single layer
layerCode = []
#foreach line add the code to our list
#check if it calls the 'next layer' identifier
#if it does, print the layer and capture actual image
#create png to compare with actual
for line, code in enumerate(gcode):
layerCode.append(code)
if "; move to next layer" in code:
#print layerCode
layer = int(code.split('(')[1].split(')')[0])
#this call blocks until print layer is complete
printCode(layerCode)
#skip cam check for code before first layer
if not layer == 0: #and len(layerCode) >= 5 ?
#print "attempting capture"
captureActual(layer)
#print "attempting create ideal"
createIdeal(layer, filePrefix)
#print "attempting analyze"
analyzeActualAndIdeal(layer, filePrefix)
#reset layer code
layerCode = []
        except Exception as e:
            print(gcodeFilename, svgFilename)
            print(e)
#the function to create .gcode and .svg
#user gives a 3d object file and kwargs for passing directly to
#slic3r when executing gcode --not-implemented yet--
def init(fileStl=None, isRun=True, **kwargs):
if fileStl:
cwd = os.getcwd()
absFileStl = os.path.join(cwd, fileStl)
#grab the file prefix to write .gcode and .svg file
filePrefix = os.path.join(cwd, fileStl.split('.')[0])
try :
#call slic3r to create gcode
            #pass an argument list so this works without a shell on all platforms
            p = Popen(['perl', SLIC3R_SCRIPT, '--gcode-comments',
                       '-o', '{}.gcode'.format(filePrefix), absFileStl])
p.wait()
p.terminate()
p.kill()
#call slic3r to create svg
            p = Popen(['perl', SLIC3R_SCRIPT, '--export-svg',
                       '-o', '{}.svg'.format(filePrefix), absFileStl])
p.wait()
p.terminate()
p.kill()
if isRun:
run(filePrefix)
        except Exception as e:
            print(e)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
print "Running in script mode: {}".format(sys.argv[0])
print "Executing on 3d object: {}".format(sys.argv[1])
inputFilename = sys.argv[1]
if os.path.isfile(inputFilename):
init(inputFilename)
else: #check if they gave relative path
absFilename = os.path.join(os.getcwd(), inputFilename)
if os.path.isfile(absFilename):
init(absFilename)
| UTF-8 | Python | false | false | 6,427 | py | 2 | SvgParser.py | 1 | 0.567761 | 0.556714 | 0 | 174 | 34.936782 | 96 |
Federico-PizarroBejarano/Don-Mills-Online-Judge | 12,945,031,455,508 | dfbfa87445e0f4e875c84f7220712138587a2050 | cacdbf688209cce2f39698758346b99de7d5281d | /Who is in the Middle.py | 79aa9c51f3ccb9ed5e015358a349b4a413e4364a | []
| no_license | https://github.com/Federico-PizarroBejarano/Don-Mills-Online-Judge | 27d168e390cdf7be104117d6a699fd7df4104b63 | 6e77978a19d29ec3095687b71dc8eff3565f6a60 | refs/heads/master | 2021-05-11T09:14:24.849165 | 2018-01-19T03:42:06 | 2018-01-19T03:42:06 | 118,072,968 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = []
for i in range(3):
    b = int(input())  # input() returns a string in Python 3
a.append(b)
a.sort()
print(a[1])
| UTF-8 | Python | false | false | 85 | py | 88 | Who is in the Middle.py | 87 | 0.458824 | 0.435294 | 0 | 6 | 12 | 18 |
DavidBitner/Aprendizado-Python | 9,139,690,418,144 | 48b9dba3b2a2e5164631f8b9b23b435860246b6b | 7a20dac7b15879b9453150b1a1026e8760bcd817 | /Curso/ExMundo1/Ex027Strings6PrimeiroUltimoNome.py | c302d55d9ae5de23380f95b1acdf732f5ffd18e5 | [
"MIT"
]
| permissive | https://github.com/DavidBitner/Aprendizado-Python | 7afbe94c48c210ddf1ab6ae21109a8475e11bdbc | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | refs/heads/master | 2023-01-02T13:24:38.987257 | 2020-10-26T19:31:22 | 2020-10-26T19:31:22 | 283,448,224 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | nome = str(input('Digite um nome completo: ')).strip()
separado = nome.split()
pnome = separado[0]
unome = separado[-1]
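# pnome/unome hold the first and last name; the user-facing prompts above are in Portuguese.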
print('O nome digitado foi: {}'.format(nome))
print('O primeiro nome é: {}'.format(pnome))
print('O ultimo nome é: {}'.format(unome))
| UTF-8 | Python | false | false | 256 | py | 289 | Ex027Strings6PrimeiroUltimoNome.py | 283 | 0.673228 | 0.665354 | 0 | 7 | 35.285714 | 54 |
yajiayang/previous_projects | 14,903,536,540,332 | 4559272fd1bb57efc6c8e61fd6cbff27003dfb5a | 8a008f7e39125ec8b982fa0d5b6ef1f233f2ce29 | /topic_recom.py | 5bd51045cb285a784c7fab1f1b192d5acb2fa962 | []
| no_license | https://github.com/yajiayang/previous_projects | 1b87e5a0916a8a8cd5d6282c83a5166a936f89e2 | 339cdb18bae0903d6a755293e38e4bb2c30d0f4d | refs/heads/master | 2021-01-22T19:14:05.347218 | 2015-08-19T23:48:20 | 2015-08-19T23:48:20 | 41,065,246 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import collections as co
from top_search_scraper import Belieber
import re
def coffee_rel(path,term):
with open(path) as handle:
for line in handle:
item = line.replace('\n','').split('\t')
a = item[1]+item[2]+item[3]+item[4]
match = re.findall(term,a)
if match != []:
yield item[0],len(match)
term = input("Enter Term or enter q to quit: ")  # raw_input() was Python 2 only
path = '/Users/royyang/Desktop/coffee/dump.txt'
path3 = '/Users/royyang/Desktop/coffee/cof_rel_fullcontent.txt'
a=coffee_rel(path,term)
count=0
with open(path3,'w') as new:
for item in a:
count+=1
new.write(item[0]+'\t'+str(item[1])+'\n')
r = Belieber()
r.ask([term])
result = []
with open('/Users/royyang/Desktop/Top_search_term/top_search_'+term+'.txt') as new:
for line in new:
item = line.replace('\n','').split('\t')
result.append(item[1])
m = co.Counter(result)
# suggested item to work on
print(term, count, m)
| UTF-8 | Python | false | false | 1,007 | py | 46 | topic_recom.py | 45 | 0.590864 | 0.578947 | 0 | 35 | 27.571429 | 83 |
kejek/legislature | 14,061,722,935,492 | 054832a56acc220659ddd17047292b1275b9eb2a | a2722a407ff5e5c4ac488b162c9a8911ba1a8cfc | /checkout/admin.py | 0eb388f89fcc9dc6aab209e8061456edc98db04d | []
| no_license | https://github.com/kejek/legislature | 221b65cff8eefa260ab61a1bfbeca27396bbe724 | a78fc5aaf3b6fa641143693b60030e34370b777a | refs/heads/master | 2021-01-02T09:10:02.239242 | 2017-08-02T20:04:35 | 2017-08-02T20:04:35 | 99,149,699 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Employee
from .models import Group
from .models import Section
from .models import Division
from .models import Status
class EmployeeAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'image', 'phone', 'status_name',
'notes', 'returning', 'site_phone', 'site', 'schedule',
'section_name', 'group_name', 'user']
class Meta:
model = Employee
def section_name(self, obj):
return obj.section.name
section_name.admin_order_field = 'name'
def group_name(self, obj):
return obj.group.name
group_name.admin_order_field = 'name'
    def status_name(self, obj):
return obj.status.desc
status_name.admin_order_field = 'desc'
class GroupAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'description']
class SectionAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'description', 'division_name']
def division_name(self, obj):
return obj.division.name
division_name.admin_order_field = 'name'
class DivisionAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'description']
class StatusAdmin(admin.ModelAdmin):
list_display = ['id', 'desc', 'statusId']
admin.site.register(Employee, EmployeeAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Section, SectionAdmin)
admin.site.register(Division, DivisionAdmin)
admin.site.register(Status, StatusAdmin)
| UTF-8 | Python | false | false | 1,482 | py | 22 | admin.py | 14 | 0.674764 | 0.674764 | 0 | 59 | 24.118644 | 75 |
sukanyavenkataraman/Bigdatasystems | 4,475,355,941,399 | 3d5ff389fa89e655efc25b205789b52aaf5627d5 | 521a64784b9a12fff042e5e2ed10303b1143f59b | /load_generator_dependencies.py | cb963581a4f98c7d21c6f729d337a7834c204495 | []
| no_license | https://github.com/sukanyavenkataraman/Bigdatasystems | a1b876d1ea7a9bb3395603f8ec442255961d4dfb | 192a09189c17b5d1bc2ec55ba50fd52000cddfac | refs/heads/master | 2021-08-28T12:32:54.261467 | 2017-12-12T08:42:20 | 2017-12-12T08:42:20 | 113,410,010 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import random
import math
from datetime import datetime
# Usage: python load_generator.py output_file_path num_jobs max_execution_time_per_partition max_partitions_per_vertex max_ready_time max_vertices_per_job max_dependencies_per_vertex
# Sample Usage: python load_generator_dependencies.py foobar.txt 5 5 5 20 5 3
def main(args):
random.seed(datetime.now())
filename = args[1]
num_jobs = int(args[2])
max_execution_time_per_partition = int(args[3])
max_partitions_per_vertex = int(args[4])
max_ready_time = int(args[5]) - 1
max_vertices_per_job = int(args[6])
max_dependencies_per_vertex = int(args[7])
file = open(filename, 'w')
vertices_per_job = {}
start_time_per_job = {}
quota_per_job = {}
curr_time = 0
for i in range(0, num_jobs):
quota_per_job[i] = 1.0 * random.randint(1, 10) / 10.0
vertices_per_job[i] = random.randint(1, max_vertices_per_job)
start_time_per_job[i] = random.randint(0, max_ready_time)
print "current time: ", curr_time
line = generate_dag(i,
curr_time,
max_partitions_per_vertex,
max_execution_time_per_partition,
quota_per_job[i],
vertices_per_job[i],
max_dependencies_per_vertex)
file.write(line)
        delta = random.randint(1, max(1, math.floor(max_ready_time / num_jobs)))  # guard so the range is never empty
curr_time = curr_time + delta
file.write("\n")
file.close()
def generate_vertex(job_id, vertex_id, max_partitions_per_vertex,
max_execution_time_per_partition, quota, max_dependencies_per_vertex):
partitions = str(random.randint(1, max_partitions_per_vertex))
container_per_partition = str(1.0 * random.randint(1, 10) / 10.0)
quota = str(quota)
execution_time = str(random.randint(1, max_execution_time_per_partition))
num_dependencies = min(vertex_id,
random.randint(1, max_dependencies_per_vertex))
parent_vertices = []
while len(parent_vertices) < num_dependencies:
parent_vertex_id = random.randint(0, vertex_id - 1)
if parent_vertex_id in parent_vertices:
continue
else:
parent_vertices.append(parent_vertex_id)
    if num_dependencies == 0:  # 'is' on ints is unreliable; compare with ==
parent_vertices_str = "None"
else:
        parent_vertices_str = ':'.join(str(x) for x in sorted(parent_vertices))
return ','.join(
[chr(job_id + ord('A')), partitions, container_per_partition, quota,
execution_time, str(vertex_id), parent_vertices_str])
def generate_dag(job_id, time, max_partitions_per_vertex,
max_execution_time_per_partition, quota, vertices_in_job,
max_dependencies_per_vertex):
time_str = str(time) + '-'
counter = 0
while counter < vertices_in_job:
print "generating vertex id %s for job %s" % (job_id, counter)
time_str += generate_vertex(job_id, counter, max_partitions_per_vertex,
max_execution_time_per_partition, quota,
max_dependencies_per_vertex)
counter += 1
if counter != vertices_in_job:
time_str += ";"
return time_str
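# Each generated line reads "<ready_time>-<vertex>;<vertex>;..." where a vertex is
# "<job_letter>,<partitions>,<containers_per_partition>,<quota>,<exec_time>,<vertex_id>,<parents>".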
if __name__ == '__main__':
main(sys.argv)
| UTF-8 | Python | false | false | 3,183 | py | 21 | load_generator_dependencies.py | 4 | 0.633679 | 0.619855 | 0 | 99 | 31.151515 | 182 |
py2many/py2many | 15,101,105,040,255 | 6470ac64d69c1f479b7fc8c689c65b58fa7c9d1b | c012534659fbad6033dea9d2f2db905a8498c6a1 | /tests/expected/infer.py | 0d7e67112322080a8e96b7cea8fd9ca4d591daee | [
"MIT"
]
| permissive | https://github.com/py2many/py2many | dee68d496f17afdd85ef4828ab5c707de6587722 | c9947f91c7bd94cca5df41d1573dc9edd4682709 | refs/heads/main | 2023-08-16T21:22:28.844458 | 2023-02-13T04:00:27 | 2023-02-13T04:00:27 | 332,357,790 | 221 | 15 | MIT | false | 2023-08-07T02:27:45 | 2021-01-24T03:19:12 | 2023-07-31T01:24:36 | 2023-08-07T02:27:44 | 1,612 | 533 | 42 | 127 | Python | false | false | from typing import Callable, Dict, List, Set, Optional
from ctypes import c_int8 as i8, c_int16 as i16, c_int32 as i32, c_int64 as i64
from ctypes import c_uint8 as u8, c_uint16 as u16, c_uint32 as u32, c_uint64 as u64
import sys
def foo():
a: int = 10
b: int = a
assert b == 10
print(b)
if __name__ == "__main__":
foo()
| UTF-8 | Python | false | false | 345 | py | 394 | infer.py | 225 | 0.628986 | 0.536232 | 0 | 15 | 22 | 83 |
msoroush/alchemical-analysis | 9,629,316,680,528 | b7ff1478cfc4052f3efd5ff05595ed67478d521d | ea9c3169029e3c73c139ae614ef94de82a5088d9 | /alchemical_analysis/parser_desmond.py | 4a91129e794d8c26ba318bc54942136871681892 | [
"MIT"
]
| permissive | https://github.com/msoroush/alchemical-analysis | d262151c3222a51666ac6be3a018d1c24568032a | ac9f948db03639a35a7155752b26e98ef7b88fa5 | refs/heads/master | 2020-05-14T10:39:21.666691 | 2019-11-26T19:53:38 | 2019-11-26T19:53:38 | 181,764,643 | 1 | 1 | MIT | true | 2019-04-16T20:46:05 | 2019-04-16T20:46:04 | 2019-04-11T06:23:41 | 2018-06-28T15:55:06 | 11,771 | 0 | 0 | 0 | null | false | false | ###==========================###
# Desmond parser module
# Module adapted from Gromacs parser
# Adapted by Nathan M. Lim
###==========================###
import numpy
import os # for os interface
import re # for regular expressions
from glob import glob # for pathname matching
from collections import Counter # for counting elements in an array
import unixlike # some implemented unixlike commands
#===================================================================================================
# FUNCTIONS: This is the Desmond gibbs.N.dE file parser.
#===================================================================================================
def readDataDesmond(P):
class F:
def __init__(self, filename):
self.filename = filename
def sortedHelper(self):
meat = os.path.basename(self.filename).replace(P.prefix, '').replace(P.suffix, '')
            l = [i for i in re.split(r'\.|-|_', meat) if i]
try:
self.state = l[0] = int(l[0]) # Will be of use for selective MBAR analysis.
except:
print("\nERROR!\nFile's prefix should be followed by a numerical character. Cannot sort the files.\n")
raise
return tuple(l)
def get_snapsize(self):
self.skip_lines = 0
self.lv_names = ()
snap_size = [] # Time from first two snapshots to determine snapshot's size.
self.lv = [] # Lambda vectors, e.g. (0, 0), (0.2, 0), (0.5, 0).
with open(self.filename,'r') as infile:
for line in infile:
snap_size.append(float(line.split()[0]))
if len(snap_size) > 1:
self.snap_size = numpy.diff(snap_size)[0]
P.snap_size.append(self.snap_size)
break
def iter_loadtxt(self, state):
def iter_func():
with open(self.filename, 'r') as infile:
for _ in range(self.skip_lines):
next(infile)
for line in infile:
line = line.split()
for item in line:
yield item
def slice_data(data, state=state):
#Energies stored in:
# Reverse: data[1,:]
# Forward: data[2,:]
#Desmond unit input: kcal/mol, conversion factor 4.184kJ/kcal
#P.beta from alchemical_analysis.py in kJ/mol/K
#Return: u_klt contains energies of adjacent lambdas only
data = data.T
if state == 0:
u_klt[state, state+1 , :nsnapshots[state]] = data[ 2 , : ]*4.184*P.beta
elif state == K:
u_klt[state, state-1 , :nsnapshots[state]] = data[ 2 , : ]*4.184*P.beta
else:
u_klt[state, state-1, :nsnapshots[state]] = data[ 1 , :]*4.184*P.beta
u_klt[state, state+1, :nsnapshots[state]] = data[ 2 , :]*4.184*P.beta
return
print("Loading in data from %s (%s) ...") % (self.filename, 'state %d' % state)
data = numpy.fromiter(iter_func(), dtype=float)
if not self.len_first == self.len_last:
data = data[: -self.len_last]
data = data.reshape((-1, self.len_first))
slice_data(data)
#===================================================================================================
# Preliminaries I: Get LV,Snapsize,consistency check, and skip frames
#===================================================================================================
datafile_tuple = P.datafile_directory, P.prefix, P.suffix
fs = [ F(filename) for filename in glob( '%s/%s*%s' % datafile_tuple ) ]
n_files = len(fs)
if not n_files:
raise SystemExit("\nERROR!\nNo files found within directory '%s' with prefix '%s' and suffix '%s': check your inputs." % datafile_tuple)
if n_files > 1:
fs = sorted(fs, key=F.sortedHelper)
###Set lambda vector and get snapsize
lv = []
P.snap_size = []
for nf, f in enumerate(fs):
lv.append( [nf,0] )
f.get_snapsize()
P.lv_names = lv_names = f.lv_names
n_components = len(lv_names)
lv = numpy.array(lv, float) # *** Lambda vectors.
K = len(lv) # *** Number of lambda states.
equiltime = P.equiltime
nsnapshots = numpy.zeros(K, int)
for nf, f in enumerate(fs):
###Check for consistent timestep???
f.len_first, f.len_last = (len(line.split()) for line in unixlike.tailPy(f.filename, 2))
bLenConsistency = (f.len_first != f.len_last)
###Skip N snapshots
equilsnapshots = int(equiltime/f.snap_size)
f.skip_lines += equilsnapshots
nsnapshots[nf] += unixlike.wcPy(f.filename) - f.skip_lines - 1*bLenConsistency
print("First %s ps (%s snapshots) will be discarded due to equilibration from file %s...") % (equiltime, equilsnapshots, f.filename)
#===================================================================================================
# Preliminaries: Load in equilibrated data.
#===================================================================================================
maxn = max(nsnapshots) # maximum number of the equilibrated snapshots from any state
u_klt = numpy.zeros([K,K+1,int(maxn)], numpy.float64) # u_klt[k,m,t] is the reduced potential energy of snapshot t of state k evaluated at state m
for nf, f in enumerate(fs):
f.iter_loadtxt(nf)
return nsnapshots, lv, u_klt
| UTF-8 | Python | false | false | 5,606 | py | 30 | parser_desmond.py | 10 | 0.498751 | 0.489297 | 0 | 132 | 41.469697 | 155 |
zzilch/SUNCG-Search | 16,870,631,577,986 | e47c01a83971cb5ddc89575f1597246fac40ebe2 | 3bf21dcf36348a12ef2140274e09eca4cc12bba6 | /servers/db_api/db_api/resources/single_relationship.py | e300ad6862ca0af676d7b96e264c30d6ccd93ab9 | []
| no_license | https://github.com/zzilch/SUNCG-Search | a71492e61f5cee1fef0fc9ff867af96d88bccbf6 | 101e97da8ff441c38cdf80e8114b4e17eb9f1412 | refs/heads/master | 2020-07-30T09:00:30.780478 | 2017-06-07T19:42:24 | 2017-06-07T19:42:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import jsonify
from flask_restful import Resource
from flask_restful.utils import cors
from webargs import fields, validate
from webargs.flaskparser import use_args, use_kwargs, parser, abort
from db_api import cursor
from ..util import parseData
cmd_temptables = """
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS rel;
CREATE TEMPORARY TABLE t1 (
SELECT model_id
FROM models
WHERE
fine_grained_class = %s
OR coarse_grained_class = %s
);
CREATE TEMPORARY TABLE t2 (
SELECT model_id
FROM models
WHERE
fine_grained_class = %s
OR coarse_grained_class = %s
);
CREATE TEMPORARY TABLE rel (
SELECT
relations.id
FROM relations
WHERE
relations.name = %s
)
"""
def exec_cmd_temptables(pri_class, sec_class, relationship):
    cursor.execute(cmd_temptables, (pri_class, pri_class,
                                    sec_class, sec_class,
                                    relationship))
cmd_query = """
SELECT
scene_id,
level_num,
room_num,
    COUNT(*) AS occurrences
FROM
pairwise_rels pr
WHERE
pr.primary_id IN (SELECT * FROM t1)
AND pr.secondary_id IN (SELECT * FROM t2)
AND pr.relation_id IN (SELECT * FROM rel)
GROUP BY
scene_id,
level_num,
room_num
ORDER BY
    occurrences DESC
"""
class SingleRelationship(Resource):
singlerelationship_args = {
'primary': fields.Str(required=True),
'relationship': fields.Str(required=True),
'secondary': fields.Str(required=True),
}
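    # The three fields above are parsed from the query string, e.g. (hypothetical
    # values): ?primary=chair&relationship=next_to&secondary=table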
@cors.crossdomain(origin='*')
@use_kwargs(singlerelationship_args)
def get(self, primary, relationship, secondary):
print "SingleRelationship"
exec_cmd_temptables(primary, secondary, relationship)
cursor.execute(cmd_query)
data = cursor.fetchall()
scene_return, level_return, room_return = parseData(data)
return jsonify({'scene_results': scene_return,
'level_results': level_return,
'room_results' : room_return}), 200
| UTF-8 | Python | false | false | 2,113 | py | 65 | single_relationship.py | 51 | 0.629437 | 0.625177 | 0 | 85 | 23.788235 | 67 |
gadeuneo/SCSI-Python | 17,506,286,700,679 | 37fe2787fbe478dace3571c2704c974919cb9345 | d1e1eb5f292f3982ddf67ebefcc9429d77ec8241 | /saveImages.py | 8001533014cb474a2aae51c2924fd720bc1e0705 | []
| no_license | https://github.com/gadeuneo/SCSI-Python | 46b23c5e2f2f43308e78c9dad7f4ea588d7b3e6b | 194e296ffd06dcd50e64206fc125e9b04641a6d4 | refs/heads/master | 2021-01-25T08:49:08.825827 | 2014-08-07T15:56:48 | 2014-08-07T15:56:48 | 22,621,871 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
cap = cv2.VideoCapture(0)
frameNum = 0
filenameTemplate = "SavedImages/frame{:04d}.jpg"  # the SavedImages/ directory must already exist
key = ' '
while key != 'q':
ret, frame = cap.read()
cv2.imshow("Video", frame)
fileName = filenameTemplate.format(frameNum)
cv2.imwrite(fileName, frame)
x = cv2.waitKey(30)
key = chr(x & 0xFF)
frameNum = frameNum + 1
cap.release()  # free the camera device before tearing down the windows
cv2.destroyAllWindows()
| UTF-8 | Python | false | false | 378 | py | 37 | saveImages.py | 36 | 0.642857 | 0.60582 | 0 | 18 | 19.888889 | 48 |
fendaq/Arithmetic_Func_detection_for_CTPN | 1,864,015,812,542 | 964f22ee6ca1db02dfa1faeabb9abfac78a0b356 | 1c59524a45a5859d1bff39f83f7b4e6b2f9fdfbb | /lib/prepare_training_data/divide_dataset.py | 79f8507bf0b950fe299df64ac4ac6344974fdabe | []
| no_license | https://github.com/fendaq/Arithmetic_Func_detection_for_CTPN | d62087547e863f22df4c219ddd616ced4103a42b | 2bf6e05cd706189918ef892666d151894a049fad | refs/heads/master | 2020-03-30T04:17:10.971584 | 2018-09-28T09:48:27 | 2018-09-28T09:48:27 | 150,734,626 | 2 | 0 | null | true | 2018-09-28T12:05:15 | 2018-09-28T12:05:15 | 2018-09-28T09:48:34 | 2018-09-28T09:48:33 | 866 | 0 | 0 | 0 | null | false | null | import os,shutil
import numpy as np
import tqdm
validation_data_num = 22
img_dir = "/home/tony/ocr/ocr_dataset/tal_detec_data_v2/img"
xml_dir = "/home/tony/ocr/ocr_dataset/tal_detec_data_v2/xml"
train_img_dir = "/home/tony/ocr/ocr_dataset/ctpn/train_data/img"
train_xml_dir = "/home/tony/ocr/ocr_dataset/ctpn/train_data/xml"
val_img_dir = "/home/tony/ocr/ocr_dataset/ctpn/val_data/img"
val_xml_dir = "/home/tony/ocr/ocr_dataset/ctpn/val_data/xml"
img_type = ["jpg", "png", "JPG"]
def divide_dataset():
img_name_list = os.listdir(img_dir)
val_data_index = []
while len(val_data_index) != validation_data_num:
        value = np.random.randint(0, len(img_name_list))  # numpy's upper bound is exclusive, so this covers every index
if value not in val_data_index:
val_data_index.append(value)
if not os.path.exists(train_img_dir):
os.mkdir(train_img_dir)
if not os.path.exists(train_xml_dir):
os.mkdir(train_xml_dir)
if not os.path.exists(val_img_dir):
os.mkdir(val_img_dir)
if not os.path.exists(val_xml_dir):
os.mkdir(val_xml_dir)
for index in tqdm.tqdm(range(len(img_name_list))):
        img_name, img_ext = img_name_list[index].rsplit('.', 1)
        if img_ext not in img_type:  # the loop variable must not shadow the global img_type list
            assert 0, '{} is not an image'.format(img_name_list[index])
xml_name = img_name + '.xml'
assert os.path.exists(os.path.join(xml_dir,xml_name)), "{} not exist".format(xml_name)
if index in val_data_index:
shutil.copyfile(os.path.join(img_dir, img_name_list[index]),
os.path.join(val_img_dir,img_name_list[index]))
shutil.copyfile(os.path.join(xml_dir, xml_name),
os.path.join(val_xml_dir, xml_name))
else:
shutil.copyfile(os.path.join(img_dir, img_name_list[index]),
os.path.join(train_img_dir, img_name_list[index]))
shutil.copyfile(os.path.join(xml_dir, xml_name),
os.path.join(train_xml_dir, xml_name))
print(val_data_index)
if __name__ == "__main__":
divide_dataset() | UTF-8 | Python | false | false | 2,094 | py | 8 | divide_dataset.py | 7 | 0.602197 | 0.598854 | 0 | 63 | 32.253968 | 94 |
fengbingchun/PyTorch_Test | 8,315,056,686,839 | 102559cf7ac31fc88d0881895d59bf9b244a26c8 | 926b3c52070f6e309567c8598248fd5c57095be9 | /src/mmgeneration/configs/_base_/models/biggan/biggan_128x128.py | 29def783aef0700182a84f45fc1f9d1b9118578d | [
"Apache-2.0"
]
| permissive | https://github.com/fengbingchun/PyTorch_Test | 410f7cd2303707b0141d433fb9d144a961e1f4c8 | df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348 | refs/heads/master | 2023-05-23T16:42:29.711338 | 2023-03-25T11:31:43 | 2023-03-25T11:31:43 | 167,339,907 | 15 | 4 | null | false | 2023-03-25T11:31:45 | 2019-01-24T09:24:59 | 2022-12-08T03:01:33 | 2023-03-25T11:31:44 | 264,586 | 13 | 5 | 0 | C++ | false | false | model = dict(
type='BasiccGAN',
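    # NOTE: 'BasiccGAN' (double "c") is the upstream MMGeneration class name for
    # its basic conditional GAN; it is not a typo in this config.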
generator=dict(
type='BigGANGenerator',
output_scale=128,
noise_size=120,
num_classes=1000,
base_channels=96,
shared_dim=128,
with_shared_embedding=True,
sn_eps=1e-6,
init_type='ortho',
act_cfg=dict(type='ReLU', inplace=True),
split_noise=True,
auto_sync_bn=False),
discriminator=dict(
type='BigGANDiscriminator',
input_scale=128,
num_classes=1000,
base_channels=96,
sn_eps=1e-6,
init_type='ortho',
act_cfg=dict(type='ReLU', inplace=True),
with_spectral_norm=True),
gan_loss=dict(type='GANLoss', gan_type='hinge'))
train_cfg = dict(
disc_steps=8, gen_steps=1, batch_accumulation_steps=8, use_ema=True)
test_cfg = None
optimizer = dict(
generator=dict(type='Adam', lr=0.0001, betas=(0.0, 0.999), eps=1e-6),
discriminator=dict(type='Adam', lr=0.0004, betas=(0.0, 0.999), eps=1e-6))
| UTF-8 | Python | false | false | 1,001 | py | 785 | biggan_128x128.py | 581 | 0.588412 | 0.531469 | 0 | 32 | 30.28125 | 77 |
davidaparicio/davidaparicio.github.io | 2,018,634,669,937 | 92d9a2fbd955f7c30c8fd6db481877627e17a4d3 | b4fb95d5bc7fc6eda91394f21b86df0caa4a9705 | /scripts/qrcode/qrcode_gen.py | be608c84b5c5b6aa57525da6159f11630e57a2b7 | [
"MIT"
]
| permissive | https://github.com/davidaparicio/davidaparicio.github.io | 3f0be2a577a8553cfddcb98adb4996be94622f70 | 4c24ee985ba4fbd36e6c3a6bf34316e6830e0aeb | refs/heads/master | 2023-08-03T00:30:13.879949 | 2023-08-01T11:16:30 | 2023-08-01T13:20:41 | 405,609,189 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import qrcode
websiteurl = 'https://davidaparicio.gitlab.io'
filename = 'website.png'
versions = 1 #1 to 40
boxsize = 10 #nb of pixels
bordersize = 4 #min thickness of the border
#img = qrcode.make('Your input text')
img = qrcode.make(websiteurl)  # make() requires the payload to encode
qr = qrcode.QRCode(
version=versions,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=boxsize,
    border=bordersize,  # use the constant defined above
)
qr.add_data(websiteurl)
qr.make(fit=True)
img = qr.make_image(fill_color="#000000", back_color="white").convert('RGB')
#img = qr.make_image(fill_color="#160096", back_color="white").convert('RGB')
img.save(filename)
#https://betterprogramming.pub/how-to-generate-and-decode-qr-codes-in-python-a933bce56fd0
""" data = {
'jour' : '31/05/1832',
'heure' : '08h00',
'nom' : 'Galois',
'prenom' : 'Evariste',
'naissance' : '25/11/1811',
'lieu' : 'Bourg-la-Reine',
'adresse' : 'Quelque part 12345 Ville',
'motifs' : 'duel',
}
content = 'Cree le: {jour} a {heure}; Nom: {nom}; Prenom: {prenom}; '\
+ 'Naissance: a {lieu}; Adresse: {adresse}; '\
+ 'Sortie: {jour} a {heure}; Motifs: {motifs};'
qr = qrcode.QRCode(border=0)
qr.add_data(content.format(**data))
qr.make(fit=True) """ | UTF-8 | Python | false | false | 1,177 | py | 128 | qrcode_gen.py | 11 | 0.653356 | 0.609176 | 0 | 43 | 26.395349 | 89 |
xgfelicia/Deep-Learning | 9,191,230,044,566 | 8584a6694c883e7ac46cbbe2501fa9e4cc4e73d2 | c8ccf1915bd05aea1c7fb891acb2f5ef4bcc9ae8 | /vae/vae-vanilla.py | c326c1227221ca7a06786a7b845c0a909011befb | []
| no_license | https://github.com/xgfelicia/Deep-Learning | 98c0d563183e1ba6cb510c9ba17c897867d96464 | 57f5166eb08b0ab85022321ac3cdcb8c1e40cf2a | refs/heads/master | 2020-04-08T06:12:14.579826 | 2019-03-23T20:32:00 | 2019-03-23T20:32:00 | 159,088,813 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd.variable import Variable
from torchvision import transforms, datasets
import torchvision
import argparse
parser = argparse.ArgumentParser(description = "MNIST Testing")
parser.add_argument('--no-cuda', action = 'store_true', default = False)
ARGS = parser.parse_args()
use_cuda = torch.cuda.is_available() and not ARGS.no_cuda
device = torch.device('cuda' if use_cuda else 'cpu')
print(device)
###############################################################
# take MNIST data and tranform input values from [0, 255] to [-1, 1]
def mnist():
out_dir = '../dataset'
train = datasets.MNIST(root = out_dir, train = True, transform = transforms.ToTensor(), download = True)
test = datasets.MNIST(root = out_dir, train = False, transform = transforms.ToTensor())
return train, test
def loss_function(recon_x, x, mu, log_var):
BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
return BCE + KLD
def generate_image(vae):
z = torch.randn(64, 2).to(device)
sample = vae.decoder(z)
torchvision.utils.save_image(sample.view(64, 1, 28, 28), './sample_vae' + '.png')
##########################################################
class VAE(nn.Module):
def __init__(self):
super().__init__()
self.encode_layer = nn.Sequential(
nn.Linear(28 * 28, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU()
)
self.mu = nn.Linear(256, 2)
self.log_var = nn.Linear(256, 2)
self.decode_layer = nn.Sequential(
nn.Linear(2, 256),
nn.ReLU(),
nn.Linear(256, 512),
nn.ReLU(),
nn.Linear(512, 28 * 28),
nn.Sigmoid()
)
def encoder(self, x):
x = self.encode_layer(x)
return self.mu(x), self.log_var(x)
def decoder(self, x):
return self.decode_layer(x)
def forward(self, x):
mu, log_var = self.encoder(x.view(-1, 28*28))
z = self.sampling(mu, log_var)
return self.decoder(z), mu, log_var
def sampling(self, mu, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu) # return z
#####################################################
def training(vae, optimizer, epoch, train_loader):
vae.train()
train_loss = 0
for batch_idx, (data, _) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
recon_batch, mu, log_var = vae(data)
loss = loss_function(recon_batch, data, mu, log_var)
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item() / len(data)))
print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset)))
if __name__ == '__main__':
size = 100
train, test = mnist()
train_loader = torch.utils.data.DataLoader(train, batch_size = size, shuffle = True)
test_loader = torch.utils.data.DataLoader(test, batch_size = size, shuffle = False)
vae = VAE().to(device)
optimizer = optim.Adam(vae.parameters(), lr = 0.001)
# training
for epoch in range(0, 100):
training(vae, optimizer, epoch, train_loader)
generate_image(vae)
| UTF-8 | Python | false | false | 3,707 | py | 8 | vae-vanilla.py | 5 | 0.563259 | 0.538711 | 0 | 128 | 27.945313 | 108 |
Brokenwind/Grabscenic | 13,872,744,410,996 | b5dbc8a429c08381ebfbd0cb79c4faaf3f2ab4f7 | 3b3deed872d6aa9b133e04731ac4d1447be1def6 | /scenic/grab/grab.py | f7f3f0f6b0c7e9dfd799190b91af7e8030b7ab9e | []
| no_license | https://github.com/Brokenwind/Grabscenic | 77b87224bd1129a4e3a0de5511aa3859fa1e6836 | 6a18680644c6c84a016a95d0688c47e603292988 | refs/heads/master | 2020-06-14T15:47:18.767803 | 2016-12-16T13:43:54 | 2016-12-16T13:43:54 | 75,163,829 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# coding=utf-8
import numpy
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from pandas import DataFrame,Series
from scenic import Scenic
from tables import Tables
from baidu import Baidu
from map import BaiduMap
import sys
sys.path.append("..")
from log import Logger
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
class Grab:
def __init__(self):
self._logger = Logger(__file__)
# the entry point of grabing
self.base="http://scenic.cthy.com"
self.provinces = []
#self._browser = webdriver.PhantomJS()
self._browser = webdriver.Firefox()
self.tabopera = Tables();
self.record = open("record.txt","a+")
self.fdate = open("date.txt","a+")
self.fprice = open("price.txt","a+")
self.sprovince = 0
self.spage = 1
self.snum = 0
self.picturenum = 10
self.baidu = Baidu(self._browser)
self.map = BaiduMap()
self.ak = "sh0wDYRg1LnB5OYTefZcuHu3zwuoFeOy"
def __del__(self):
self._browser.quit()
self.record.close()
def getProvinces(self):
'''Get the information of link, area and the number of provinces.
# Process:
1): To get the source code of the entry point (http://scenic.cthy.com) with PhantomJS
2): To find tag which contains the information of provinces
3): Get link,area and number information of every province
# Return:
The return value is a DataFrame contains the following attributes:
index: the specified number of a province which extract from link for further use
link: the relative web address of details
area: the name of province
'''
self._browser.get(self.base)
entry = BeautifulSoup(self._browser.page_source)
map = entry.find("map")
if map:
# the pattern to extract number from link
pattern = re.compile(r".*(\d\d)/")
self._logger.info("got the the tag containing the information of provinces")
for item in map.find_all("area"):
number = re.findall(pattern,item.attrs["href"])
if number:
self.provinces.append(number[0])
else:
continue
else:
self._logger.info("sorry,did not get the province map data")
return None
return self.provinces
def searchAll(self):
for i in range(self.sprovince,len(self.provinces)):
self.searchScenic(i)
def searchScenic(self,num):
"""Extract scenics information of a spicified province.
# Parameters:
num: the number of a province which you want to grab scenic information
# Return:
"""
prefix = "/scenicSearch/"
suffix = "-0-0-0-0-1.html"
self._browser.get(self.base+prefix+str(self.provinces[num])+suffix)
first = BeautifulSoup(self._browser.page_source)
""" The content of tags:
# the total records
[<span class="f14 point">135</span>,
# how many pages
<span class="f14 point">14</span>,
# the number of records of one page
<span class="f14 point">10</span>]
"""
palist = first.find(id="PagerList")
if palist:
tags = palist.select("li > span")
else:
return False
if tags and len(tags) >= 2:
pageCount = int(tags[1].string)
self._logger.info("total: "+tags[0].string+" records. "+"total "+tags[1].string+" pages")
else:
return False
for i in range(self.spage,pageCount+1):
self.searchSeniceSpiPage(num,str(i))
# it is import, it must be reset to 1
self.spage = 1
return True
def searchSeniceSpiPage(self,num,pagenum):
"""Search scenics information from a specified page of a specified province
# Parameters:
num: the number of a province which you want to grab scenic information
page: where now you want to extract scenic information from
# Return:
"""
addr = "/scenicSearch/"+str(self.provinces[num])+"-0-0-0-0-"+str(pagenum)+".html"
# record the current searching page
self._browser.get(self.base+addr)
page = BeautifulSoup(self._browser.page_source)
sightTags = page.select("div.sightlist > div.sightshow > div.sightdetail > h4 > a")
link = ""
if sightTags:
for i in range(self.snum,len(sightTags)):
# recording the number of province,page,item for recovery
self.record.write(str(num)+" "+str(pagenum)+" "+str(i)+"\n")
self._logger.info("current position: "+str(num)+" "+str(pagenum)+" "+str(i))
self._logger.info("got the link of "+sightTags[i].string)
link = sightTags[i].attrs["href"]
self.extractScenicInfor(link)
else:
self._logger.error("searchSeniceSpiPage: can not get the list of scenics")
return False
# it is import, it must be reset to 1
self.snum = 0
return True
def extractScenicInfor(self,link):
"""Extract a scenic information with the given scenic address
# Parameters:
link: the address where you can get detailed information of scenic
# Return:
"""
scenic = self.extractScenicAbout(link)
if not scenic:
return False;
scenic = self.remedy(scenic)
scenic = self.remedyMap(scenic)
self.tabopera.insertData(scenic)
return True
def remedy(self,scenic):
"""if the return of function extractScenicAbout if not enough,we need to access baidu for more information
"""
openpat = u"开放时间"
suggpat = u"时长"
areapat = u"面积"
pricepat = u"门票"
# this is for getting longitude and latitude
scenic.mapname = scenic.name
# remedy pictures
picnum = len(scenic.images)
if picnum < 10:
self._logger.info("There are "+str(picnum)+" pictures.Getting the reset from baidu image")
imgs = self.baidu.image(scenic.name,self.picturenum - len(scenic.images))
if imgs:
scenic.images.extend(imgs)
if not scenic.description:
self._logger.info("Got details from baike")
baike = self.baidu.baike(scenic.name)
if not baike:
self._logger.error("Remedy: can not got information from baidu baike")
return scenic
if "detail" in baike.keys():
scenic.description = baike["detail"]
else:
baike = self.baidu.baike(scenic.name,False)
if not baike:
self._logger.error("Remedy: can not got information from baidu baike")
return scenic
# use the name in baike for baidu searching
if "name" in baike.keys():
scenic.mapname = baike["name"]
if "basic" in baike.keys():
basic = baike["basic"]
for item in basic.keys():
if re.findall(openpat,item):
times = re.findall(r"(\d+[:|;]\d+).*(\d+[:|;]\d+)",basic[item])
if times:
scenic.opentime = times[0][0]
scenic.closetime = times[0][1]
else:
scenic.opentime = "00:00"
scenic.closetime = "23:00"
if re.findall(suggpat,item):
scenic.suggest = basic[item]
if re.findall(pricepat,item):
scenic.price = basic[item]
if re.findall(areapat,item):
scenic.area = basic[item]
if not scenic.opentime:
scenic.opentime = "00:00"
if not scenic.closetime:
scenic.closetime = "23:00"
if not scenic.price:
scenic.price = "0"
if not scenic.area:
scenic.area = "未知"
if not scenic.symbol:
if scenic.images:
scenic.symbol = scenic.images[0]
return scenic
def remedyMap(self,scenic):
# map relatives:
mapret = self.map.getGeoAddress(scenic.mapname,self.ak)
if mapret:
if "location" in mapret.keys():
scenic.latitude = "%.13f" % mapret["location"]["lat"]
scenic.longitude = "%.13f" % mapret["location"]["lng"]
if "precise" in mapret.keys():
scenic.precise = str(mapret["precise"])
if "confidence" in mapret.keys():
scenic.confidence = str(mapret["confidence"])
return scenic
def extractScenicAbout(self,link):
"""Extract the information of introduction,geographic postion,type,quality,class
# Parameters:
link: the address where you can get detailed information of scenic
# Return:
the return value is a dict which has fowllowing attrs:
province:
city:
types:
level:
fits:
description:
images:
"""
scenic = Scenic()
# got the symbol picture and the name of scenic at index page
self._browser.get(link)
first = BeautifulSoup(self._browser.page_source)
symbol = first.select("div.sightfocuspic > img")
if symbol:
scenic.symbol = symbol[0].attrs["src"] and self.base+symbol[0].attrs["src"] or ""
scename = first.select("div.sightprofile > h4")
if scename:
scenic.name = scename[0].string
# if canot get the scenic name,it means the pages is wrong
else:
self._logger.error("Cannot got the scenic name. Is the page is wrong,please check it")
return None
# get detailed information about scenic at about page
addr = link+"about.html"
self._browser.get(addr)
about = BeautifulSoup(self._browser.page_source)
relative = about.select("div.main > div.wrap > div.pright > div.pfood > ul#RightControl11_ScenicBaseInfo > li")
if len(relative) == 5:
# get province and city information
pos = relative[0].select("a")
# It will only be right when we got two extract two infor
if len(pos) == 2:
if pos[0].string:
scenic.province = pos[0].string
if pos[1].string:
scenic.city = pos[1].string
self._logger.info("current position: province: "+scenic.province+" city: "+scenic.city)
else:
return None
# get the type of scenic
for item in relative[1].select("a"):
if item.string:
scenic.types.append(item.string)
# get the quality of scenic
qua = relative[2].find("a")
if qua:
scenic.quality = qua.string
# get the scenic level
lev = relative[3].find("a")
if lev:
scenic.level = lev.string
# get the fit time of the scenic
for item in relative[4].select("a"):
if item.string:
scenic.fits.append(item.string)
else:
self._logger.error("there is not ralative information"+str(len(relative)))
return None
# get the description of the scenic
desc = about.find(id="AboutInfo")
if desc:
for s in desc.stripped_strings:
scenic.description = scenic.description + s + "\n"
for item in desc.find_all("p"):
# if a tag p contains image address,it always has the style or align attr
attrs = item.attrs
if "style" in attrs.keys() or "align" in attrs.keys():
for img in item.find_all("img"):
if not img.attrs["src"]:
continue
scenic.images.append(self.base+img.attrs["src"])
else:
for s in item.stripped_strings:
scenic.description = scenic.description + s + "\n"
else:
self._logger.info("there is no description information and scenic pictures")
scenic.website = link
return scenic
def extractScenicAttractions(self,link):
"""extract information of attractions of a specified scenic
# Parameters:
link: the address where you can get attractions of scenic
# Return:
The return value is a list which the item is dict,each item contains the following attrs:
"""
attractions = []
addr = link+"about.html"
self._browser.get(addr)
page = BeautifulSoup(self._browser.page_source)
lists = page.select("")
def startGrab(self):
content = self.record.readlines()
# if do not have record
if len(content) != 0:
line = content[len(content)-1]
strs = line.split(" ")
self.sprovince = int(strs[0])
self.spage = int(strs[1])
self.snum = int(strs[2])
self.getProvinces()
self.searchAll()
if __name__ == "__main__":
grab = Grab()
grab.getProvinces()
#grab.extractScenicInfor("http://scenic.cthy.com/scenic-12654/")
grab.startGrab()
#grab.searchScenic(-2)
"""
result = grab.extractScenicAbout("http://scenic.cthy.com/scenic-10046/")
print result.symbol
print result.name
"""
| UTF-8 | Python | false | false | 13,912 | py | 10 | grab.py | 7 | 0.55746 | 0.549611 | 0 | 360 | 37.577778 | 119 |
nchen0/Algorithms | 14,499,809,619,865 | 106bcc0c5f23a8422c73b9d8736fc4cb02e2c2fa | 327871e65cdf46392fdc988fc7c2f3d88343426b | /climbing_stairs/climbing_stairs.py | 8cba2b5cc208b2604166514ad8c8b2ad1ed613b5 | []
| no_license | https://github.com/nchen0/Algorithms | 0960c7c9ea19cfe8cab92da953af5269666a4cc2 | c5f98e88813ed6e31df2e1b2d4780f09d3b60aa1 | refs/heads/master | 2020-03-27T19:56:11.050857 | 2018-09-04T21:52:54 | 2018-09-04T21:52:54 | 147,022,656 | 0 | 1 | null | true | 2018-09-01T18:27:33 | 2018-09-01T18:27:32 | 2018-08-30T18:37:40 | 2018-09-01T17:34:08 | 10 | 0 | 0 | 0 | null | false | null | #!/usr/bin/python
import sys
import itertools
def climbing_stairs(n):
if n == 0:
return 1
original_list = []
# Find ALL combinations of 1,2,3 possible, with repeats, of n times and add it to original_list.
for i in range(1, n+1):
original_list.append(list(itertools.product('123', repeat=i)))
# Extract each nested list, and put them as integers.
extracted_list = []
for nested_list in original_list:
for inner_list in nested_list:
extracted_list.append((list(map(int, inner_list))))
# Return any list combination that adds up to n.
return len([sum(j) for j in extracted_list if sum(j) == n])
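# A linear-time alternative (tribonacci-style DP) computing the same count;
# added as a sketch for large n, not part of the original solution:
def climbing_stairs_dp(n):
    # ways[i] = number of ordered 1/2/3-step sequences summing to i
    ways = [1, 1, 2] + [0] * max(0, n - 2)
    for i in range(3, n + 1):
        ways[i] = ways[i - 1] + ways[i - 2] + ways[i - 3]
    return ways[n]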
# print(climbing_stairs(15))  # disabled: this import-time call enumerates ~3^15 tuples and stalls the script
if __name__ == "__main__":
# Test out your implementation from the command line
# with `python climbing_stairs.py [n]` with different n values
if len(sys.argv) > 1:
num_stairs = int(sys.argv[1])
print("There are {ways} ways for a child to jump {n} stairs.".format(
ways=climbing_stairs(num_stairs), n=num_stairs))
else:
print('Usage: climbing_stairs.py [num_stairs]')
| UTF-8 | Python | false | false | 1,109 | py | 4 | climbing_stairs.py | 4 | 0.640216 | 0.627592 | 0 | 36 | 29.805556 | 96 |
elazarg/gender_dots | 1,125,281,440,873 | 8ee5bd53f1f64e551a9f506a7d88d41c910d3f7f | a4650bc66e5c334b905948a5857321cbc7e9b501 | /run_experiment.py | 0d77d5e618642312ed18670fba158477bd4fe0f4 | []
| no_license | https://github.com/elazarg/gender_dots | b91fbbf820bc961d4c4e57680dc7380110b01bb9 | bd07685cc1d08d41d61fd66b95078f37bbe4ef2e | refs/heads/main | 2023-06-22T00:40:20.194782 | 2021-07-26T14:29:40 | 2021-07-26T14:29:40 | 381,103,336 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
from pathlib import Path
import csv
from hebrew import remove_niqqud
import external_apis
def read_tsv(filename):
with open(filename, encoding='utf8', newline='') as f:
yield from csv.reader(f, delimiter='\t')
def make_filename(system, category):
return f'data/{system}/{category}.tsv'
def run_experiment(system, category):
fetch = external_apis.SYSTEMS[system]
rows = list(read_tsv(make_filename('expected', category)))
results_file = make_filename(system, category)
Path(results_file).parent.mkdir(parents=True, exist_ok=True)
with open(results_file, 'w', encoding='utf8') as f:
for i, bookid, author, book, word, index, expected_male, expected_female, original in rows:
index = int(index)
actual_male = fetch(remove_niqqud(expected_male))
actual_female = fetch(remove_niqqud(expected_female))
success_male = int(expected_male.split()[index] == actual_male.split()[index])
success_female = int(expected_female.split()[index] == actual_female.split()[index])
print(i, bookid, author, book, word, index,
actual_male, actual_female, original,
success_male, success_female,
file=f, sep='\t')
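# Each emitted row ends with: actual_male, actual_female, original,
# success_male, success_female -- the tail columns that print_results() reads back.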
def print_row(system, size, masc, fem):
if isinstance(masc, str):
print(f'{system:>13} {size:>5} {masc:>9} {fem:>9} ratio')
else:
print(f'{system:>13} {size:>5} {masc:>9.2%} {fem:>9.2%} {masc/fem:>5.3}')
def print_results(system, category):
results_file = make_filename(system, category)
rows = list(read_tsv(results_file))
female_success = sum(int(row[-1]) for row in rows) / len(rows)
male_success = sum(int(row[-2]) for row in rows) / len(rows)
print_row(category, len(rows), male_success, female_success)
def print_results_original(system, category):
results_file = make_filename(system, category)
rows = list(read_tsv(results_file))
original_success = sum(int(row[-2] if int(row[-3]) == 1 else row[-1]) for row in rows) / len(rows)
copy_success = sum(int(row[-1] if int(row[-3]) == 1 else row[-2]) for row in rows) / len(rows)
print_row(category, len(rows), original_success, copy_success)
def print_count_original(system, category):
results_file = make_filename(system, category)
rows = list(read_tsv(results_file))
original_number = sum((1 if int(row[-3]) == 1 else 0) for row in rows)
copy_number = sum((1 if int(row[-3]) == 2 else 0) for row in rows)
print(f'{category:>13} {original_number} {copy_number}')
if __name__ == '__main__':
categories = ['ART-OCC', 'ART-VERBS', 'KAF', 'NLY-HITPAEL', 'NLY-PAAL', 'NLY-PIEL', 'TAV-PAAL']
parser = argparse.ArgumentParser(description='Run diacritization tests.')
parser.add_argument('system', metavar='system', type=str, nargs='?', default='Dicta',
choices=['Dicta', 'Nakdimon'],
help='Diacritization system.')
parser.add_argument('category', metavar='category', type=str, nargs='+',
default=categories,
choices=categories,
help='Tests to run')
parser.add_argument('--no-classify', action="store_true", default=False)
parser.add_argument('--original', action="store_true", default=False)
parser.add_argument('--count', action="store_true", default=False)
args = parser.parse_args()
if args.original:
print_row(args.system, "#", "ORIGNAL (%)", "COPY (%)")
elif args.count:
print(args.system, "#ORIGINAL", "#COPY")
else:
print_row(args.system, "#", "MASC (%)", "FEM (%)")
for category in args.category:
if not args.no_classify:
run_experiment(args.system, category)
if args.original:
print_results_original(args.system, category)
elif args.count:
print_count_original(args.system, category)
else:
print_results(args.system, category)
| UTF-8 | Python | false | false | 4,043 | py | 1,133 | run_experiment.py | 6 | 0.617611 | 0.608706 | 0 | 104 | 37.875 | 102 |
oleksis/cubadebatebot | 5,085,241,290,950 | 6d459175da41eb555141f4b7fa3dfd9c34fd855c | ad2b236d30ef4433fe37de516409a1dbcd768729 | /conf/telegram.py | 2f48385c8ad7b44847e7390b6e8ac8ce0c83b157 | [
"MIT"
]
| permissive | https://github.com/oleksis/cubadebatebot | cab8b4074bb5e09ab5731b76da61826a64b906dc | 74c7f788a308c778eac9821477c675b46069f6b2 | refs/heads/master | 2023-05-24T16:23:35.329017 | 2021-04-22T22:49:50 | 2021-04-22T22:49:50 | 265,373,234 | 0 | 0 | MIT | false | 2023-05-22T22:44:36 | 2020-05-19T21:38:08 | 2021-04-22T22:45:32 | 2023-05-22T22:44:36 | 97 | 0 | 0 | 1 | Python | false | false | import os
TG_API_ID = os.getenv("TG_API_ID")
TG_API_HASH = os.getenv("TG_API_HASH")
TG_TOKEN = os.getenv("TG_TOKEN")
TG_AUTHORIZATION = os.getenv("TG_AUTHORIZATION")
TG_CHANNEL = "@CubaDebateNews"
TG_BOT = "@CubaDebateBot"
TG_SESSION = "cubadebatenews"
| UTF-8 | Python | false | false | 254 | py | 8 | telegram.py | 2 | 0.708661 | 0.708661 | 0 | 9 | 27.222222 | 48 |
jain7727/html | 7,816,840,514,276 | e85b91d30e7f7c4afb7585cd414579dcff666245 | 5849acd68ed0ba545c767663911ee0365cad42c2 | /functional programming/fp1.py | 6e6bcc68ee027579e3ca5f69910d6da6cdc26310 | []
| no_license | https://github.com/jain7727/html | 785e6731634732e63fc4d76d52524f8707d53bb6 | c056d6023e9fc05b8cea1afbc941bb35a63efcf5 | refs/heads/master | 2023-04-30T21:39:22.710943 | 2021-05-20T08:26:24 | 2021-05-20T08:26:24 | 369,136,223 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # # f=lambda x,y:x+y
# # print(f(20,29))
# # f=lambda x,y:x-y
# # print(f(20,29))
# # f=lambda x,y:x*y
# # print(f(20,29))
# # f=lambda x,y:x/y
# # print(f(20,29))
#
#
# #map(fn,iterable)
# #filter(fn,iterable)
# # lst=[21,22,23,24,25,26,27,28,29]
#
# # def sq(num):
# # return num*num
# #
# #
# # s=list(map(sq,lst))
# # print(s)
# #
# # s=list(map(lambda num:num*num,lst))
# # print(s)
# # # filter
# # def sq(num):
# # return num%2==0
# #
# #
# # s=list(filter(sq,lst))
# # print(s)
# #
# #
# #
# # lst=[21,22,23,24,25,26,27,28,29]
# #
# # s=list(filter(lambda num:num%2==0,lst))
# # print(s)
#
# #list comprehension
#
# newlist=[]
# for i in range(1,51):
# newlist.append(i)
# print(newlist)
#
#
# lst=[i for i in range(1,21)]
#
# print(lst)
#
#
# lst1=[i*i if i%2==0 else i*i*i for i in range(1,21)]
# print(lst1)
#
#
# lst2=[(i,"even") if i%2==0 else (i,"odd") for i in range(1,21)]
# print(lst2)
#
#
#
# a=10
# def printa():
# a=2
# global a
# # print(a)
# printa()
# print(a)
stack=[]
size=int(input("enter the size"))
top=0
n=0
def push():
global top,size
if top>=size:
print("stack is full")
else:
a = int(input("enter the element to be inserted"))
stack.append(a)
top += 1
print(stack)
def pop():
global top, size
if top <= 0:
print("stack is empty")
else:
stack.pop()
top-=1
print(stack)
while n!=1:
print("enter the operation to be done")
operation=int(input("1)PUSH 2)POP"))
if operation==1:
push()
else:
pop()
#
# queue=[]
# top=0
# n=0
# size=int(input("enter the size"))
#
#
#
# def insert():
# global top,size
# if top>=size:
# print("queue is full")
# else:
# b=int(input("enter the element to be inserted"))
# queue.insert(b)
# top+=1
# print(queue)
#
# def delete():
# global top,size
# if top<=0:
# print("queue is empty")
# else:
# queue.clear()
# top-=1
# print(queue)
#
#
#
# while n!=1:
# print("enter the operation")
# a=int(input("1) insert 2)delete"))
# if a==1:
# insert()
# else:
# delete()
| UTF-8 | Python | false | false | 2,243 | py | 77 | fp1.py | 77 | 0.497102 | 0.453856 | 0 | 151 | 13.728477 | 65 |
ultimatecoder/mobilenetworks-4gLTE-tools | 2,765,958,951,641 | 67efa40249c5f12e774af467daf79ffb757ecf56 | c05fa51e7516e546ddb492d35279542f8fde0bb0 | /whois/fetcher.py~ | 01e76e4677a711ed924b287d0c7b9be5ed4d5d5e | [
"Apache-2.0"
]
| permissive | https://github.com/ultimatecoder/mobilenetworks-4gLTE-tools | 512e5d53f5eefc32db68a77cb31e1b0c6de6aaf5 | 98c10bbc29ecc6dba31694162b9c1d66063fdd54 | refs/heads/master | 2021-05-15T05:13:28.241331 | 2018-01-18T12:49:29 | 2018-01-18T12:49:29 | 117,959,213 | 0 | 0 | null | true | 2018-01-18T09:09:51 | 2018-01-18T09:09:49 | 2018-01-18T08:46:21 | 2018-01-16T12:29:58 | 14 | 0 | 0 | 0 | null | false | null | from urllib import error, request
import json
URL = "https://ep.api.getfastah.com/whereis/json/{}"
def fetch_details(ip, token):
url = URL.format(ip)
headers = {'Fastah-Key': token}
req = request.Request(url, headers=headers)
response = None
try:
with request.urlopen(req) as connection:
response = connection.read()
return response.decode()
except error.HTTPError as e:
if e.code in [400, 404]:
print("IP address {} is invalid.".format(ip))
elif e.code == 401:
print("Token : {} is invalid.".format(token))
else:
print("Invalid response received from the server.")
print(e.reason)
except error.URLError as e:
custom_message = ('Problem while making connection.'
'Please check the Internet connection')
print(e.message)
print(custom_message)
def fetch_multiple_details(ips, token):
ips = set(ips)
responses = []
for ip in ips:
response = fetch_details(ip, token)
if response:
response = json.loads(response)
responses.append({ip: response})
else:
return None
return responses
| UTF-8 | Python | false | false | 1,238 | 4 | fetcher.py~ | 3 | 0.584814 | 0.577544 | 0 | 44 | 27.136364 | 65 |
|
weivis/My-Website | 11,716,670,809,345 | 5f2b897b7dc4c4ad6bbae6966af19493c75970fb | cfa8464383fd4bf0ac3542bb01056a66a46d223b | /Api/app/upload/views.py | 5660c6d37008a868b3a37d712f92a75b93d9953b | []
| no_license | https://github.com/weivis/My-Website | 7f6a4078a87ed58f348addc4279032a986167f04 | 839cb0118673e130f6409beebcd5383c6a974f6d | refs/heads/master | 2023-01-23T11:23:31.724974 | 2020-10-10T09:24:28 | 2020-10-10T09:24:28 | 249,821,040 | 2 | 2 | null | false | 2023-01-05T17:08:43 | 2020-03-24T21:23:01 | 2021-10-03T11:18:35 | 2023-01-05T17:08:43 | 23,275 | 1 | 1 | 28 | Python | false | false | import hashlib
import os
import random
from datetime import datetime
# from io import *
from app.Kit import GetRequestFormData, GetRequestJsonData
from app.ReturnCode import ReturnCode
from app.Extensions import db
from app.Config import SERVER_GULAOBURL
# 关于 UPLOAD_KEY和UPLOAD_KEY_FLOAD的用法
UPLOAD_KEY = ['head','article_cover', 'article_img', 'link', 'photo']
UPLOAD_KEY_FLOAD = {
'head':'/head',
'article_cover':'/article/cover',
'article_img':'/article/img',
'link':'/link/cover',
'photo':'/photo'
}
'''
UPLOAD_KEY 是上传时候要使用的key
UPLOAD_KEY_FLOAD 是上传的key对于的文件储存跟目录
'''
def FileCompress(files):
'''图片压缩'''
print('压缩')
from PIL import Image
# value = BytesIO()
file=Image.open(files)
print(file.size)
# filew, fileh=openfile.size
size = (530, 1000)
file.thumbnail(size)
file = file.crop((0, 0, 500, 300))
# openfile.save(value, format="JPEG")
print(file.size)
return file
def CreateNewFilename(ext):
'''生成新的文件名'''
return datetime.strftime(datetime.now(),'%Y%m%d%H%M%S') + '{:03d}'.format(random.randint(0, 999)) + ext
def QueryFileName(filestr):
'''
获取文件名
返回
1.文件名(不包含文件后缀)
2.后缀
'''
pach , filename = os.path.split(filestr)
return os.path.splitext(filename)
def FileExtLegitimate(ext, uploadtype):
if ext:
if uploadtype == 'image':
if str(ext) not in ['.jpeg','.jpg','.png', '.jpg']:
return False
else:
return True
return False
return False
def upload_file(request):
try:
file = request.files['file']
except:
return 400, '错误: 没有文件', ''
# userkey = GetRequestFormData(request, 'userKey', None)
# token = GetRequestFormData(request, 'authToken', None)
# if not userkey or not token:
# return REQUEST_ERROR_METHOD_CODES, '请求参数有误1', ''
# obj = UserAccount.query.filter(UserAccount.id == userkey).first()
# if not obj:
# return REQUEST_ERROR_METHOD_CODES, '请求参数有误2', ''
# if not token:
# return REQUEST_ERROR_METHOD_CODES, '请求参数有误3', ''
# if obj.token != token:
# return ERROR_TOKENAUTHCODE, 'Token已失效或不正确, 请重新登录', ''
upload_key = GetRequestFormData(request, 'uploadKey', None)
if not upload_key:
return 400, '错误: Key值不能为空', {}
filename, ext = QueryFileName(file.filename)
if not FileExtLegitimate(ext, 'image'):
return 400, '文件类型不允许', {}
if upload_key not in UPLOAD_KEY:
return 400, '错误: 不允许使用的Key值', {}
newfilename = CreateNewFilename(ext)
if upload_key in ['article_cover']:
files = FileCompress(file)
else:
files = file
files.save(os.path.join(os.path.abspath('app/static/' + UPLOAD_KEY_FLOAD[str(upload_key)] + "/"), newfilename))
return ReturnCode.ok, 'ok', {
'lodpath': SERVER_GULAOBURL + '/static/' + UPLOAD_KEY_FLOAD[str(upload_key)] + '/' + newfilename,
'ospath': UPLOAD_KEY_FLOAD[str(upload_key)] + '/' + newfilename
}
| UTF-8 | Python | false | false | 3,301 | py | 109 | views.py | 62 | 0.613198 | 0.600784 | 0 | 112 | 26.330357 | 115 |
LeeJeongHwi/Study | 3,178,275,842,506 | 7d5debdcc865eb3c2fe60a7902f9c1841900eb50 | 19d218b0bca9e0d893f34239fe3e97aa5a3116d9 | /Algorithm/수학/11653-소인수분해.py | a337bc9ccb0ec8b63aacd5405e6c4290b4396096 | []
| no_license | https://github.com/LeeJeongHwi/Study | 9fc26003f4db11e064f2f695208bd0e3c3d343ac | 012389a533fe371946fa2c2c178c5e0b87abae83 | refs/heads/master | 2020-10-01T18:04:04.889660 | 2020-09-06T15:09:36 | 2020-09-06T15:09:36 | 227,592,922 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import stdin
n = int(stdin.readline())
if n==1:
pass
else:
while True:
if n==1:
break
for i in range(2,n+1):
if n%i == 0:
n//=i
print(i)
break
| UTF-8 | Python | false | false | 176 | py | 292 | 11653-소인수분해.py | 280 | 0.551136 | 0.522727 | 0 | 13 | 12.538462 | 25 |
wshon/MicroPython | 10,754,598,116,841 | 76830e5d7559ea382f5bb406e5f65c9460542833 | 42347c193b46e5f5c743c036ffa311db05873de9 | /keyboard/main.py | 3a7a1d7c0edf27b2a6f596516b3bd198d9f4c532 | []
| no_license | https://github.com/wshon/MicroPython | a859407d820a8eec5323a4dd07bc03318dfaa72a | 8e9d13bb3b6be9b20f551b71826ae8cc893f1580 | refs/heads/master | 2022-12-24T18:19:38.058442 | 2020-09-28T00:20:40 | 2020-09-28T00:20:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import _thread
import micropython
import pyb
micropython.alloc_emergency_exception_buf(100)
SEND_SIZE = 33 # report is 33 bytes long
RECV_SIZE = 64
# ROW = ['B4', 'B5', 'B6', 'B7', 'B8', 'C14']
# COL = ['B12', 'B13', 'B14', 'B15', 'A8', 'A9', 'A10', 'A15', 'B9', 'B1', 'B0', 'B2', 'A7', 'C15', 'A1', 'A2']
# C14->B3 A7->B10 C15->A3 B2 A0
ROW_PINS = ['B4', 'B5', 'B6', 'B7', 'B8', 'B3']
COL_PINS = ['B12', 'B13', 'B14', 'B15', 'A8', 'A9', 'A10', 'A15', 'B9', 'B1', 'B0', 'A0', 'B10', 'A3', 'A1', 'A2']
START, COUNT = 0x04, 26 + 10
(K_A, K_B, K_C, K_D, K_E, K_F, K_G, K_H, K_I, K_J, K_K, K_L, K_M, K_N, K_O, K_P, K_Q, K_R, K_S, K_T, K_U, K_V, K_W, K_X,
K_Y, K_Z, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9, K_0) = range(START, START + COUNT)
START, COUNT = 0x28, 10 + 1 + 2 + 1 + 4
(K_ENT, K_ESC, K_BSPC, K_TAB, K_SPC, K_MINS, K_EQL, K_LBRC, K_RBRC, K_BSLS, _,
K_SCLN, K_QUOT, K_GRV, K_COMM, K_DOT, K_SLSH, K_CAPS) = range(START, START + COUNT)
START, COUNT = 0x3A, 12 + 9 + 4
(K_F1, K_F2, K_F3, K_F4, K_F5, K_F6, K_F7, K_F8, K_F9, K_F10, K_F11, K_F12,
K_PSCR, K_SLCK, K_PAUS, K_INS, K_HOME, K_PGUP, K_DEL, K_END, K_PGDN,
K_RGHT, K_LEFT, K_DOWN, K_UP) = range(START, START + COUNT)
START, COUNT = 0xE0, 8
(K_LCTL, K_LSFT, K_LALT, K_LGUI, K_RCTL, K_RSFT, K_RALT, K_RGUI) = range(START, START + COUNT)
K_FN = 0xFF
_ = __ = ___ = ____ = 0
KEY_MAP = [
[K_ESC, K_F1, K_F2, K_F3, K_F4, K_F5, K_F6, K_F7, K_F8, K_F9, K_F10, K_F11, K_F12, K_PSCR, K_SLCK, K_PAUS],
[K_GRV, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9, K_0, K_MINS, K_EQL, K_BSPC, K_INS, K_PGUP],
[K_TAB, K_Q, K_W, K_E, K_R, K_T, K_Y, K_U, K_I, K_O, K_P, K_LBRC, K_RBRC, K_BSLS, K_DEL, K_PGDN],
[K_CAPS, K_A, K_S, K_D, K_F, K_G, K_H, K_J, K_K, K_L, K_SCLN, K_QUOT, ___, K_ENT, K_HOME, K_END],
[K_LSFT, K_Z, K_X, K_C, K_V, K_B, K_N, K_M, K_COMM, K_DOT, K_SLSH, ___, ___, K_RSFT, K_UP, ____],
[K_LCTL, K_LGUI, K_LALT, __, _, K_SPC, _, _, _, K_RALT, K_FN, _, K_RCTL, K_LEFT, K_DOWN, K_RGHT],
]
KEY_MAP_FN = [
[K_ESC, K_F1, K_F2, K_F3, K_F4, K_F5, K_F6, K_F7, K_F8, K_F9, K_F10, K_F11, K_F12, K_PSCR, K_SLCK, K_PAUS],
[K_GRV, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9, K_0, K_MINS, K_EQL, K_BSPC, K_INS, K_PGUP],
[K_TAB, K_Q, K_W, K_E, K_R, K_T, K_Y, K_U, K_I, K_O, K_P, K_LBRC, K_RBRC, K_BSLS, K_DEL, K_PGDN],
[K_CAPS, K_A, K_S, K_D, K_F, K_G, K_H, K_J, K_K, K_L, K_SCLN, K_QUOT, ___, K_ENT, K_HOME, K_END],
[K_LSFT, K_Z, K_X, K_C, K_V, K_B, K_N, K_M, K_COMM, K_DOT, K_SLSH, ___, ___, K_RSFT, K_PGUP, __],
[K_LCTL, K_LGUI, K_LALT, __, __, K_SPC, _, _, _, K_RALT, K_FN, _, K_RCTL, K_HOME, K_PGDN, K_END],
]
def thread_entry(self):
while True:
if self.hid is not None:
# self.hid.recv(self.recv_buf)
# print(self.recv_buf)
buf = self.hid.recv(64)
print(buf)
pass
pass
class KeyBoard:
HEAD = 2
def __init__(self):
self.hid = pyb.USB_HID() if 'HID' in pyb.usb_mode() else None
if self.hid is not None:
_thread.start_new_thread(thread_entry, (self,))
self.send_buf = bytearray(SEND_SIZE)
self.recv_buf = bytearray(RECV_SIZE)
self.fn = False
self.row_pins = [pyb.Pin(x) for x in ROW_PINS]
self.col_pins = [pyb.Pin(x) for x in COL_PINS]
for row_pin in self.row_pins:
row_pin.init(pyb.Pin.OUT_OD)
row_pin.high()
for col_pin in self.col_pins:
col_pin.init(pyb.Pin.IN, pull=pyb.Pin.PULL_UP)
@property
def key_map(self):
return KEY_MAP_FN if self.fn else KEY_MAP
def _get_index(self, code):
if code > 0xdf:
return 0, 1 << (code & 0x07)
return (code >> 3) + self.HEAD, 1 << (code & 0x07)
def _check(self, code):
byte_index, bit_index = self._get_index(code)
return self.send_buf[byte_index] & bit_index
def _set_key(self, code):
if code == 0xff:
self.fn = True
return
byte_index, bit_index = self._get_index(code)
self.send_buf[byte_index] = self.send_buf[byte_index] | bit_index
def _reset_key(self, code):
if code == 0xff:
self.fn = False
return
byte_index, bit_index = self._get_index(code)
self.send_buf[byte_index] = self.send_buf[byte_index] & ~bit_index
def _scan_matrix(self):
for row, row_pin in enumerate(self.row_pins):
row_pin.low()
for col, col_pin in enumerate(self.col_pins):
if col_pin.value() == 0:
self._set_key(self.key_map[row][col])
else:
self._reset_key(self.key_map[row][col])
row_pin.high()
if self._check(self.key_map[5][5]):
pyb.LED(1).on()
else:
pyb.LED(1).off()
def run(self):
while True:
self._scan_matrix()
if self.hid is not None:
pyb.LED(1).on()
self.hid.send(self.send_buf)
pyb.LED(1).off()
else:
if 'HID' in pyb.usb_mode():
self.hid = pyb.USB_HID()
if __name__ == '__main__':
kb = KeyBoard()
kb.run()
| UTF-8 | Python | false | false | 5,230 | py | 2 | main.py | 2 | 0.498662 | 0.459656 | 0 | 142 | 35.830986 | 120 |
AbeIka/add-ratebase-rule-deny-ip | 12,326,556,185,695 | 4ed424c95a3a63df8de6da95451a6341b883a571 | 7d0a05dce673d25b6d8c9cfd16f3dfcfd3416300 | /add-ratebase-rule-deny-ip.py | ca58776145da361358d241471a1c68bfed3d7d8a | []
| no_license | https://github.com/AbeIka/add-ratebase-rule-deny-ip | a261fc61f1baddbdb88ed049977a82fbafb39a9c | e338137d65b588fabfca6338c79eb4d1863346bd | refs/heads/main | 2023-06-17T00:26:06.291861 | 2021-07-09T08:42:33 | 2021-07-09T08:42:33 | 384,339,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import boto3
import os
def lambda_handler(event, context):
client = boto3.client('wafv2')
try:
# Get IP addresses blocked by rate-based rules
responseAddips = client.get_rate_based_statement_managed_keys(
Scope=os.environ['WEBACL_SCOPE'],
WebACLName=os.environ['WEBACL_NAME'],
WebACLId=os.environ['WEBACL_ID'],
RuleName=os.environ['RATEBASERULE_NAME']
)
# Get the list of IPv4 blocked by rate-based rules
responseAddips = responseAddips['ManagedKeysIPV4']['Addresses']
# If there are no IPs blocked by the rate-based rule, exit
if len(responseAddips) == 0:
print("There are no IP addresses blocked by the rate-based rule")
return()
# The following will be executed when there are IPs blocked by the rate-based rule
# Get an existing IP set.
IPSets = client.get_ip_set(
Name=os.environ['IPSETRULE_NAME'],
Scope=os.environ['WEBACL_SCOPE'],
Id=os.environ['IPSETRULE_ID']
)
IPSets = IPSets['IPSet']['Addresses']
# Show IPs blocked by rate-based rules
print("The IPs detected by the rate-based rule are as follows")
print(responseAddips)
# Display the current IP Sets.
print("The current IP Sets are as follows")
print(IPSets)
# Create a list of IPs that are blocked by rate-based rules and are not registered in the IP Set
addIPList = list(set(responseAddips) - set(IPSets))
# Remove duplicates from IP list
addIPList=list(set(addIPList))
# If the IP being blocked by the rate-based rule is already registered in IPSet, it will be terminated
if len(addIPList) == 0:
print("The IP blocked by the rate-based rule has already been registered in IPSet.Ends the process")
return()
# Display the IP address to be added
for ip in addIPList:
print("Add " + ip + " to the IP set ")
# update_ip_set replaces the specified IP, so match the existing IP list with the IP list to be added.
addIPList = IPSets + addIPList
# Get a token to update the IP Set.
responseToken = client.get_ip_set(
Name='IPset',
Scope='REGIONAL',
Id=os.environ['IPSETRULE_ID']
)
Token = responseToken["LockToken"]
# Update the IP Set
response = client.update_ip_set(
Name='IPset',
Scope='REGIONAL',
Id=os.environ['IPSETRULE_ID'],
Addresses=addIPList,
LockToken=Token
)
# Indicate that the IP set update is complete.
print("Addition to the IP set is complete.")
except:
# exception handling
print("An error has occurred")
return() | UTF-8 | Python | false | false | 2,950 | py | 2 | add-ratebase-rule-deny-ip.py | 1 | 0.585085 | 0.582712 | 0 | 82 | 34.987805 | 112 |
jneeven/paraloop | 10,591,389,354,485 | 82baf7574c294b8084abd0989d11136e69fb01a6 | c2bbd3772172d0013fe2aeb3df48e7b6cf5c30ba | /paraloop/paraloop.py | 3a9e07544fc91a70bcab5e85fd261da2b9aedaa8 | [
"MIT"
]
| permissive | https://github.com/jneeven/paraloop | 49232ef0965bdee8ecfe70c33f2697246ca634a3 | 61fbaa608cd20b4345c46f3458199f3b77689d8f | refs/heads/main | 2023-05-04T04:02:20.399884 | 2021-04-05T16:56:29 | 2021-04-05T16:56:29 | 350,011,313 | 7 | 0 | MIT | false | 2021-05-24T16:31:54 | 2021-03-21T13:45:56 | 2021-04-08T13:22:02 | 2021-05-24T16:31:10 | 44 | 5 | 0 | 1 | Python | false | false | import inspect
import itertools
from multiprocessing import Process, Queue
from typing import Callable, Dict, Iterable, Optional, Sequence
import paraloop.worker as worker
from paraloop.syntax import LoopFinder, LoopTransformer
from paraloop.variable import Variable
class ParaLoop:
"""Wraps an iterable and executes its iterations in parallel over multiple
processes."""
def __init__(
self, iterable: Iterable, length: Optional[int] = None, num_processes: int = 8
):
self.iterable = iter(iterable)
self.length = length
if self.length is None and hasattr(iterable, "__len__"):
self.length = len(iterable)
# TODO: add auto mode where we take num cores - 1.
self.num_processes = num_processes
if self.num_processes < 2:
raise ValueError(
"Paraloop must use at least two worker processes! "
f"The current configuration specifies only {num_processes}."
)
def __iter__(self):
# Find the source code of the calling loop and transform it into a function
caller = inspect.stack()[1]
loop_source = LoopFinder(caller.lineno, filename=caller.filename).find_loop()
function = LoopTransformer(
loop_source, caller.frame.f_globals, caller.frame.f_locals
).build_loop_function()
# Keep track of the Variables that need to be aggregated properly
variables = {
key: value
for key, value in itertools.chain(
caller.frame.f_locals.items(), caller.frame.f_globals.items()
)
if isinstance(value, Variable)
}
# Spawn process and distribute the work
processes, result_queue = self._distribute_work(function, variables)
# Wait for the results and aggregate them
self._process_results(processes, result_queue, variables)
return self
def _distribute_work(self, function: Callable, variables: Dict):
# Create queues to communicate with workers and spawn worker processes
in_queue, out_queue = (Queue(), Queue())
processes = []
for i in range(self.num_processes):
process = Process(
target=worker.create_worker,
args=(function, in_queue, out_queue, variables, i),
name=f"worker_{i}",
)
processes.append(process)
process.start()
# Distribute the work over the workers
for i, x in enumerate(self.iterable):
# TODO: after a certain amount, check how many jobs have been completed so
# we can display a progress bar.
in_queue.put((i, x))
# Signal them to stop once there are no more values to iterate over
for _ in processes:
in_queue.put((0, worker.Finished))
return processes, out_queue
def _process_results(
self, processes: Sequence[Process], result_queue: Queue, variables: Dict
):
# Wait for the results
results = []
for _ in processes:
# TODO: add a timeout here in case one of the workers has crashed.
result = result_queue.get(block=True)
if isinstance(result, Exception):
print("An error has occured in one of the workers!")
raise result
results.append(result)
# print(results)
for name, variable in variables.items():
aggregated = variable.aggregation_strategy.aggregate(
variable.wrapped, [result[name] for result in results]
)
variable.assign(aggregated)
def __next__(self):
# We already looped over the iterable ourselves, so we don't need to loop
# over the original.
raise StopIteration
| UTF-8 | Python | false | false | 3,847 | py | 12 | paraloop.py | 9 | 0.610606 | 0.609306 | 0 | 103 | 36.349515 | 86 |
jellewie/GCode-Camera-move | 18,442,589,594,207 | a7e8225e7ba40a2651bccc037897e78b24fb5829 | cd78bf0e968906b398ac51858948da39b9931e2d | /CameraScript.py | af6fdb2b4b5a5cc619681c98940729444f291488 | []
| no_license | https://github.com/jellewie/GCode-Camera-move | 01195531dff241ae32837f49c117bfb1dade2c7c | ec3bf6d839aa96b313911076f65ef9ec6ddf3735 | refs/heads/master | 2020-06-13T17:40:53.001394 | 2020-05-21T11:37:21 | 2020-05-21T11:37:21 | 194,736,209 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | Feedrate = 6000 ##mm/min speed
FeedrateSlow = 300 ##mm/min speed to move slowly
PosX = 10 ##X pos to go to when pausing
PosY = 235 ##Y pos to go to when pausing
Zhop = 1 ##Amount to Zhop
Retract = 8 ##Distance to retract
PosSlowY = 10 ##Amount to slowly move of the PosY to pres the button
EndCharacter = ";End of Gcode" ##This marks the end of the file, WARNING if non found, the program would freeze!
LayerChar = "^;LAYER:" ##This would indicate a layer
##Do not change things below this line unles you know what you are doing.
inpfilename=input("enter file name: ")
if inpfilename == "":
inpfilename="input.gcode" ##If no name is given use this
if "." not in inpfilename:
inpfilename = inpfilename + ".gcode" ##If no extension was given add this extension
import os.path
if not os.path.isfile(inpfilename): ##If this file does not exist
print('"' + str(inpfilename) + '" is not a valid input file')
quit()
InFile=open(inpfilename) ##Open the file for use
OutFile=open("C_" + inpfilename,"w") ##Create output file
ReturnCoordsFlag=0 ##Reset the flag that markes that we have coords to return to
import re
while True:
line=InFile.readline() ##Read a line from the input file
OutFile.write(line) ##Write the line to the output file
line=line.rstrip() ##remove any trailing characters (DO WE NEED THIS?)
if line==EndCharacter: break ##Stop if we find this line
content=line.split() ##Split the line at each space, stored in the array
try: tester=content[0] ##If this is a emthy line
except: continue ##Just ignore it and continue
if content[0]=="G0": ##If this line starts with a G0 command
coords=line ##Save the coords in case we need it later
ReturnCoordsFlag=1 ##Flag that we have a location to return to
if re.search(LayerChar,line): ##This would indicate a layer change
OutFile.write("G91\n" + ##Use relative Positioning
"G1 F" + str(Feedrate) + " E-" + str(Retract) + "\n" ##Pull in filement
"G1 F" + str(Feedrate) + " Z" + str(Zhop) + "\n" + ##Do a Z-hop
"G90\n" + ##Use Absolute Positioning
"G1 F" + str(Feedrate) + " X" + str(PosX) + " Y" + str(PosY-PosSlowY) + "\n" + ##Move almost to the edge
"G1 F" + str(FeedrateSlow) + " Y" + str(PosY) + "\n" + ##Move (slowly) and push the button
"G0 F" + str(Feedrate) + "\n") ##Set the feedrate back (else the code seems to go slow??) NEEDS TO BE CHECKED
if ReturnCoordsFlag==1: OutFile.write(coords+"\n") ##Move back to last position before this code (if there is any)
OutFile.write("G91\n" + ##Use relative Positioning
"G1 F" + str(Feedrate) + " Z-" + str(Zhop) + " E" + str(Retract) + "\n" + ##Undo the Z hop and prime
"G90\n" ) ##Use Absolute Positioning
print('Done!') | UTF-8 | Python | false | false | 2,844 | py | 4 | CameraScript.py | 2 | 0.648031 | 0.635021 | 0 | 49 | 57.061224 | 121 |
wayneabarquez/benevola | 15,693,810,533,885 | 4bb6eba257a4ac2d14bc4278dc87f43d69d5eac9 | 4d7faef2b751c3bcdecff06dd55e6167bbbfbd45 | /app/crematorium/models.py | 9220567937d6a5863bf4df7ecda15889ba4420eb | []
| no_license | https://github.com/wayneabarquez/benevola | 4c189f21baf5ffea59185e552b9ec2cf5ec6fbcb | 1f7384918e2c986eef0116cec43e402d0048db78 | refs/heads/master | 2020-04-06T05:06:16.943454 | 2016-12-05T03:10:11 | 2016-12-05T03:10:11 | 52,737,100 | 0 | 0 | null | false | 2016-08-26T14:27:57 | 2016-02-28T18:44:27 | 2016-02-28T18:46:59 | 2016-08-26T14:27:57 | 2,928 | 0 | 0 | 0 | JavaScript | null | null | from app import db
from app.models import BaseModel
from app.home.models import Deceased
class FuneralHomes(BaseModel):
name = db.Column(db.String, nullable=False)
address = db.Column(db.Text)
class Crematorium(BaseModel):
deceased_id = db.Column(db.Integer, db.ForeignKey('deceased.id'), index=True, nullable=True)
funeral_home_id = db.Column(db.Integer, db.ForeignKey('funeral_homes.id', ondelete='SET NULL'), nullable=True)
date_cremated = db.Column(db.Date, nullable=False)
time_started = db.Column(db.String, nullable=False)
time_finished = db.Column(db.String, nullable=False)
gas_consumed = db.Column(db.Float) # liters
deceased = db.relationship(Deceased, backref=db.backref('cremations', cascade="all, delete-orphan"))
funeral_homes = db.relationship(FuneralHomes)
| UTF-8 | Python | false | false | 840 | py | 163 | models.py | 120 | 0.714286 | 0.714286 | 0 | 20 | 40 | 114 |
robin3773/Codeforces-Problem-Solution-in-Python-3 | 12,816,182,434,014 | 0b0514589481218472ef8d80fc827f4452894040 | ee1e6c0c2234387b9040527206a01b4b60587c48 | /Type A/1234A - Equalize Prices Again.py | 74c82fbcba240ae6440efb3f04d894cfcff395b2 | []
| no_license | https://github.com/robin3773/Codeforces-Problem-Solution-in-Python-3 | 2d8e7cdf11e4823c1a8fe64dad9af53211132d5f | 9bb5e6cdf64fe0cf6628c40fd64324b70acc0cb9 | refs/heads/master | 2022-11-29T19:19:04.550904 | 2020-08-05T14:44:03 | 2020-08-05T14:44:03 | 276,883,825 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | test_case = int(input())
for _ in range(test_case):
n = int(input())
price = list(map(int, input().split()))
print(abs(-sum(price)//n))
# print(sum(price) // n if sum(price) % n == 0 else (sum(price) // n) + 1)
| UTF-8 | Python | false | false | 235 | py | 150 | 1234A - Equalize Prices Again.py | 149 | 0.531915 | 0.523404 | 0 | 7 | 31.571429 | 78 |
Ming-blue/mindspore | 9,268,539,448,721 | 3a42d049f83c21a4d47951dbf2c6cc299425a030 | 91015480741ec59dda36712d71e7e6f0704bc516 | /mindspore/explainer/_image_classification_runner.py | c3121006f8f6f73facfd01ca093503ab6e8d2d42 | [
"Apache-2.0",
"Libpng",
"LGPL-3.0-only",
"AGPL-3.0-only",
"MPL-1.1",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-only",
"OpenSSL",
"LicenseRef-scancode-other-copyleft",
"IJG",
"Zlib",
"MPL-1.0",
"LicenseRef-scancode-other-permissive",
"libtiff",
"NTP",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-gary-s-brown",
"MPL-2.0",
"BSD-3-Clause",
"Unlicense",
"0BSD",
"MPL-2.0-no-copyleft-exception",
"LicenseRef-scancode-free-unknown",
"GPL-2.0-only",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
]
| permissive | https://github.com/Ming-blue/mindspore | b5dfa6af7876b00163ccfa2e18512678026c232b | 9ec8bc233c76c9903a2f7be5dfc134992e4bf757 | refs/heads/master | 2023-06-23T23:35:38.143983 | 2021-07-14T07:36:40 | 2021-07-14T07:36:40 | 286,421,966 | 1 | 0 | Apache-2.0 | true | 2020-08-10T08:41:45 | 2020-08-10T08:41:45 | 2020-08-10T08:07:09 | 2020-08-10T08:07:05 | 147,900 | 0 | 0 | 0 | null | false | false | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Image Classification Runner."""
import os
import re
import json
from time import time
import numpy as np
from scipy.stats import beta
from PIL import Image
import mindspore as ms
from mindspore import context
from mindspore import log
import mindspore.dataset as ds
from mindspore.dataset import Dataset
from mindspore.nn import Cell, SequentialCell
from mindspore.ops.operations import ExpandDims
from mindspore.train._utils import check_value_type
from mindspore.train.summary._summary_adapter import _convert_image_format
from mindspore.train.summary.summary_record import SummaryRecord
from mindspore.train.summary_pb2 import Explain
from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation
from mindspore.explainer.benchmark import Localization
from mindspore.explainer.benchmark._attribution.metric import AttributionMetric
from mindspore.explainer.benchmark._attribution.metric import LabelSensitiveMetric
from mindspore.explainer.benchmark._attribution.metric import LabelAgnosticMetric
from mindspore.explainer.explanation import RISE
from mindspore.explainer.explanation._attribution.attribution import Attribution
from mindspore.explainer.explanation._counterfactual import hierarchical_occlusion as hoc
_EXPAND_DIMS = ExpandDims()
def _normalize(img_np):
"""Normalize the numpy image to the range of [0, 1]. """
max_ = img_np.max()
min_ = img_np.min()
normed = (img_np - min_) / (max_ - min_).clip(min=1e-10)
return normed
def _np_to_image(img_np, mode):
"""Convert numpy array to PIL image."""
return Image.fromarray(np.uint8(img_np * 255), mode=mode)
class _Verifier:
"""Verification of dataset and settings of ImageClassificationRunner."""
ALL = 0xFFFFFFFF
REGISTRATION = 1
DATA_N_NETWORK = 1 << 1
SALIENCY = 1 << 2
HOC = 1 << 3
ENVIRONMENT = 1 << 4
def _verify(self, flags):
"""
Verify datasets and settings.
Args:
flags (int): Verification flags, use bitwise or '|' to combine multiple flags.
Possible bitwise flags are shown as follow.
- ALL: Verify everything.
- REGISTRATION: Verify explainer module registration.
- DATA_N_NETWORK: Verify dataset and network.
- SALIENCY: Verify saliency related settings.
- HOC: Verify HOC related settings.
- ENVIRONMENT: Verify the runtime environment.
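
        For example, ``self._verify(_Verifier.SALIENCY | _Verifier.ENVIRONMENT)`` checks only the
        saliency settings and the runtime environment, as done in register_saliency().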
Raises:
ValueError: Be raised for any data or settings' value problem.
TypeError: Be raised for any data or settings' type problem.
RuntimeError: Be raised for any runtime problem.
"""
if flags & self.ENVIRONMENT:
device_target = context.get_context('device_target')
if device_target not in ("Ascend", "GPU"):
raise RuntimeError(f"Unsupported device_target: '{device_target}', "
f"only 'Ascend' or 'GPU' is supported. "
f"Please call context.set_context(device_target='Ascend') or "
f"context.set_context(device_target='GPU').")
if flags & (self.ENVIRONMENT | self.SALIENCY):
if self._is_saliency_registered:
mode = context.get_context('mode')
if mode != context.PYNATIVE_MODE:
raise RuntimeError("Context mode: GRAPH_MODE is not supported, "
"please call context.set_context(mode=context.PYNATIVE_MODE).")
if flags & self.REGISTRATION:
if self._is_uncertainty_registered and not self._is_saliency_registered:
raise ValueError("Function register_uncertainty() is called but register_saliency() is not.")
if not self._is_saliency_registered and not self._is_hoc_registered:
raise ValueError(
"No explanation module was registered, user should at least call register_saliency() "
"or register_hierarchical_occlusion() once with proper arguments.")
if flags & (self.DATA_N_NETWORK | self.SALIENCY | self.HOC):
self._verify_data()
if flags & self.DATA_N_NETWORK:
self._verify_network()
if flags & self.SALIENCY:
self._verify_saliency()
def _verify_labels(self):
"""Verify labels."""
label_set = set()
if not self._labels:
            raise ValueError("The label list provided is empty.")
for i, label in enumerate(self._labels):
if label.strip() == "":
raise ValueError(f"Label [{i}] is all whitespaces or empty. Please make sure there is "
f"no empty label.")
if label in label_set:
raise ValueError(f"Duplicated label:{label}! Please make sure all labels are unique.")
label_set.add(label)
def _verify_ds_inputs_shape(self, sample, inputs, labels):
"""Verify a dataset sample's input shape."""
if len(inputs.shape) > 4 or len(inputs.shape) < 3 or inputs.shape[-3] not in [1, 3, 4]:
raise ValueError(
"Image shape {} is unrecognizable: the dimension of image can only be CHW or NCHW.".format(
inputs.shape))
if len(inputs.shape) == 3:
log.warning(
"Image shape {} is 3-dimensional. All the data will be automatically unsqueezed at the 0-th"
" dimension as batch data.".format(inputs.shape))
if len(sample) > 1:
if len(labels.shape) > 2 and (np.array(labels.shape[1:]) > 1).sum() > 1:
raise ValueError(
"Labels shape {} is unrecognizable: outputs should not have more than two dimensions"
" with length greater than 1.".format(labels.shape))
if self._is_hoc_registered:
if inputs.shape[-3] != 3:
raise ValueError(
"Hierarchical occlusion is registered, images must be in 3 channels format, but "
"{} channel(s) is(are) encountered.".format(inputs.shape[-3]))
short_side = min(inputs.shape[-2:])
if short_side < hoc.AUTO_IMAGE_SHORT_SIDE_MIN:
raise ValueError(
"Hierarchical occlusion is registered, images' short side must be equals to or greater then "
"{}, but {} is encountered.".format(hoc.AUTO_IMAGE_SHORT_SIDE_MIN, short_side))
def _verify_ds_sample(self, sample):
"""Verify a dataset sample."""
if len(sample) not in [1, 2, 3]:
raise ValueError("The dataset should provide [images] or [images, labels], [images, labels, bboxes]"
" as columns.")
if len(sample) == 3:
inputs, labels, bboxes = sample
if bboxes.shape[-1] != 4:
raise ValueError("The third element of dataset should be bounding boxes with shape of "
"[batch_size, num_ground_truth, 4].")
else:
if self._benchmarkers is not None:
if any([isinstance(bench, Localization) for bench in self._benchmarkers]):
raise ValueError("The dataset must provide bboxes if Localization is to be computed.")
        if len(sample) == 2:
            inputs, labels = sample
        if len(sample) == 1:
            inputs = sample[0]
            labels = None  # no label column provided; the label shape checks are skipped below
        self._verify_ds_inputs_shape(sample, inputs, labels)
def _verify_data(self):
"""Verify dataset and labels."""
self._verify_labels()
try:
sample = next(self._dataset.create_tuple_iterator())
except StopIteration:
raise ValueError("The dataset provided is empty.")
self._verify_ds_sample(sample)
def _verify_network(self):
"""Verify the network."""
next_element = next(self._dataset.create_tuple_iterator())
inputs, _, _ = self._unpack_next_element(next_element)
prop_test = self._full_network(inputs)
check_value_type("output of network in explainer", prop_test, ms.Tensor)
if prop_test.shape[1] != len(self._labels):
raise ValueError("The dimension of network output does not match the no. of classes. Please "
"check labels or the network in the explainer again.")
def _verify_saliency(self):
"""Verify the saliency settings."""
if self._explainers:
explainer_classes = []
for explainer in self._explainers:
if explainer.__class__ in explainer_classes:
raise ValueError(f"Repeated {explainer.__class__.__name__} explainer! "
"Please make sure all explainers' class is distinct.")
if explainer.network is not self._network:
raise ValueError(f"The network of {explainer.__class__.__name__} explainer is different "
"instance from network of runner. Please make sure they are the same "
"instance.")
explainer_classes.append(explainer.__class__)
if self._benchmarkers:
benchmarker_classes = []
for benchmarker in self._benchmarkers:
if benchmarker.__class__ in benchmarker_classes:
raise ValueError(f"Repeated {benchmarker.__class__.__name__} benchmarker! "
"Please make sure all benchmarkers' class is distinct.")
if isinstance(benchmarker, LabelSensitiveMetric) and benchmarker.num_labels != len(self._labels):
raise ValueError(f"The num_labels of {benchmarker.__class__.__name__} benchmarker is different "
"from no. of labels of runner. Please make them are the same.")
benchmarker_classes.append(benchmarker.__class__)
class ImageClassificationRunner(_Verifier):
"""
A high-level API for users to generate and store results of the explanation methods and the evaluation methods.
    Updated in 2020.11: the storage structure and data format were adjusted. Summary files generated by previous
    versions are deprecated and are not supported by the current version of MindInsight.
Args:
summary_dir (str): The directory path to save the summary files which store the generated results.
data (tuple[Dataset, list[str]]): Tuple of dataset and the corresponding class label list. The dataset
            should provide [images], [images, labels] or [images, labels, bboxes] as columns. The label list must
share the exact same length and order of the network outputs.
network (Cell): The network(with logit outputs) to be explained.
activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For
single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification
tasks, `nn.Sigmoid` is usually be applied. Users can also pass their own customized `activation_fn` as long
as when combining this function with network, the final output is the probability of the input.
Raises:
TypeError: Be raised for any argument type problem.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> from mindspore.explainer import ImageClassificationRunner
>>> from mindspore.explainer.explanation import GuidedBackprop, Gradient
>>> from mindspore.explainer.benchmark import Faithfulness
>>> from mindspore.nn import Softmax
>>> from mindspore.train.serialization import load_checkpoint, load_param_into_net
>>> from mindspore import context
>>>
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> # The detail of AlexNet is shown in model_zoo.official.cv.alexnet.src.alexnet.py
>>> net = AlexNet(10)
>>> # Load the checkpoint
>>> param_dict = load_checkpoint("/path/to/checkpoint")
>>> load_param_into_net(net, param_dict)
[]
>>>
>>> # Prepare the dataset for explaining and evaluation.
>>> # The detail of create_dataset_cifar10 method is shown in model_zoo.official.cv.alexnet.src.dataset.py
>>>
>>> dataset = create_dataset_cifar10("/path/to/cifar/dataset", 1)
>>> labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
>>>
>>> activation_fn = Softmax()
>>> gbp = GuidedBackprop(net)
>>> gradient = Gradient(net)
>>> explainers = [gbp, gradient]
>>> faithfulness = Faithfulness(len(labels), activation_fn, "NaiveFaithfulness")
>>> benchmarkers = [faithfulness]
>>>
>>> runner = ImageClassificationRunner("./summary_dir", (dataset, labels), net, activation_fn)
>>> runner.register_saliency(explainers=explainers, benchmarkers=benchmarkers)
>>> runner.run()
"""
# datafile directory names
_DATAFILE_DIRNAME_PREFIX = "_explain_"
_ORIGINAL_IMAGE_DIRNAME = "origin_images"
_HEATMAP_DIRNAME = "heatmap"
    # special filenames
_MANIFEST_FILENAME = "manifest.json"
    # max. no. of samples per directory
_SAMPLE_PER_DIR = 1000
# seed for fixing the iterating order of the dataset
_DATASET_SEED = 58
# printing spacer
_SPACER = "{:120}\r"
# datafile directory's permission
_DIR_MODE = 0o700
# datafile's permission
_FILE_MODE = 0o400
def __init__(self,
summary_dir,
data,
network,
activation_fn):
check_value_type("data", data, tuple)
if len(data) != 2:
raise ValueError("Argument data is not a tuple with 2 elements")
check_value_type("data[0]", data[0], Dataset)
check_value_type("data[1]", data[1], list)
if not all(isinstance(ele, str) for ele in data[1]):
raise ValueError("Argument data[1] is not list of str.")
check_value_type("summary_dir", summary_dir, str)
check_value_type("network", network, Cell)
check_value_type("activation_fn", activation_fn, Cell)
self._summary_dir = summary_dir
self._dataset = data[0]
self._labels = data[1]
self._network = network
self._explainers = None
self._benchmarkers = None
self._uncertainty = None
self._hoc_searcher = None
self._summary_timestamp = None
self._sample_index = -1
self._manifest = None
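        # _full_network chains the raw-logit network with the activation layer,
        # so it maps input images straight to prediction probabilities.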
self._full_network = SequentialCell([self._network, activation_fn])
self._full_network.set_train(False)
self._verify(_Verifier.DATA_N_NETWORK | _Verifier.ENVIRONMENT)
def register_saliency(self,
explainers,
benchmarkers=None):
"""
Register saliency explanation instances.
.. warning::
This function can not be invoked more than once on each runner.
Args:
explainers (list[Attribution]): The explainers to be evaluated,
                see `mindspore.explainer.explanation`. All explainers' classes must be distinct and their networks
                must be the exact same instance as the runner's network.
            benchmarkers (list[AttributionMetric], optional): The benchmarkers for scoring the explainers,
                see `mindspore.explainer.benchmark`. All benchmarkers' classes must be distinct.
Raises:
ValueError: Be raised for any data or settings' value problem.
TypeError: Be raised for any data or settings' type problem.
RuntimeError: Be raised if this function was invoked before.
"""
check_value_type("explainers", explainers, list)
if not all(isinstance(ele, Attribution) for ele in explainers):
raise TypeError("Argument explainers is not list of mindspore.explainer.explanation .")
if not explainers:
raise ValueError("Argument explainers is empty.")
if benchmarkers is not None:
check_value_type("benchmarkers", benchmarkers, list)
if not all(isinstance(ele, AttributionMetric) for ele in benchmarkers):
raise TypeError("Argument benchmarkers is not list of mindspore.explainer.benchmark .")
if self._explainers is not None:
raise RuntimeError("Function register_saliency() was invoked already.")
self._explainers = explainers
self._benchmarkers = benchmarkers
try:
self._verify(_Verifier.SALIENCY | _Verifier.ENVIRONMENT)
except (ValueError, TypeError):
self._explainers = None
self._benchmarkers = None
raise
def register_hierarchical_occlusion(self):
"""
Register hierarchical occlusion instances.
.. warning::
This function can not be invoked more than once on each runner.
Note:
            Input images are required to be in 3-channel format and the length of the short side must be equal to or
greater than 56 pixels.
Raises:
ValueError: Be raised for any data or settings' value problem.
RuntimeError: Be raised if the function was called already.
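
        Examples:
            >>> # A minimal sketch, assuming `runner` was constructed as in the class-level example above.
            >>> runner.register_hierarchical_occlusion()
            >>> runner.run()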
"""
if self._hoc_searcher is not None:
raise RuntimeError("Function register_hierarchical_occlusion() was invoked already.")
self._hoc_searcher = hoc.Searcher(self._full_network)
try:
self._verify(_Verifier.HOC | _Verifier.ENVIRONMENT)
except ValueError:
self._hoc_searcher = None
raise
def register_uncertainty(self):
"""
Register uncertainty instance to compute the epistemic uncertainty base on the Bayes' theorem.
.. warning::
This function can not be invoked more than once on each runner.
Note:
Please refer to the documentation of mindspore.nn.probability.toolbox.uncertainty_evaluation for the
details. The actual output is standard deviation of the classification predictions and the corresponding
            95% confidence intervals. Users have to invoke register_saliency() as well, since the uncertainty
            results are shown on the saliency map page in MindInsight.
Raises:
RuntimeError: Be raised if the function was called already.
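
        Examples:
            >>> # A minimal sketch, assuming `runner` and `net` were built as in the class-level example above.
            >>> # Saliency must be registered too, since uncertainty results appear on the saliency map page.
            >>> runner.register_uncertainty()
            >>> runner.register_saliency(explainers=[Gradient(net)])
            >>> runner.run()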
"""
if self._uncertainty is not None:
raise RuntimeError("Function register_uncertainty() was invoked already.")
self._uncertainty = UncertaintyEvaluation(model=self._full_network,
train_dataset=None,
task_type='classification',
num_classes=len(self._labels))
def run(self):
"""
Run the explain job and save the result as a summary in summary_dir.
Note:
            User should call register_saliency() or register_hierarchical_occlusion() at least once
            before running this function.
Raises:
ValueError: Be raised for any data or settings' value problem.
TypeError: Be raised for any data or settings' type problem.
RuntimeError: Be raised for any runtime problem.
"""
self._verify(_Verifier.ALL)
self._manifest = {"saliency_map": False,
"benchmark": False,
"uncertainty": False,
"hierarchical_occlusion": False}
with SummaryRecord(self._summary_dir, raise_exception=True) as summary:
print("Start running and writing......")
begin = time()
self._summary_timestamp = self._extract_timestamp(summary.file_info['file_name'])
if self._summary_timestamp is None:
raise RuntimeError("Cannot extract timestamp from summary filename!"
" It should contains a timestamp after 'summary.' .")
self._save_metadata(summary)
imageid_labels = self._run_inference(summary)
sample_count = self._sample_index
if self._is_saliency_registered:
self._run_saliency(summary, imageid_labels)
if not self._manifest["saliency_map"]:
raise RuntimeError(
f"No saliency map was generated in {sample_count} samples. "
f"Please make sure the dataset, labels, activation function and network are properly trained "
f"and configured.")
if self._is_hoc_registered and not self._manifest["hierarchical_occlusion"]:
raise RuntimeError(
f"No Hierarchical Occlusion result was found in {sample_count} samples. "
f"Please make sure the dataset, labels, activation function and network are properly trained "
f"and configured.")
self._save_manifest()
print("Finish running and writing. Total time elapsed: {:.3f} s".format(time() - begin))
@property
def _is_hoc_registered(self):
"""Check if HOC module is registered."""
return self._hoc_searcher is not None
@property
def _is_saliency_registered(self):
"""Check if saliency module is registered."""
return bool(self._explainers)
@property
def _is_uncertainty_registered(self):
"""Check if uncertainty module is registered."""
return self._uncertainty is not None
def _save_metadata(self, summary):
"""Save metadata of the explain job to summary."""
print("Start writing metadata......")
explain = Explain()
explain.metadata.label.extend(self._labels)
if self._is_saliency_registered:
exp_names = [exp.__class__.__name__ for exp in self._explainers]
explain.metadata.explain_method.extend(exp_names)
if self._benchmarkers is not None:
bench_names = [bench.__class__.__name__ for bench in self._benchmarkers]
explain.metadata.benchmark_method.extend(bench_names)
summary.add_value("explainer", "metadata", explain)
summary.record(1)
print("Finish writing metadata.")
def _run_inference(self, summary, threshold=0.5):
"""
Run inference for the dataset and write the inference related data into summary.
Args:
summary (SummaryRecord): The summary object to store the data.
threshold (float): The threshold for prediction.
Returns:
            dict, the map of sample id to the union of its ground truth and predicted labels.
"""
sample_id_labels = {}
self._sample_index = 0
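        # Re-seed the dataset so that every pass (inference here, the explanation
        # passes later) iterates the samples in exactly the same order.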
ds.config.set_seed(self._DATASET_SEED)
for j, batch in enumerate(self._dataset):
now = time()
self._infer_batch(summary, batch, sample_id_labels, threshold)
self._spaced_print("Finish running and writing {}-th batch inference data."
" Time elapsed: {:.3f} s".format(j, time() - now))
return sample_id_labels
def _infer_batch(self, summary, batch, sample_id_labels, threshold):
"""
Infer a batch.
Args:
summary (SummaryRecord): The summary object to store the data.
batch (tuple): The next dataset sample.
sample_id_labels (dict): The sample id to labels dictionary.
threshold (float): The threshold for prediction.
"""
inputs, labels, _ = self._unpack_next_element(batch)
prob = self._full_network(inputs).asnumpy()
if self._uncertainty is not None:
prob_var = self._uncertainty.eval_epistemic_uncertainty(inputs)
else:
prob_var = None
for idx, inp in enumerate(inputs):
gt_labels = labels[idx]
gt_probs = [float(prob[idx][i]) for i in gt_labels]
if prob_var is not None:
gt_prob_vars = [float(prob_var[idx][i]) for i in gt_labels]
gt_itl_lows, gt_itl_his, gt_prob_sds = \
self._calc_beta_intervals(gt_probs, gt_prob_vars)
data_np = _convert_image_format(np.expand_dims(inp.asnumpy(), 0), 'NCHW')
original_image = _np_to_image(_normalize(data_np), mode='RGB')
original_image_path = self._save_original_image(self._sample_index, original_image)
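            # Multi-label style prediction: every class whose probability exceeds
            # the threshold is reported as a predicted label.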
predicted_labels = [int(i) for i in (prob[idx] > threshold).nonzero()[0]]
predicted_probs = [float(prob[idx][i]) for i in predicted_labels]
if prob_var is not None:
predicted_prob_vars = [float(prob_var[idx][i]) for i in predicted_labels]
predicted_itl_lows, predicted_itl_his, predicted_prob_sds = \
self._calc_beta_intervals(predicted_probs, predicted_prob_vars)
union_labs = list(set(gt_labels + predicted_labels))
sample_id_labels[str(self._sample_index)] = union_labs
explain = Explain()
explain.sample_id = self._sample_index
explain.image_path = original_image_path
summary.add_value("explainer", "sample", explain)
explain = Explain()
explain.sample_id = self._sample_index
explain.ground_truth_label.extend(gt_labels)
explain.inference.ground_truth_prob.extend(gt_probs)
explain.inference.predicted_label.extend(predicted_labels)
explain.inference.predicted_prob.extend(predicted_probs)
if prob_var is not None:
explain.inference.ground_truth_prob_sd.extend(gt_prob_sds)
explain.inference.ground_truth_prob_itl95_low.extend(gt_itl_lows)
explain.inference.ground_truth_prob_itl95_hi.extend(gt_itl_his)
explain.inference.predicted_prob_sd.extend(predicted_prob_sds)
explain.inference.predicted_prob_itl95_low.extend(predicted_itl_lows)
explain.inference.predicted_prob_itl95_hi.extend(predicted_itl_his)
self._manifest["uncertainty"] = True
summary.add_value("explainer", "inference", explain)
summary.record(1)
if self._is_hoc_registered:
self._run_hoc(summary, self._sample_index, inputs[idx], prob[idx])
self._sample_index += 1
def _run_explainer(self, summary, sample_id_labels, explainer):
"""
Run the explainer.
Args:
summary (SummaryRecord): The summary object to store the data.
            sample_id_labels (dict): A dict that maps the sample id to its union labels.
explainer (_Attribution): An Attribution object to generate saliency maps.
"""
for idx, next_element in enumerate(self._dataset):
now = time()
self._spaced_print("Start running {}-th explanation data for {}......".format(
idx, explainer.__class__.__name__))
saliency_dict_lst = self._run_exp_step(next_element, explainer, sample_id_labels, summary)
self._spaced_print(
"Finish writing {}-th batch explanation data for {}. Time elapsed: {:.3f} s".format(
idx, explainer.__class__.__name__, time() - now))
if not self._benchmarkers:
continue
for bench in self._benchmarkers:
now = time()
self._spaced_print(
"Start running {}-th batch {} data for {}......".format(
idx, bench.__class__.__name__, explainer.__class__.__name__))
self._run_exp_benchmark_step(next_element, explainer, bench, saliency_dict_lst)
self._spaced_print(
"Finish running {}-th batch {} data for {}. Time elapsed: {:.3f} s".format(
idx, bench.__class__.__name__, explainer.__class__.__name__, time() - now))
def _run_saliency(self, summary, sample_id_labels):
"""Run the saliency explanations."""
for explainer in self._explainers:
explain = Explain()
if self._benchmarkers:
for bench in self._benchmarkers:
bench.reset()
print(f"Start running and writing explanation for {explainer.__class__.__name__}......")
self._sample_index = 0
start = time()
ds.config.set_seed(self._DATASET_SEED)
self._run_explainer(summary, sample_id_labels, explainer)
if not self._benchmarkers:
continue
for bench in self._benchmarkers:
benchmark = explain.benchmark.add()
benchmark.explain_method = explainer.__class__.__name__
benchmark.benchmark_method = bench.__class__.__name__
benchmark.total_score = bench.performance
if isinstance(bench, LabelSensitiveMetric):
benchmark.label_score.extend(bench.class_performances)
self._spaced_print("Finish running and writing explanation and benchmark data for {}. "
"Time elapsed: {:.3f} s".format(explainer.__class__.__name__, time() - start))
summary.add_value('explainer', 'benchmark', explain)
summary.record(1)
def _run_hoc(self, summary, sample_id, sample_input, prob):
"""
Run HOC search for a sample image, and then save the result to summary.
Args:
summary (SummaryRecord): The summary object to store the data.
sample_id (int): The sample ID.
            sample_input (Union[Tensor, np.ndarray]): Sample image tensor in CHW or NCHW (N=1).
            prob (Union[Tensor, np.ndarray]): The sample's classification prediction outputs. HOC will run for
                labels with prediction output strictly larger than the HOC searcher's threshold (0.5 by default).
"""
if isinstance(sample_input, ms.Tensor):
sample_input = sample_input.asnumpy()
if len(sample_input.shape) == 3:
sample_input = np.expand_dims(sample_input, axis=0)
explain = None
str_mask = hoc.auto_str_mask(sample_input)
compiled_mask = None
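        # Compile the occlusion mask lazily: it is only built if at least one
        # label actually passes the searcher's probability threshold.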
for label_idx, label_prob in enumerate(prob):
if label_prob <= self._hoc_searcher.threshold:
continue
if compiled_mask is None:
compiled_mask = hoc.compile_mask(str_mask, sample_input)
try:
edit_tree, layer_outputs = self._hoc_searcher.search(sample_input, label_idx, compiled_mask)
except hoc.NoValidResultError:
log.warning(f"No Hierarchical Occlusion result was found in sample#{sample_id} "
f"label:{self._labels[label_idx]}, skipped.")
continue
if explain is None:
explain = Explain()
explain.sample_id = sample_id
self._add_hoc_result_to_explain(label_idx, str_mask, edit_tree, layer_outputs, explain)
if explain is not None:
summary.add_value("explainer", "hoc", explain)
summary.record(1)
self._manifest['hierarchical_occlusion'] = True
@staticmethod
def _add_hoc_result_to_explain(label_idx, str_mask, edit_tree, layer_outputs, explain):
"""
Add HOC result to Explain record.
Args:
label_idx (int): The label index.
str_mask (str): The mask string.
edit_tree (EditStep): The result HOC edit tree.
layer_outputs (list[float]): The network output confident of each layer.
explain (Explain): The Explain record.
"""
hoc_rec = explain.hoc.add()
hoc_rec.label = label_idx
hoc_rec.mask = str_mask
layer_count = edit_tree.max_layer + 1
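        # One record per tree layer: the layer's output confidence plus the boxes
        # of every edit step that belongs to (or is a leaf at) that layer.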
for layer in range(layer_count):
steps = edit_tree.get_layer_or_leaf_steps(layer)
layer_output = layer_outputs[layer]
hoc_layer = hoc_rec.layer.add()
hoc_layer.prob = layer_output
for step in steps:
hoc_layer.box.extend(list(step.box))
def _add_exp_step_samples(self, explainer, sample_label_sets, batch_saliency_full, summary):
"""
Add explanation results of samples to summary record.
Args:
explainer (Attribution): The explainer to be run.
sample_label_sets (list[list[int]]): The label sets of samples.
batch_saliency_full (Tensor): The saliency output from explainer.
summary (SummaryRecord): The summary record.
"""
saliency_dict_lst = []
has_saliency_rec = False
for idx, label_set in enumerate(sample_label_sets):
saliency_dict = {}
explain = Explain()
explain.sample_id = self._sample_index
for k, lab in enumerate(label_set):
saliency = batch_saliency_full[idx:idx + 1, k:k + 1]
saliency_dict[lab] = saliency
saliency_np = _normalize(saliency.asnumpy().squeeze())
saliency_image = _np_to_image(saliency_np, mode='L')
heatmap_path = self._save_heatmap(explainer.__class__.__name__, lab,
self._sample_index, saliency_image)
explanation = explain.explanation.add()
explanation.explain_method = explainer.__class__.__name__
explanation.heatmap_path = heatmap_path
explanation.label = lab
has_saliency_rec = True
summary.add_value("explainer", "explanation", explain)
summary.record(1)
self._sample_index += 1
saliency_dict_lst.append(saliency_dict)
return saliency_dict_lst, has_saliency_rec
def _run_exp_step(self, next_element, explainer, sample_id_labels, summary):
"""
Run the explanation for each step and write explanation results into summary.
Args:
next_element (Tuple): Data of one step
explainer (_Attribution): An Attribution object to generate saliency maps.
            sample_id_labels (dict): A dict that maps the sample id to its union labels.
summary (SummaryRecord): The summary object to store the data.
Returns:
            list, list of dicts that map each label to its corresponding saliency map.
"""
inputs, labels, _ = self._unpack_next_element(next_element)
sample_index = self._sample_index
sample_label_sets = []
for _ in range(len(labels)):
sample_label_sets.append(sample_id_labels[str(sample_index)])
sample_index += 1
batch_label_sets = self._make_label_batch(sample_label_sets)
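        # RISE attributes all target labels in a single call; other explainers are
        # invoked once per label column and their saliency maps are concatenated
        # along the channel dimension afterwards.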
if isinstance(explainer, RISE):
batch_saliency_full = explainer(inputs, batch_label_sets)
else:
batch_saliency_full = []
for i in range(len(batch_label_sets[0])):
batch_saliency = explainer(inputs, batch_label_sets[:, i])
batch_saliency_full.append(batch_saliency)
concat = ms.ops.operations.Concat(1)
batch_saliency_full = concat(tuple(batch_saliency_full))
saliency_dict_lst, has_saliency_rec = \
self._add_exp_step_samples(explainer, sample_label_sets, batch_saliency_full, summary)
if has_saliency_rec:
self._manifest['saliency_map'] = True
return saliency_dict_lst
def _run_exp_benchmark_step(self, next_element, explainer, benchmarker, saliency_dict_lst):
"""Run the explanation and evaluation for each step and write explanation results into summary."""
inputs, labels, _ = self._unpack_next_element(next_element)
for idx, inp in enumerate(inputs):
inp = _EXPAND_DIMS(inp, 0)
self._manifest['benchmark'] = True
if isinstance(benchmarker, LabelAgnosticMetric):
res = benchmarker.evaluate(explainer, inp)
benchmarker.aggregate(res)
continue
saliency_dict = saliency_dict_lst[idx]
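            # Localization additionally needs the ground-truth bounding-box mask and
            # is only scored on labels actually present in the sample; other
            # label-sensitive metrics are scored on every union label.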
for label, saliency in saliency_dict.items():
if isinstance(benchmarker, Localization):
_, _, bboxes = self._unpack_next_element(next_element, True)
if label in labels[idx]:
res = benchmarker.evaluate(explainer, inp, targets=label, mask=bboxes[idx][label],
saliency=saliency)
benchmarker.aggregate(res, label)
elif isinstance(benchmarker, LabelSensitiveMetric):
res = benchmarker.evaluate(explainer, inp, targets=label, saliency=saliency)
benchmarker.aggregate(res, label)
else:
                    raise TypeError('Benchmarker must be one of LabelSensitiveMetric or LabelAgnosticMetric, '
                                    'but received {}'.format(type(benchmarker)))
@staticmethod
def _calc_beta_intervals(means, variances, prob=0.95):
"""Calculate confidence interval of beta distributions."""
if not isinstance(means, np.ndarray):
means = np.array(means)
if not isinstance(variances, np.ndarray):
variances = np.array(variances)
with np.errstate(divide='ignore'):
coef_a = ((means ** 2) * (1 - means) / variances) - means
coef_b = (coef_a * (1 - means)) / means
itl_lows, itl_his = beta.interval(prob, coef_a, coef_b)
sds = np.sqrt(variances)
for i in range(itl_lows.shape[0]):
if not np.isfinite(sds[i]) or not np.isfinite(itl_lows[i]) or not np.isfinite(itl_his[i]):
itl_lows[i] = means[i]
itl_his[i] = means[i]
sds[i] = 0
return itl_lows, itl_his, sds
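    # Worked example (illustrative numbers): for mean 0.8 and variance 0.01,
    # coef_a = (0.8**2 * 0.2 / 0.01) - 0.8 = 12.0 and coef_b = 12.0 * 0.2 / 0.8
    # = 3.0, so beta.interval(prob, 12.0, 3.0) gives the credible bounds.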
def _transform_bboxes(self, inputs, labels, bboxes, ifbbox):
"""
Transform the bounding boxes.
Args:
inputs (Tensor): the image data
labels (Tensor): the labels
            bboxes (Tensor): the bounding boxes data
            ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t
                label id will be returned. If False, the returned bboxes are the parsed bboxes.
Returns:
bboxes (Union[list[dict], None, Tensor]): the bounding boxes
"""
input_len = len(inputs)
if bboxes is None or not ifbbox:
return bboxes
bboxes = ms.Tensor(bboxes, ms.int32)
masks_lst = []
labels = labels.asnumpy().reshape([input_len, -1])
bboxes = bboxes.asnumpy().reshape([input_len, -1, 4])
for idx, label in enumerate(labels):
height, width = inputs[idx].shape[-2], inputs[idx].shape[-1]
masks = {}
for j, label_item in enumerate(label):
target = int(label_item)
if not -1 < target < len(self._labels):
continue
if target not in masks:
mask = np.zeros((1, 1, height, width))
else:
mask = masks[target]
x_min, y_min, x_len, y_len = bboxes[idx][j].astype(int)
mask[:, :, x_min:x_min + x_len, y_min:y_min + y_len] = 1
masks[target] = mask
masks_lst.append(masks)
bboxes = masks_lst
return bboxes
def _transform_data(self, inputs, labels, bboxes, ifbbox):
"""
Transform the data from one iteration of dataset to a unifying form for the follow-up operations.
Args:
inputs (Tensor): the image data
labels (Tensor): the labels
            bboxes (Tensor): the bounding boxes data
            ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t
                label id will be returned. If False, the returned bboxes are the parsed bboxes.
Returns:
inputs (Tensor): the image data, unified to a 4D Tensor.
labels (list[list[int]]): the ground truth labels.
bboxes (Union[list[dict], None, Tensor]): the bounding boxes
"""
inputs = ms.Tensor(inputs, ms.float32)
if len(inputs.shape) == 3:
inputs = _EXPAND_DIMS(inputs, 0)
if isinstance(labels, ms.Tensor):
labels = ms.Tensor(labels, ms.int32)
labels = _EXPAND_DIMS(labels, 0)
if isinstance(bboxes, ms.Tensor):
bboxes = ms.Tensor(bboxes, ms.int32)
bboxes = _EXPAND_DIMS(bboxes, 0)
bboxes = self._transform_bboxes(inputs, labels, bboxes, ifbbox)
labels = ms.Tensor(labels, ms.int32)
if len(labels.shape) == 1:
labels_lst = [[int(i)] for i in labels.asnumpy()]
else:
labels = labels.asnumpy().reshape([len(inputs), -1])
labels_lst = []
for item in labels:
labels_lst.append(list(set(int(i) for i in item if -1 < int(i) < len(self._labels))))
labels = labels_lst
return inputs, labels, bboxes
def _unpack_next_element(self, next_element, ifbbox=False):
"""
Unpack a single iteration of dataset.
Args:
next_element (Tuple): a single element iterated from dataset object.
ifbbox (bool): whether to preprocess bboxes in self._transform_data.
Returns:
tuple, a unified Tuple contains image_data, labels, and bounding boxes.
"""
if len(next_element) == 3:
inputs, labels, bboxes = next_element
elif len(next_element) == 2:
inputs, labels = next_element
bboxes = None
else:
inputs = next_element[0]
labels = [[] for _ in inputs]
bboxes = None
inputs, labels, bboxes = self._transform_data(inputs, labels, bboxes, ifbbox)
return inputs, labels, bboxes
@staticmethod
def _make_label_batch(labels):
"""
        Unify a list of lists of labels into a 2D Tensor with shape (b, m), where b = len(labels) and m is the max
        length of all the rows in labels.
Args:
labels (List[List]): the union labels of a data batch.
Returns:
2D Tensor.
"""
max_len = max([len(label) for label in labels])
batch_labels = np.zeros((len(labels), max_len))
for idx, _ in enumerate(batch_labels):
length = len(labels[idx])
batch_labels[idx, :length] = np.array(labels[idx])
return ms.Tensor(batch_labels, ms.int32)
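    # Example: [[1], [2, 3]] is padded to the 2 x 2 tensor [[1, 0], [2, 3]].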
def _save_manifest(self):
"""Save manifest.json underneath datafile directory."""
if self._manifest is None:
            raise RuntimeError("Manifest has not been initialized.")
path_tokens = [self._summary_dir,
self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp)]
abs_dir_path = self._create_subdir(*path_tokens)
save_path = os.path.join(abs_dir_path, self._MANIFEST_FILENAME)
fd = os.open(save_path, os.O_WRONLY | os.O_CREAT, mode=self._FILE_MODE)
file = os.fdopen(fd, "w")
try:
json.dump(self._manifest, file, indent=4)
except IOError:
log.error(f"Failed to save manifest as {save_path}!")
raise
finally:
file.flush()
os.close(fd)
os.chmod(save_path, self._FILE_MODE)
def _save_original_image(self, sample_id, image):
"""Save an image to summary directory."""
id_dirname = self._get_sample_dirname(sample_id)
path_tokens = [self._summary_dir,
self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp),
self._ORIGINAL_IMAGE_DIRNAME,
id_dirname]
abs_dir_path = self._create_subdir(*path_tokens)
filename = f"{sample_id}.jpg"
save_path = os.path.join(abs_dir_path, filename)
image.save(save_path)
os.chmod(save_path, self._FILE_MODE)
return os.path.join(*path_tokens[1:], filename)
def _save_heatmap(self, explain_method, class_id, sample_id, image):
"""Save heatmap image to summary directory."""
id_dirname = self._get_sample_dirname(sample_id)
path_tokens = [self._summary_dir,
self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp),
self._HEATMAP_DIRNAME,
explain_method,
id_dirname]
abs_dir_path = self._create_subdir(*path_tokens)
filename = f"{sample_id}_{class_id}.jpg"
save_path = os.path.join(abs_dir_path, filename)
image.save(save_path, optimize=True)
os.chmod(save_path, self._FILE_MODE)
return os.path.join(*path_tokens[1:], filename)
def _create_subdir(self, *args):
"""Recursively create subdirectories."""
abs_path = None
for token in args:
if abs_path is None:
abs_path = os.path.realpath(token)
else:
abs_path = os.path.join(abs_path, token)
        # os.makedirs() doesn't set intermediate dir permissions properly, so we mkdir() one by one
try:
os.mkdir(abs_path, mode=self._DIR_MODE)
            # On some platforms, mode may be ignored by os.mkdir(), so we chmod() again to make sure
os.chmod(abs_path, mode=self._DIR_MODE)
except FileExistsError:
pass
return abs_path
@classmethod
def _get_sample_dirname(cls, sample_id):
"""Get the name of parent directory of the image id."""
return str(int(sample_id / cls._SAMPLE_PER_DIR) * cls._SAMPLE_PER_DIR)
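    # Example: with _SAMPLE_PER_DIR set to, say, 1000 (the constant is defined
    # earlier in the class, not shown here), sample_id 2345 maps to the
    # directory name "2000".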
@staticmethod
def _extract_timestamp(filename):
"""Extract timestamp from summary filename."""
matched = re.search(r"summary\.(\d+)", filename)
if matched:
return int(matched.group(1))
return None
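    # Example (hypothetical filename): "events.summary.1612345678.host"
    # yields 1612345678, while a name without "summary.<digits>" returns None.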
@classmethod
def _spaced_print(cls, message):
"""Spaced message printing."""
# workaround to print logs starting new line in case line width mismatch.
print(cls._SPACER.format(message))
| UTF-8 | Python | false | false | 47,661 | py | 884 | _image_classification_runner.py | 717 | 0.590315 | 0.586266 | 0 | 1,072 | 43.459888 | 119 |
luque/better-ways-of-thinking-about-software | 4,561,255,283,727 | e8fd13fd2111a2abc5f3883adbd23cd601def681 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/oauth_dispatch/tests/factories.py | 473bcd4ced9d97d8e0dbfb079b7904cd33102af1 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
]
| permissive | https://github.com/luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | false | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | 2019-01-02T14:40:26 | 2021-11-22T12:12:30 | 6,548 | 0 | 2 | 0 | JavaScript | false | false | # pylint: disable=missing-docstring
from datetime import datetime, timedelta
import factory
import pytz
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
from oauth2_provider.models import AccessToken, Application, RefreshToken
from openedx.core.djangoapps.oauth_dispatch.models import ApplicationAccess
from common.djangoapps.student.tests.factories import UserFactory
class ApplicationFactory(DjangoModelFactory):
class Meta:
model = Application
user = factory.SubFactory(UserFactory)
client_id = factory.Sequence('client_{}'.format)
client_secret = 'some_secret'
client_type = 'confidential'
authorization_grant_type = Application.CLIENT_CONFIDENTIAL
name = FuzzyText(prefix='name', length=8)
class ApplicationAccessFactory(DjangoModelFactory):
class Meta:
model = ApplicationAccess
application = factory.SubFactory(ApplicationFactory)
scopes = ['grades:read']
class AccessTokenFactory(DjangoModelFactory):
class Meta:
model = AccessToken
django_get_or_create = ('user', 'application')
token = FuzzyText(length=32)
expires = datetime.now(pytz.UTC) + timedelta(days=1)
class RefreshTokenFactory(DjangoModelFactory):
class Meta:
model = RefreshToken
django_get_or_create = ('user', 'application')
token = FuzzyText(length=32)
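# Usage sketch (hypothetical objects): factories compose, e.g.
#   app = ApplicationFactory()
#   token = AccessTokenFactory(user=app.user, application=app)
# builds a confidential application plus a matching access token.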
| UTF-8 | Python | false | false | 1,383 | py | 5,713 | factories.py | 4,192 | 0.742589 | 0.737527 | 0 | 50 | 26.66 | 75 |
pythonthings/lettersmith_py | 4,088,808,897,748 | 1f59e302ade24b334bacb305f414478a4fe81218 | 71a2d4ba51049fbc21c203eab200d6c1313b201d | /lettersmith/query.py | 088d1c3506c66dce6c6fe8e752d973c824321ca5 | [
"MIT"
]
| permissive | https://github.com/pythonthings/lettersmith_py | abf5b6260d4331b8189e3c0bd729b2f08f6d4109 | 96ddaf1268ac53062b2e7b1d05d06dc092865666 | refs/heads/master | 2023-09-05T05:22:57.500981 | 2021-10-01T21:37:09 | 2021-10-01T21:37:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Tools for querying data structures. Kind of a lightweight LINQ.
"""
from itertools import islice
from random import sample
def filters(predicate):
"""
Keep items if they pass predicate function test.
"""
def filter_bound(iterable):
"""
Filter iterable with bound predicate function.
"""
return filter(predicate, iterable)
return filter_bound
def rejects(predicate):
"""
Reject items if they pass predicate function test.
Inverse of filter.
"""
def reject_bound(iterable):
"""
Reject items with bound predicate function.
"""
for item in iterable:
if not predicate(item):
yield item
return reject_bound
def maps(a2b):
"""
Map `iterable` with function `a2b`.
"""
def map_bound(iterable):
"""
Map iterable using bound function.
"""
return map(a2b, iterable)
return map_bound
def sorts(key=None, reverse=False):
"""
Sort `iterable` by key.
"""
def sort_bound(iterable):
"""
Sort iterable using bound arguments.
"""
return sorted(iterable, key=key, reverse=reverse)
return sort_bound
def takes(n):
"""
Take `n` elements from `iterable`.
"""
def take_bound(iterable):
"""
Return first n elements of iterable.
"""
return islice(iterable, n)
return take_bound
def samples(k):
"""
Sample `k` elements at random from `iterable`.
"""
def sample_bound(iterable):
"""
Sample `k` elements at random from `iterable`.
"""
return sample(iterable, k)
return sample_bound
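# Usage sketch: each helper returns a function over iterables, so they
# compose directly, e.g.
#   keep_even = filters(lambda n: n % 2 == 0)
#   double = maps(lambda n: n * 2)
#   list(takes(3)(double(keep_even(range(10)))))  # -> [0, 4, 8]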
def dedupes(key):
"""
De-duplicate items in an iterable by key, retaining order.
"""
def dedupe(iterable):
"""
De-duplicate items in an iterable using bound key function,
retaining order.
"""
seen = set()
for item in iterable:
k = key(item)
if k not in seen:
seen.add(k)
yield item
return dedupe | UTF-8 | Python | false | false | 2,133 | py | 50 | query.py | 40 | 0.564463 | 0.563057 | 0 | 98 | 20.77551 | 67 |
Mirannam/while_for | 7,181,185,319,791 | bf595c97ed9d421591bd9f335ac61f1b65cb533a | c556468d20a207e86fb04f1deafad6073d3e6167 | /task2.py | fb28c57ebb88b901a82f6bc3d53824be7bed7927 | []
| no_license | https://github.com/Mirannam/while_for | 1e924b1e7acbb326008e4aa9d0577f991002f288 | 335cce3ff5711a9c403c67932cc67c2eccd7ab7f | refs/heads/main | 2023-07-21T03:54:51.714759 | 2021-09-10T09:03:11 | 2021-09-10T09:03:11 | 405,019,157 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | lang1 = 'php'
languages = ['go', 'java', 'php', 'python', 'javascript', 'ruby']
for lang in languages:
print(lang)
if lang == lang1:
break
| UTF-8 | Python | false | false | 143 | py | 9 | task2.py | 9 | 0.622378 | 0.608392 | 0 | 6 | 22.833333 | 65 |
Codemachine1/personal | 19,164,144,079,082 | 0ae8d40f63aff8a21f7927bea908efdd283c3e4c | 575d144cd6b129cd6954b20b1838f64ce3969976 | /python applications/schedule generator/schedualgenerator.py | 80c946381085965d48f68c39780c4f5f81c40e14 | []
| no_license | https://github.com/Codemachine1/personal | 60bd6f9bf5045b1e84cddcfcc2b1eafc7eb65425 | ca9a228b7c166000a37fc7c314de50752259cac6 | refs/heads/master | 2019-07-15T08:14:59.847779 | 2019-05-02T17:15:48 | 2019-05-02T17:15:48 | 33,884,211 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
from tkMessageBox import *
cars=[]
locos=[]
trains=[]
class Car(object):
def __init__(self,number,type,roadname,lading,condition,destination,currentlocation):
self.number=number
self.type=type
self.roadname=roadname
self.lading=lading
self.condition=condition
self.currentlocation=currentlocation
self.destination=destination
def getNumber(self):
return self.number
def getType(self):
return self.type
def getLading(self):
return self.lading
def getRoadName(self):
return self.roadname
def getCondition(self):
return self.condition
def getCurrentLocation(self):
return self.currentlocation
def getDestination(self):
return self.destination
def setNumber(self,number):
self.number=number
def setType(self,type):
self.type=type
def setLading(self,lading):
self.lading=lading
def setCondition(self,condition):
self.condition=condition
def setCurrentLocation(self,L):
self.currentlocation=L
def setDestination(self,d):
self.destination=d
def setRoadName(self,roadname):
self.roadname=roadname
class locomotive(object):
def __init__(self,number,clas,condition,Typecar,currentlocation):
self.number=number
self.type=clas
self.Typecar=Typecar
self.condition=condition
self.currentlocation=currentlocation
def getNumber(self):
return self.number
def getType(self):
return self.type
def getCondition(self):
return self.condition
def getCurrentLocation(self):
return self.currentlocation
def getTypecar(self):
return self.Typecar
def setNumber(self,number):
self.number=number
def setType(self,type):
self.type=type
def setType(self,typecar):
self.Typecar=typecar
def setCondition(self,condition):
self.condition=condition
def setCurrentLocation(self,L):
self.currentlocation=L
class roster(object):
def __init__(self,c,l):
self.listOfCars=c
self.listOfLocomotives=l
    def addCar(self,car):
        self.listOfCars.append(car)
def addLocomotive(self,number,type,condition,typeCar,location):
self.listOfLocomotives.append(locomotive(number,type,condition,typeCar,location))
def removeCar(self,carNumber):
pointer=0
while pointer<len(self.listOfCars):
if self.listOfCars[pointer].getNumber()==carNumber:
self.listOfCars.remove(self.listOfCars[pointer])
break
pointer+=1
    def isCar(self,car):
        for c in self.listOfCars:
            if c.getNumber()==car:
                return True
        return False
    def islocomotive(self,loco):
        for c in self.listOfLocomotives:
            if c.getNumber()==loco:
                return True
        return False
    def getCar(self,car):
        for c in self.listOfCars:
            if c.getNumber()==car:
                return c
        return None
    def getlocomotive(self,loco):
        for c in self.listOfLocomotives:
            if c.getNumber()==loco:
                return c
        return None
    def deleteCar(self,car):
        self.listOfCars.remove(car)
def deletelocomotive(self,loco):
self.listOfLocomotives.remove(loco)
def loadCar(self,car,load):
pointer=0
while pointer<len(self.listOfCars):
if self.listOfCars[pointer].getNumber()==car:
self.listOfCars[pointer].setLading(load)
break
pointer+=1
def changeDestination(self,car,location):
pointer=0
while pointer<len(self.listOfCars):
if self.listOfCars[pointer].getNumber()==car:
self.listOfCars[pointer].setCurrentLocation(location)
break
pointer+=1
def changeLocation(self,car,location):
pointer=0
while pointer<len(self.listOfCars):
if self.listOfCars[pointer].getNumber()==car:
self.listOfCars[pointer].setCurrentLocation(location)
break
pointer+=1
    def changeLocationLoco(self,loco,location):
        for c in self.listOfLocomotives:
            if c.getNumber()==loco:
                c.setCurrentLocation(location)
def getCars(self):
return self.listOfCars
def getLocos(self):
return self.listOfLocomotives
class industry(object):
    def __init__(self,name,orders,cars):
        self.name=name
        self.orders=orders
        self.cars=cars
    def getName(self):
        return self.name
    def addCar(self,car):
        self.cars.append(car)
    def removeCar(self,car):
        try:
            self.cars.remove(car)
        except ValueError:
            pass
        except AttributeError:
            pass
    def addOrder(self,order):
        self.orders.append(order)
    def randomOrder(self):
        currentOrder=random.choice(self.orders)
        return currentOrder
    def removeOrder(self,order):
        try:
            self.orders.remove(order)
        except ValueError:
            pass
        except AttributeError:
            pass
class Train(object):
def __init__(self,name,ty,stops,setouts,pickups):
self.type=ty
self.name=name
self.stops=stops
self.setouts=[]
self.pickups=[]
def getTrainName(self):
return self.name
def getType(self):
return self.type
def getServedLocations(self):
return self.stops
def addLocation(self,l):
self.stops.append(l)
def addPickups(self,l):
self.pickups.append(l)
def addSetouts(self,l):
self.setouts.append(l)
def getPickups(self,):
return self.pickups
def getSetouts(self):
return self.setouts
def printTrain(self):
return self.name+" "+self.type+" "+" ".join(self.stops)
def clear(self):
self.setouts=[]
self.pickups=[]
class order(object):
def __init__(self,name,loadType,carTypes):
self.loadTypes=loadType
self.carTypes=carTypes
self.name=name
def addCar(self,car):
self.carTypes.append(car)
def addLoad(self,load):
self.loadTypes.append(load)
def getNeededCars(self):
return self.carTypes
def getLoads(self):
return self.loadTypes
def printThis(self):
output=" "
pointer=0
        while pointer<len(self.loadTypes):
            output=output+self.carTypes[pointer]+" : "+self.loadTypes[pointer]+" "
            pointer+=1
        return output
class location(object):
def __init__(self,name,industries):
self.name=name
self.industries=industries
def getName(self):
return self.name
def getIndustries(self):
return self.industries
    def addIndustries(self,name):
        self.industries.append(industry(name,[],[]))
class railroad(object):
def __init__(self,name,locations):
self.name=name
        self.locations=locations
    def getName(self):
        return self.name
    def addLocation(self):
        name=raw_input("insert name")
        self.locations.append(location(name,[]))
def getlocations(self):
return self.locations
class situation(object):
def __init__(self,text,useRandomValue):
self.text=text
self.useRandomValue=useRandomValue
def getText(self):
if self.useRandomValue:
            value=random.randint(1,7)
return self.text+str(value)
else:
return self.text
import pickle
railroad={}
trains=[]
townsServedByRailroad=[]
def addCar():
roster.addCar()
def addLocation():
location=raw_input("enter new customer name")
railroad[location]=[]
orders=[]
while True:
ordername=raw_input("enter an order name")
newOrder=order(ordername,[],[])
while True:
newCar=raw_input("enter a car to be received at the industry")
newLoad=raw_input("enter the load that will be received or put into the car")
newOrder.addCar(newCar)
newOrder.addLoad(newLoad)
if raw_input("type end to stop adding cars").upper()=="END":
break
orders.append(newOrder)
if raw_input("type end to stop adding orders").upper()=="END":
break
railroad[location]=orders
def generate():
pickups={}
for industry in railroad:
pickups[industry]=[]
for car in roster.getCars():
if car.getCurrentLocation()==industry:
roster.changeDestination(car,"yard")
car.setDestination("yard")
pickups[industry].append(car)
setouts={}
orders=[]
pointer=0
for industries in railroad:
setouts[industries]=[]
setoutorder=random.choice(railroad[industries])
carloads=setoutorder.getLoads()
for carRequest in setoutorder.getNeededCars()[:2]:
for car in roster.getCars():
print car.getDestination()
if car.getLading()=="empty" and car.getCurrentLocation()=="yard" and car.getType()==carRequest:
roster.changeDestination(car,industries)
car.setDestination(industries)
setouts[industries].append(car)
#roster.loadCar(car.getNumber(),carloads[pointer])
pointer+=1
print trains[0].getServedLocations()
for train in trains:
for loc in train.getServedLocations():
for car in setouts[loc]:
train.addSetouts(car)
for car in pickups[loc]:
train.addPickups(car)
window=Toplevel()
    Label(window,text="Schedule").pack()
manifest=Listbox(window,width=50)
manifest.pack()
locotrain=""
for train in trains:
        tra=train.getTrainName()+" serves "+', '.join(train.getServedLocations())
manifest.insert(END,tra)
for l in roster.getLocos():
if l.getType()==train.getType():
f=l.getNumber()+" "+l.getTypecar()+" "+l.getCondition()+" "+l.getCurrentLocation()
manifest.insert(END,f)
manifest.insert(END, " Setouts")
for entry in train.getSetouts():
setout=" "+entry.getNumber()+" "+entry.getType()+" v"+entry.getRoadName()+" "+entry.getLading()+" "+entry.getCondition()+" "+entry.getCurrentLocation()+" "+entry.getDestination()
manifest.insert(END,setout)
manifest.insert(END," pickups")
for entry in train.getPickups():
pickup=" "+entry.getNumber()+" "+entry.getType()+" v"+entry.getRoadName()+" "+entry.getLading()+" "+entry.getCondition()+" "+entry.getCurrentLocation()+" "+entry.getDestination()
manifest.insert(END,pickup)
break
Label(window,text="situations to be aware of").pack()
sitsListbox=Listbox(window,width=50)
sitsListbox.pack()
rangeOfInts=random.randint(0,3)
try:
for t in range(rangeOfInts):
choice=random.choice(situations)
sitsListbox.insert(END,choice.getText())
    except IndexError:
sitsListbox.insert(END,"no situations")
def PrintSchedual():
if askyesno("confirm print","Are you sure you want to print to a file"):
for location in railroad:
for car in setouts[location]:
roster.changeLocation(car.getNumber(),location)
schedualprint=open("schedual","w")
            schedualprint.write("Schedule\n")
for train in trains:
schedualprint.write( train.getTrainName())
for l in roster.getLocos():
if l.getTypecar()==train.getType():
schedualprint.write( " "+l.getNumber()+" "+l.getType()+" "+l.getCondition()+" "+l.getCurrentLocation()+"\n")
schedualprint.write( " Setouts\n")
for entry in train.getSetouts():
schedualprint.write(" "+entry.getNumber()+" "+entry.getType()+" v"+entry.getRoadName()+" "+entry.getLading()+" "+entry.getCondition()+" "+entry.getCurrentLocation()+" "+entry.getDestination()+"\n")
schedualprint.write( " pickups\n")
for entry in train.getPickups():
schedualprint.write(" "+entry.getNumber()+" "+entry.getType()+" v"+entry.getRoadName()+" "+entry.getLading()+" "+entry.getCondition()+" "+entry.getCurrentLocation()+" "+entry.getDestination()+"\n")
Button(window,text="print",command=PrintSchedual).pack()
def help():
print("ADD CAR: add a new car to the roster")
print("ADD LOCO add a new loco to the roster")
print("ADD LOCATION: add a new LOCATION to the line")
print("ADD INDUSTRY: add new industry to a location")
print("ADD ORDER: add new order to an industry")
print("ADD TRAIN: add a new train to the schedual")
print("SAVE: save data to file")
print("GENERATE: create a new manafest")
def addTrain(Railroad):
name=raw_input("enter train name ")
ty=raw_input("enter train locomotive type ")
trainNew=Train(name,ty,[],[],[])
while True:
for l in Railroad.keys():
print l
        location=raw_input("enter an industry served by this train ")
trainNew.addLocation(location)
if raw_input("enter END to end").upper()=="END":
break
trains.append(trainNew)
############################### GUI functions
def ShowCarEntry():
carEntryWindow=Toplevel()
name=Entry(carEntryWindow)
number=Entry(carEntryWindow)
cartype=Entry(carEntryWindow)
lading=Entry(carEntryWindow)
condition=Entry(carEntryWindow)
location=Entry(carEntryWindow)
m=Label(carEntryWindow,text="Car entry form")
m.pack()
m1=Label(carEntryWindow,text="Car name")
m1.pack()
name.pack()
m2=Label(carEntryWindow,text="Car number")
m2.pack()
number.pack()
m3=Label(carEntryWindow,text="Car type")
m3.pack()
cartype.pack()
m4=Label(carEntryWindow,text="Car lading")
m4.pack()
lading.pack()
m5=Label(carEntryWindow,text="Car condition")
m5.pack()
condition.pack()
m6=Label(carEntryWindow,text="Car location")
    m6.pack()
location.pack()
def getCarValue():
a=number.get()
b=cartype.get()
c=name.get()
d=location.get()
e=lading.get()
f=condition.get()
g=Car(a,b,c,e,f,"yard",d)
roster.addCar(g)
carEntryWindow.destroy()
enter=Button(carEntryWindow,text="enter",command=getCarValue)
enter.pack()
def ShowLocomotiveEntry():
LocomotiveEntryWindow=Toplevel()
name=Entry(LocomotiveEntryWindow)
number=Entry(LocomotiveEntryWindow)
type2=Entry(LocomotiveEntryWindow)
lading=Entry(LocomotiveEntryWindow)
classs=Entry(LocomotiveEntryWindow)
condition=Entry(LocomotiveEntryWindow)
location=Entry(LocomotiveEntryWindow)
m=Label(LocomotiveEntryWindow,text="locomotive entry form")
m.pack()
m2=Label(LocomotiveEntryWindow,text="locomotive number")
m2.pack()
number.pack()
m3=Label(LocomotiveEntryWindow,text="locomotive type")
m3.pack()
type2.pack()
m4=Label(LocomotiveEntryWindow,text="locomotive class")
m4.pack()
classs.pack()
m5=Label(LocomotiveEntryWindow,text="locomotive condition")
m5.pack()
condition.pack()
m6=Label(LocomotiveEntryWindow,text="locomotive location")
    m6.pack()
location.pack()
def getLocomotiveValue():
roster.addLocomotive(number.get(),classs.get(),condition.get(),type2.get(),location.get())
LocomotiveEntryWindow.destroy()
enter=Button(LocomotiveEntryWindow,text="enter",command=getLocomotiveValue)
enter.pack()
def ShowIndustryEntry():
def getInustry():
railroad[name.get()]=[]
IndustryEntryWindow.destroy()
IndustryEntryWindow=Toplevel()
name=Entry(IndustryEntryWindow)
m=Label(IndustryEntryWindow,text="locomotive entry form")
m.pack()
m1=Label(IndustryEntryWindow,text="locomotive name")
m1.pack()
name.pack()
enter=Button(IndustryEntryWindow,text="enter",command=getInustry)
enter.pack()
def ShowOrderEntry():
orderEntry=Toplevel()
industryList=Listbox(orderEntry)
industryList.pack()
keys=railroad.keys()
industryList.insert(END,"Industry selection list")
for itm in keys:
industryList.insert(END,itm)
    m=Label(orderEntry,text="Enter the car types to be delivered to this industry separated by spaces")
m.pack()
cars=Entry(orderEntry)
cars.pack()
loads=Entry(orderEntry)
    m2=Label(orderEntry,text="Enter the loads to be delivered to this industry separated by spaces")
m2.pack()
loads.pack()
def getOrder():
carList=[]
loadList=[]
for ca in cars.get().split():
carList.append(ca)
for lo in loads.get().split():
loadList.append(lo)
railroad[industryList.get(industryList.curselection())].append(order("",loadList,carList))
orderEntry.destroy()
enter=Button(orderEntry,text="enter",command=getOrder)
enter.pack()
def showAddTrain():
TRAINS=Toplevel()
industryList=Listbox(TRAINS)
industryList.pack()
keys=railroad.keys()
industryList.insert(END,"Industry selection list")
for itm in keys:
industryList.insert(END,itm)
m=Label(TRAINS,text="enter train name")
m.pack()
nameEntry=Entry(TRAINS)
nameEntry.pack()
m3=Label(TRAINS,text="enter train type")
m3.pack()
tyEntry=Entry(TRAINS)
tyEntry.pack()
    m2=Label(TRAINS,text="enter all industries served by this train with space between them")
m2.pack()
IndEntry=Entry(TRAINS)
IndEntry.pack()
def getTrain():
name=nameEntry.get()
ty=tyEntry.get()
stops=[]
st=IndEntry.get().split(" ")
trainNew=Train(name,ty,st,[],[])
trains.append(trainNew)
TRAINS.destroy()
enter=Button(TRAINS,text="enter",command=getTrain)
enter.pack()
def addSituation():
window=Toplevel()
def getSituation():
s=False
if confirmEntry.get().lower()=="yes":
s=True
situations.append(situation(textEntry.get(),s))
textEntry=Entry(window)
confirmEntry=Entry(window)
Label(window,text="enter situation text").pack()
textEntry.pack()
Label(window,text="use a random number(yes/no)").pack()
confirmEntry.pack()
enter=Button(window,text="enter",command=getSituation)
enter.pack()
def removeSituation():
def process(event):
sel=carlist.curselection()
st=situations[int(sel[0])]
situations.remove(st)
window.destroy()
window=Toplevel()
carlist=Listbox(window,width=70)
carlist.pack()
for entry in situations:
setout=entry.getText()
carlist.insert(END,setout)
carlist.bind("<Double-Button-1>",process)
def removeCar():
def process(event):
sel=carlist.curselection()
st=roster.getCars()[int(sel[0])]
roster.deleteCar(st)
print st.getNumber()
window.destroy()
window=Toplevel()
carlist=Listbox(window,width=70)
carlist.pack()
for entry in roster.getCars():
setout=entry.getNumber()+" "+entry.getType()+" "+entry.getRoadName()+" "+entry.getLading()+" "+entry.getCondition()+" "+entry.getCurrentLocation()+" "+entry.getDestination()
carlist.insert(END,setout)
carlist.bind("<Double-Button-1>",process)
def removeLoco():
def process(event):
sel=carlist.curselection()
st=roster.getLocos()[int(sel[0])]
roster.deletelocomotive(st)
print st.getNumber()
window.destroy()
window=Toplevel()
carlist=Listbox(window,width=70)
carlist.pack()
for entry in roster.getLocos():
        setout=entry.getNumber()+" "+entry.getType()+" "+entry.getTypecar()+" "+entry.getCondition()+" "+entry.getCurrentLocation()
carlist.insert(END,setout)
carlist.bind("<Double-Button-1>",process)
def removeIndustry():
def process(event):
sel=carlist.curselection()
st=railroad.keys()[int(sel[0])]
del railroad[st]
window.destroy()
window=Toplevel()
carlist=Listbox(window,width=70)
carlist.pack()
for entry in railroad.keys():
carlist.insert(END,entry)
carlist.bind("<Double-Button-1>",process)
def removeTrain():
def process(event):
sel=carlist.curselection()
st=trains[int(sel[0])]
        trains.remove(st)
window=Toplevel()
carlist=Listbox(window,width=70)
carlist.pack()
for entry in trains:
carlist.insert(END,entry.getTrainName())
carlist.bind("<Double-Button-1>",process)
def removeOrder():
    def process2(event):
        sel=carlist.curselection()
        st=railroad.keys()[int(sel[0])]
        def process(event2):
            sel2=orderlist.curselection()
            railroad[st].remove(railroad[st][int(sel2[0])])
            window2.destroy()
        window2=Toplevel()
        orderlist=Listbox(window2,width=70)
        orderlist.pack()
        for order in railroad[st]:
            mts=", ".join(order.getNeededCars())+" : "+", ".join(order.getLoads())
            orderlist.insert(END,mts)
        orderlist.bind("<Double-Button-1>",process)
window=Toplevel()
carlist=Listbox(window,width=70)
carlist.pack()
for entry in railroad.keys():
carlist.insert(END,entry)
carlist.bind("<Double-Button-1>",process2)
import sys
from Tkinter import *
try:
situations=pickle.load(open("situations.p"))
except IOError:
situations=[]
try:
railroad=pickle.load(open("railroad.p"))
except IOError:
    railroad={}
try:
cars=pickle.load(open("cars.p"))
print type(cars)
except IOError:
cars=[]
try:
trains=pickle.load(open("trains.p"))
except IOError:
trains=[]
try:
locos=pickle.load(open("locos.p"))
except IOError:
locos=[]
roster=roster(cars,locos)
root = Tk()
def end():
if askyesno("Confirm","do you really wish to quit"):
for train in trains:
train.clear()
ca=[]
for car in roster.getCars():
car.setCurrentLocation(car.getDestination())
ca.append(car)
pickle.dump(railroad,open("railroad.p","wb"))
pickle.dump(situations,open("situations.p","wb"))
pickle.dump(trains,open("trains.p","wb"))
pickle.dump(roster.getCars(),open("cars.p","wb"))
pickle.dump(roster.getLocos(),open("locos.p","wb"))
root.destroy()
root.quit()
cars=roster.getCars()
carList=Listbox(root,width=70)
carList.insert(END,"cars")
for car in cars:
entry=car.getNumber()+"|"+car.getRoadName()+"|"+car.getType()+"|"+car.getLading()+"|"+car.getCondition()+"|"+car.getCurrentLocation()+"|"+car.getDestination()
carList.insert(END,entry)
carList.grid(row=0,column=0)
locos=roster.getLocos()
print len(locos)
locoList=Listbox(root,width=70)
locoList.insert(END,"locomotives")
for car in locos:
entry=car.getNumber()+"|"+car.getType()+"|"+car.getCondition()+"|"+car.getCurrentLocation()+"|"+car.getTypecar()
locoList.insert(END,entry)
trainList=Listbox(root, width=70)
trainList.insert(END,"Trains")
for train in trains:
trainList.insert(END,train.printTrain())
industryList=Listbox(root,width=70)
industryList.insert(END,"Industries and orders")
for location in railroad.keys():
for order in railroad[location]:
industryList.insert(END,order.printThis())
industryList.grid(row=1,column=1)
trainList.grid(row=1, column=0)
locoList.grid(row=0,column=1)
menubar=Menu(root)
fileMenu=Menu(menubar,tearoff=0)
fileMenu.add_command(label="quit", command=end)
menubar.add_cascade(label="file", menu=fileMenu)
rosterMenu=Menu(menubar,tearoff=0)
rosterMenu.add_command(label="add locomotive", command=ShowLocomotiveEntry)
rosterMenu.add_command(label="add Car", command=ShowCarEntry)
rosterMenu.add_command(label="remove car",command=removeCar)
rosterMenu.add_command(label="remove locomotive",command=removeLoco)
menubar.add_cascade(label="roster", menu=rosterMenu)
railroadMenu=Menu(menubar,tearoff=0)
railroadMenu.add_command(label="add industry", command=ShowIndustryEntry)
railroadMenu.add_command(label="remove industry", command=removeIndustry)
railroadMenu.add_command(label="add order", command=ShowOrderEntry)
railroadMenu.add_command(label="remove order", command=removeOrder)
railroadMenu.add_command(label="add train", command=showAddTrain)
railroadMenu.add_command(label="remove train", command=removeTrain)
menubar.add_cascade(label="railroad", menu=railroadMenu)
situtationMenu=Menu(menubar,tearoff=0)
situtationMenu.add_command(label="add situation", command=addSituation)
situtationMenu.add_command(label="remove situation", command=removeSituation)
menubar.add_cascade(label="situtations", menu=situtationMenu)
b5=Button(root,text="generate manifest",command=generate)
b5.grid(row=2,column=0)
endb=Button(root,text="quit",command=end)
endb.grid(row=2,column=1)
root.config(menu=menubar)
root.protocol('WM_DELETE_WINDOW',end)
root.mainloop()
| UTF-8 | Python | false | false | 29,968 | py | 19 | schedualgenerator.py | 9 | 0.537407 | 0.533669 | 0 | 812 | 35.905172 | 274 |
alexkimwoo/tcp_socket_server | 10,170,482,573,942 | 14916b231949ab0909317c877ed63e927d2632a9 | f3f01e336b78e7386cedcce44b51dd6694fcc495 | /socket_server.py | a98518c170cb9debc2b80d9128c0ab916a4354d5 | []
| no_license | https://github.com/alexkimwoo/tcp_socket_server | 8e03a3d5f14533c3a30b0d751b4515f2c80ad704 | 8f4bf9814212dfe51f307ca9ca6aa5cc4bacaa4c | refs/heads/master | 2016-09-12T21:19:37.218937 | 2016-05-18T14:04:41 | 2016-05-18T14:04:41 | 59,041,572 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #=======================================================================================================================
"""SOCKET SERVER V0.1"""
__author__ = "alexkimwoo"
__copyright__ = "Copyright 2016, Alex Kim Woo"
"""
CALL PROGRAM EXAMPLE
#=======================================================================================================================
import socket_server
import datetime
import time
import thread
#=======================================================================================================================
value = 0
#=======================================================================================================================
def now():
return datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S')
#=======================================================================================================================
def count():
global value
value += 1
return value
#=======================================================================================================================
def server_business(name, delay):
while True:
b = socket_server.Business()
resp_ping = 'pong'
resp_time = now()
resp_r = str(count())
rules_result = (['ping', resp_ping],['time', resp_time],['r', resp_r])
b.refresh_data(rules_result)
time.sleep(delay)
#=======================================================================================================================
try:
thread.start_new_thread(server_business, ("SERVER_BUSINESS", 0.5, ) )
except Exception,err:
print '%s' % str(err)
#=======================================================================================================================
socket_server.myServer(port=9000)
#=======================================================================================================================
while True:
pass
"""
#=======================================================================================================================
try:
import sys, os, time, datetime
import thread
import ConfigParser
import SocketServer
import threading
except Exception,import_lib_err:
print 'IMPORT LIBRARIES FAILED . ERROR => %s' % import_lib_err
exit()
quit()
#=======================================================================================================================
server_responses = ()
#=======================================================================================================================
def now():
return datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S')
#=======================================================================================================================
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
global server_responses
name = "SOCKET_SERVER"
client = '%s:%s' % (self.client_address[0], self.client_address[1])
print '[%s] %s | NEW CLIENT CONNECTED: %s' % (now(),name,client)
start_time = datetime.datetime.today()
while True:
try:
data = self.request.recv(100).strip()
if not data:
print '[%s] %s | CONNECTION CLOSED: %s' % (now(),name,client)
break
                resp = None
                resp_to_client = server_responses
                for n in resp_to_client:
                    if data == n[0]:
                        resp = n[1]
                        break
                if resp is None:
                    resp = '<%s> NO RESPONSE' % data
if not resp:
break
curr_time = datetime.datetime.today()
delta = curr_time - start_time
try:
resp2 = resp.replace('\r\n','')
except:
resp2 = resp
print '[%s] %s | CLIENT: %s (%s DAYS %.2d:%.2d:%.2d) - < RECEIVED: %s >> RESPONSE: %s >' % (now(),name,client,delta.days,delta.seconds//3600,(delta.seconds//60)%60, delta.seconds%60, data, resp2)
self.request.sendall(resp)
except Exception,err:
                print '[%s] %s | CONNECTION FAILED. ERROR => %s' % (now(),name,err)
break
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
#=======================================================================================================================
class Business:
def __init__(self):
pass
def refresh_data(self,responses):
global server_responses
server_responses = responses
#=======================================================================================================================
class myServer:
def __init__(self,port):
HOST, PORT = '0.0.0.0', port
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print '[%s] SOCKET_SERVER | SERVER STARTED (%s:%s)' % (now(),ip, port)
try:
while True:
pass
except KeyboardInterrupt:
print '%s SOCKET_SERVER | SERVER CLOSED (%s:%s)' % (now(),ip, port)
server.shutdown()
server.server_close()
try:
sys.exit(0)
exit()
quit()
except SystemExit:
os._exit(0)
#=======================================================================================================================
| UTF-8 | Python | false | false | 5,870 | py | 3 | socket_server.py | 2 | 0.346678 | 0.339353 | 0 | 145 | 38.455172 | 212 |
JohnTitor00/intermediate-Python-DanBader | 12,025,908,468,657 | 9caadffb73621c61d7832ff2e9aeab5f176308fc | e3b3799df1959e47638ae64d844b28db2c83165f | /for_each_loops_with_enumerate_and_range.py | 44959240d04f9f51bbb2d74f634e74052a1e01e3 | []
| no_license | https://github.com/JohnTitor00/intermediate-Python-DanBader | 6130c109dc1bf4b8b1b6ce8341089897713bd82a | b1c04783f56bfad472435e5a3b8b209f65f527fd | refs/heads/master | 2021-01-24T12:23:30.427775 | 2018-03-06T13:03:25 | 2018-03-06T13:03:25 | 123,134,220 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Common way C style, Java Style
my_items = ['a', 'b', 'c']
i =0
while i< len(my_items)
print(my_items[i])
i += 1
#---------------------------------------
for i in range(len(my_items)):
print(my_items[i])
for item in my_items:
print(item)
for i, item in enumerate(my_items):
print(i, item)
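# enumerate also accepts a start index, which avoids manual offsets:
for i, item in enumerate(my_items, start=1):
    print(i, item)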
# C style: for (int i = a; i < n; i += s) {...}
# Python equivalent using range(start, stop, step) (illustrative values):
a, n, s = 0, 10, 2
for i in range(a, n, s):
    print(i)
| UTF-8 | Python | false | false | 406 | py | 23 | for_each_loops_with_enumerate_and_range.py | 22 | 0.504926 | 0.5 | 0 | 23 | 15.652174 | 40 |
Gaosj/HIN | 249,108,127,317 | 27c08dbd2212fce0e0d276ea7a3cbfd92a65d33b | 739ac0bb30af95a5218fe9e053bbc07162183420 | /src/HERec_spl.py | 5460d09eca5763d697dc8fef216df0050021c1b5 | []
| no_license | https://github.com/Gaosj/HIN | 1ae5711de3f3959be6e423794bb25cd6cab9a805 | 1c6301b2ce1e74eb06d3ea193c9c6ba135aa31a7 | refs/heads/master | 2020-04-16T13:21:50.883281 | 2019-01-24T08:29:57 | 2019-01-24T08:29:57 | 165,623,599 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# encoding=utf-8
from math import sqrt, fabs
import numpy as np
from src.data_process import input_gen
from src.data_process import get_metapaths
from src.metapath2vec import Metapath2vec
from src.deepwalk import Deepwalk
class HNERec:
def __init__(self, unum, inum, ratedim, userdim, itemdim, embedding, train_rate):
self.unum = unum
self.inum = inum
self.ratedim = ratedim
self.userdim = userdim
self.itemdim = itemdim
print('ratedim: ', ratedim, ' userdim: ', userdim, ' itemdim: ', itemdim)
self.steps = 100
self.delta = 0.02
self.beta_e = 0.1
self.beta_h = 0.1
self.beta_p = 2
self.beta_w = 0.1
self.beta_b = 0.1
self.reg_u = 1.0
self.reg_v = 1.0
self.train_file = '../data/ep_' + str(train_rate) + '.train'
self.test_file = '../data/ep_' + str(train_rate) + '.test'
self.embedding = embedding
self.train_rate = train_rate
self.user_metapaths, self.item_metapaths, self.user_metapathnum, self.item_metapathnum = get_metapaths(train_rate, embedding)
self.X = None
self.Y = None
self.R = None
self.T = None
self.user_metapathdims = None
self.item_metapathdims = None
def run(self):
# input_gen(self.train_rate)
#
        if self.embedding == 'dpwk':
            Deepwalk(enum=self.unum, pnum=self.inum, dnum=3541, dtnum=28, train_rate=0.9).run()
        if self.embedding == 'mp2vec':
            Metapath2vec(self.train_rate).run()
print('Start load embedding.')
self.X, self.user_metapathdims = self.load_embedding(self.user_metapaths, self.unum)
self.Y, self.item_metapathdims = self.load_embedding(self.item_metapaths, self.inum)
print('Load embedding finished.')
self.R, self.T = self.load_rating(self.train_file, self.test_file)
print('max_steps: ', self.steps)
print('delta: ', self.delta, 'beta_e: ', self.beta_e, 'beta_h: ', self.beta_h, 'beta_p: ', self.beta_p,
'beta_w: ', self.beta_w, 'beta_b', self.beta_b, 'reg_u', self.reg_u, 'reg_v', self.reg_v)
print('Load rating finished.')
print('train size : ', len(self.R))
print('test size : ', len(self.T))
self.initialize()
self.recommend()
self.get_prediction()
def load_embedding(self, metapaths, num):
X = {}
for i in range(num):
X[i] = {}
metapath_dims = []
ctn = 0
for metapath in metapaths:
sourcefile = '../data/embedding/' + metapath
print('Loading embedding data, location: %s' % sourcefile)
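            # Expected file layout (word2vec text format, per the parsing below):
            #   <num_nodes> <dim>
            #   <node_id> <v1> <v2> ... <v_dim>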
with open(sourcefile) as infile:
k = int(infile.readline().strip().split(' ')[1])
print('Metapath: %s. The dim of metapath embedding: %d' % (metapath, k))
metapath_dims.append(k)
                # For each metapath, build a 2-D structure: the second
                # dimension holds the Expert/Project feature-space
                # representation, row = Expert/Project, col = feature (1,..,k)
                for i in range(num):
                    # Feature-space representation of the i-th Expert/Project
                    # under the current metapath
                    X[i][ctn] = np.zeros(k)
                for line in infile.readlines():
                    # Read the value of each dimension of the feature vector
                    arr = line.strip().split(' ')
                    # Convert the 1-based node id into a 0-based index
                    i = int(arr[0]) - 1
                    # Assign each dimension value to X[i][ctn][j]
                    for j in range(k):
                        X[i][ctn][j] = float(arr[j + 1])
ctn += 1
return X, metapath_dims
def load_rating(self, trainfile, testfile):
        # A train_rate fraction of the source data was split off as ground truth; the rows were shuffled before splitting
r_train = []
r_test = []
with open(trainfile) as infile:
for line in infile.readlines():
user, item, rating = line.strip().split('\t')
rating = float(rating)
r_train.append([int(user) - 1, int(item) - 1, int(rating)])
with open(testfile) as infile:
for line in infile.readlines():
user, item, rating = line.strip().split('\t')
rating = float(rating)
r_test.append([int(user) - 1, int(item) - 1, int(rating)])
return r_train, r_test
def initialize(self):
        # Fill a matrix of shape row_num=unum, col_num=itemdim with normally distributed values
# unum X itemdim
self.E = np.random.randn(self.unum, self.itemdim) * 0.1
# inum X userdim
self.H = np.random.randn(self.inum, self.userdim) * 0.1
# unum X ratedim
self.U = np.random.randn(self.unum, self.ratedim) * 0.1
# inum X ratedim
self.V = np.random.randn(self.inum, self.ratedim) * 0.1
# unum X 3
self.pu = np.ones((self.unum, self.user_metapathnum)) * 1.0 / self.user_metapathnum
# inum X 3
self.pv = np.ones((self.inum, self.item_metapathnum)) * 1.0 / self.item_metapathnum
self.Wu = {}
self.bu = {}
for k in range(self.user_metapathnum):
# userdim X 128
self.Wu[k] = np.random.randn(self.userdim, self.user_metapathdims[k]) * 0.1
with open('../data/predict_Wu.txt', 'w') as outfile:
for data in self.Wu[0]:
outfile.write(str(data) + '\n')
# userdim X 1
self.bu[k] = np.random.randn(self.userdim) * 0.1
self.Wv = {}
self.bv = {}
for k in range(self.item_metapathnum):
# itemdim X 128
self.Wv[k] = np.random.randn(self.itemdim, self.item_metapathdims[k]) * 0.1
# itemdim X 1
self.bv[k] = np.random.randn(self.itemdim) * 0.1
def sigmod(self, x):
        # The sigmoid function is commonly used as a neural-network activation; it maps values into (0, 1)
return 1 / (1 + np.exp(-x))
def cal_u(self, i):
ui = np.zeros(self.userdim)
for k in range(self.user_metapathnum):
            # Dot the (userdim x 128) projection matrix (filled from a normal
            # distribution) with the expert's embedding (128 x 1), then add the
            # (userdim x 1) bias; s3 ends up with shape userdim x 1.
            # TODO: why apply the sigmoid function here?
            s3 = self.sigmod(self.Wu[k].dot(self.X[i][k]) + self.bu[k])
            # ui accumulates s3 weighted by each metapath's weight; this fuses
            # all the metapath embeddings together.
ui += self.pu[i][k] * s3
return self.sigmod(ui)
def cal_v(self, j):
        # Same idea as cal_u
vj = np.zeros(self.itemdim)
for k in range(self.item_metapathnum):
vj += self.pv[j][k] * self.sigmod((self.Wv[k].dot(self.Y[j][k]) + self.bv[k]))
return self.sigmod(vj)
def get_rating(self, i, j):
ui = self.cal_u(i)
vj = self.cal_v(j)
return self.U[i, :].dot(self.V[j, :]) + self.reg_u * ui.dot(self.H[j, :]) + self.reg_v * self.E[i, :].dot(vj)
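    # The predicted rating follows HERec's extended MF form:
    #   r_hat(i, j) = U_i . V_j + reg_u * (u_i . H_j) + reg_v * (E_i . v_j)
    # where u_i and v_j are the fused metapath embeddings from cal_u / cal_v.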
def maermse(self):
m = 0.0
mae = 0.0
rmse = 0.0
n = 0
for t in self.T:
n += 1
i = t[0]
j = t[1]
r = t[2]
r_p = self.get_rating(i, j)
if r_p > 5: r_p = 5
if r_p < 1: r_p = 1
m = fabs(r_p - r)
mae += m
rmse += m * m
mae = mae * 1.0 / n
rmse = sqrt(rmse * 1.0 / n)
return mae, rmse
def recommend(self):
mae = []
rmse = []
perror = 99999
cerror = 9999
n = len(self.R)
s = 0
for step in range(self.steps):
total_error = 0.0
for t in self.R:
                # expert id used for training
                i = t[0]
                # project id used for training
                j = t[1]
                # rating value used for training
rij = t[2]
rij_t = self.get_rating(i, j)
eij = rij - rij_t
total_error += eij * eij
                # SGD update step
U_g = -eij * self.V[j, :] + self.beta_e * self.U[i, :]
V_g = -eij * self.U[i, :] + self.beta_h * self.V[j, :]
self.U[i, :] -= self.delta * U_g
self.V[j, :] -= self.delta * V_g
ui = self.cal_u(i)
for k in range(self.user_metapathnum):
x_t = self.sigmod(self.Wu[k].dot(self.X[i][k]) + self.bu[k])
pu_g = self.reg_u * -eij * (ui * (1 - ui) * self.H[j, :]).dot(x_t) + self.beta_p * self.pu[i][k]
Wu_g = self.reg_u * -eij * self.pu[i][k] * np.array(
[ui * (1 - ui) * x_t * (1 - x_t) * self.H[j, :]]).T.dot(
np.array([self.X[i][k]])) + self.beta_w * self.Wu[k]
bu_g = self.reg_u * -eij * ui * (1 - ui) * self.pu[i][k] * self.H[j, :] * x_t * (
1 - x_t) + self.beta_b * self.bu[k]
# print pu_g
self.pu[i][k] -= 0.1 * self.delta * pu_g
self.Wu[k] -= 0.1 * self.delta * Wu_g
self.bu[k] -= 0.1 * self.delta * bu_g
H_g = self.reg_u * -eij * ui + self.beta_h * self.H[j, :]
self.H[j, :] -= self.delta * H_g
vj = self.cal_v(j)
for k in range(self.item_metapathnum):
y_t = self.sigmod(self.Wv[k].dot(self.Y[j][k]) + self.bv[k])
pv_g = self.reg_v * -eij * (vj * (1 - vj) * self.E[i, :]).dot(y_t) + self.beta_p * self.pv[j][k]
Wv_g = self.reg_v * -eij * self.pv[j][k] * np.array(
[vj * (1 - vj) * y_t * (1 - y_t) * self.E[i, :]]).T.dot(
np.array([self.Y[j][k]])) + self.beta_w * self.Wv[k]
bv_g = self.reg_v * -eij * vj * (1 - vj) * self.pv[j][k] * self.E[i, :] * y_t * (
1 - y_t) + self.beta_b * self.bv[k]
self.pv[j][k] -= 0.1 * self.delta * pv_g
self.Wv[k] -= 0.1 * self.delta * Wv_g
self.bv[k] -= 0.1 * self.delta * bv_g
E_g = self.reg_v * -eij * vj + 0.01 * self.E[i, :]
self.E[i, :] -= self.delta * E_g
perror = cerror
cerror = total_error / n
self.delta = 0.93 * self.delta
if abs(perror - cerror) < 0.0001:
s += 1
break
# print 'step ', step, 'crror : ', sqrt(cerror)
MAE, RMSE = self.maermse()
mae.append(MAE)
rmse.append(RMSE)
print('MAE: ', min(mae), ' RMSE: ', min(rmse))
def get_prediction(self):
print('get rating matrix...')
np.savetxt('../data/predict_U.txt', self.U)
np.savetxt('../data/predict_V.txt', self.V)
np.savetxt('../data/predict_H.txt', self.H)
np.savetxt('../data/predict_E.txt', self.E)
np.savetxt('../data/predict_pu.txt', self.pu)
np.savetxt('../data/predict_pv.txt', self.pv)
# np.savetxt('../data/predict_Wu.txt', self.Wu)
self.save_multi_matrix(self.Wu, self.user_metapathnum, "Wu")
# np.savetxt('../data/predict_Wv.txt', self.Wv)
self.save_multi_matrix(self.Wv, self.item_metapathnum, "Wv")
# np.savetxt('../data/predict_bu.txt', self.bu)
# np.savetxt('../data/predict_bv.txt', self.bv)
self.save_multi_matrix(self.bu, self.user_metapathnum, "bu")
self.save_multi_matrix(self.bv, self.item_metapathnum, "bv")
def save_multi_matrix(self, matrix, matrix_num, matrix_name):
for i in range(matrix_num):
np.save('../data/predict_' + matrix_name + '_' + str(i) + '.txt', matrix[i])
def save(self, targetfile, matrix):
total = 0
with open(targetfile, 'w') as outfile:
for i in range(matrix.shape[0])[1:]:
for j in range(matrix.shape[1])[1:]:
if matrix[i][j] != 0 and i != j:
outfile.write(str(i) + '\t' + str(j) + '\t' + str(int(matrix[i][j])) + '\n')
total += 1
print('total = ', total)
if __name__ == "__main__":
deepwalk = 'dpwk'
mp2vec = 'mp2vec'
hnrec = HNERec(unum=31868, inum=43286, ratedim=10, userdim=15, itemdim=10, embedding=mp2vec, train_rate=0.9)
hnrec.run()
| UTF-8 | Python | false | false | 12,684 | py | 7 | HERec_spl.py | 6 | 0.4885 | 0.473139 | 0 | 321 | 36.925234 | 133 |
diogodanielsoaresferreira/SecureMessagingRepository | 7,413,113,573,465 | 4c35d63b0d340cf897678576c236a2f53b8d82be | e8f3499caed2fd0b714a51a34907519d8ffb07c5 | /client/publicKeyCache.py | 3cb07d9d581ab76bd2eb2656db2e4048db21959e | []
| no_license | https://github.com/diogodanielsoaresferreira/SecureMessagingRepository | 071dbecf5be57105af2c60124a85acd704659463 | 61fa9738fd11b29e8cd60bcd8de5a533863a591d | refs/heads/master | 2020-03-28T10:43:36.531826 | 2018-09-10T11:36:18 | 2018-09-10T11:36:18 | 148,138,517 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
#
# Diogo Daniel Soares Ferreira N 76504
# Luis Davide Jesus Leira N 76514
#
# Security Messaging Repository System 2017-2018
#
# Cache for public keys for clients
# Implemented with LRU policy
#
# Public key must be stored in string format
MAX_LENGTH_PUBLIC_KEY_CACHE = 32
class PublicKeyCache:
def __init__(self):
self.cache = []
def add(self, aid, publicKey):
# If key already exists, update it
        if self.getIndex(aid) is not None:
return self.updatePublicKey(aid, publicKey)
# Else, save public key on cache
self.cache.append({aid: publicKey})
        # If the size exceeds the maximum, evict the key that was accessed longest ago
if len(self.cache) >= MAX_LENGTH_PUBLIC_KEY_CACHE:
self.cache = self.cache[1:]
# Search public key by id
def search(self, aid):
idx = self.getIndex(aid)
# If id exists, put it last on the list
if(idx != None):
value = self.cache[idx][aid]
self.updatePublicKey(aid, value)
return value
# Get index of id, if exists
def getIndex(self, aid):
for idx, elemId in enumerate(self.cache):
if aid in elemId:
return idx
# Update public key and change its location on cache
def updatePublicKey(self, aid, publicKey):
idx = self.getIndex(aid)
self.cache = self.cache[:idx]+self.cache[idx+1:]+[self.cache[idx]]
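# Usage sketch (hypothetical key strings):
#   cache = PublicKeyCache()
#   cache.add('alice', '-----BEGIN PUBLIC KEY-----...')
#   cache.add('bob', '-----BEGIN PUBLIC KEY-----...')
#   cache.search('alice')  # returns the key and marks it most recently used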
| UTF-8 | Python | false | false | 1,292 | py | 14 | publicKeyCache.py | 13 | 0.702012 | 0.684985 | 0 | 49 | 25.367347 | 85 |
mruiz42/cjk-trainer | 6,244,882,484,703 | d4e8353e43b2f9887ae66a4d4b7c501a8c32fe71 | 24ab3cd080e091f911ae0f079f8f5dd24e2b3223 | /src/driverUi/callImportCsvDialog.py | e6870f525aa8242a4333dc33277754dcfcb8d00a | [
"MIT"
]
| permissive | https://github.com/mruiz42/cjk-trainer | 9f4d3494c0fe43c2866554c4b07ba9b51fa265d6 | b565c40185d7184515be335d848a653cc7f86b51 | refs/heads/master | 2021-12-28T11:33:15.535423 | 2021-12-15T12:28:39 | 2021-12-15T12:28:39 | 168,592,616 | 3 | 0 | null | false | 2019-04-04T02:51:59 | 2019-01-31T20:36:53 | 2019-04-03T10:39:24 | 2019-04-04T02:51:59 | 28,432 | 0 | 0 | 0 | Python | false | false | from src.setupUi.ImportCsvDialog import *
from src.utilities.CsvTools import importDialogHelper
from src.utilities.SqlTools import *
from PyQt5 import QtWidgets
class ImportCSVDialog(QtWidgets.QDialog):
def __init__(self, mainWindow, parent=None):
super(ImportCSVDialog, self).__init__(parent)
self.icd = Ui_ImportCsvDialog()
self.mainWindow = mainWindow
self.icd.setupUi(self)
self.comboBox_changeEvent()
self.languages = ["", "Chinese (Simplified)", "Chinese (Traditional)", "Chinese (Pinyin)", "Spanish", "English", "Hindi", "Arabic",
"Portuguese", "Russian", "Japanese", "Japanese (Romanji)", "Punjabi", "German", "Javanese", "Malay", "Telugu",
"Vietnamese", "Korean", "French", "Turkish", "Italian", "Thai", "Persian", "Polish",
"Romanian", "Dutch", "Czech", "Swedish"]
self.icd.comboBox_definition.addItems(sorted(self.languages))
self.icd.comboBox_vocabulary.addItems(sorted(self.languages))
self.icd.comboBox_definition.setCurrentText(" ")
self.icd.comboBox_vocabulary.setCurrentText(" ")
self.icd.buttonBox.accepted.connect(self.acceptInput)
self.icd.comboBox_separator.currentIndexChanged.connect(self.comboBox_changeEvent)
self.icd.comboBox_format.currentIndexChanged.connect(self.comboBox_changeEvent)
self.icd.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setDisabled(True)
self.icd.lineEdit_tableName.textEdited.connect(self.enableButtonBox)
self.icd.plainTextEdit_deckName.textChanged.connect(self.enableButtonBox)
self.icd.comboBox_definition.currentIndexChanged.connect(self.enableButtonBox)
self.icd.comboBox_vocabulary.currentIndexChanged.connect(self.enableButtonBox)
def enableButtonBox(self):
lineEdit = self.icd.lineEdit_tableName
deckName = self.icd.plainTextEdit_deckName
definition = self.icd.comboBox_definition
vocabulary = self.icd.comboBox_vocabulary
if (lineEdit.text() != '' and deckName.toPlainText() != ''
and definition.currentText() != "" and vocabulary.currentText() != ""):
self.icd.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setDisabled(False)
else:
self.icd.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setDisabled(True)
def comboBox_changeEvent(self):
        '''Read the current separator and format selections and adjust the
        placeholder text in the deck-name plainTextEdit accordingly.'''
sep = self.icd.comboBox_separator.currentText()
format = self.icd.comboBox_format.currentText()
if sep == "tab":
sep = "\t"
placeholderText = format.strip("][")
placeholderText = placeholderText.replace("][", sep)
placeholderText += "\n"
self.icd.plainTextEdit_deckName.setPlaceholderText(
QtWidgets.QApplication.translate("ImportCsvDialog", placeholderText * 3,
None, -1))
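        # Example (hypothetical format string): "[vocabulary][definition]" with
        # separator "," becomes the placeholder "vocabulary,definition\n" x 3.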
def acceptInput(self):
table_name = self.icd.lineEdit_tableName.text()
vocab_list = self.icd.plainTextEdit_deckName.toPlainText().splitlines()
vocabularyLang = self.icd.comboBox_vocabulary.currentText()
definitionLang = self.icd.comboBox_definition.currentText()
line_format = self.icd.comboBox_format.currentText()
separator = self.icd.comboBox_separator.currentText()
print(vocab_list)
        # Parse the pasted lines and return a list of vocabulary entries
word_list = importDialogHelper(vocab_list, table_name, line_format, separator)
for i in word_list:
print(i)
print(table_name)
db = self.mainWindow.database
db.insertDeck(table_name, vocabularyLang, definitionLang)
db.insertManyCards(word_list)
self.mainWindow.loadDeckList()
| UTF-8 | Python | false | false | 3,937 | py | 37 | callImportCsvDialog.py | 27 | 0.667513 | 0.666751 | 0 | 77 | 50.12987 | 139 |
lilykissme/try_1024 | 2,448,131,378,400 | 8cf0a47c98d6d11506ce76aa0259880031f65880 | 67241ef5d08650c2db9b50eb9e9666d4d489ac96 | /test_python/backup_ver1_2.py | ea6d99ddf518e92c9fad958e5415a54261c2a405 | []
| no_license | https://github.com/lilykissme/try_1024 | 6a9f964f88a78f47f5619c653934fe5aa70bc7dc | 9599d5cedb52ad96b6a0c1f085c101ddff400381 | refs/heads/master | 2016-09-14T20:36:33.092844 | 2016-04-15T05:09:30 | 2016-04-15T05:09:30 | 56,291,443 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-*- coding: UTF-8 -*-
#!/usr/bin/python
import os
import time
# Improved version 2.0
# 1. The files/directories to back up; the paths must be given in a list!
source=['C:\\hoho\\Jekyll-Pithy-master\\images']
# 2. Backups must be stored under the main backup directory
target_dir='C:\\hoho\\backup\\'
# 3. Backups are saved in zip format
# 4. The file name is the current time plus a comment, stored in a
#    directory named after the current date
now=time.strftime('%H%M%S')
today=target_dir+time.strftime('%Y%m%d')
# let the user type a comment to be appended to the file name
comment=raw_input('enter a comment -->')
if len(comment)==0 : # check whether a comment was entered
target=today+os.sep+now+'.zip'
else :
target=today+os.sep+now+'_'+comment.replace(' ','_')+'.zip'
# create the directory for today's date if it does not exist
if not os.path.exists(today):
    os.mkdir(today)
    print 'successfully created directory',today
# zip file name (the statement used to build the name in the previous version)
#target=today+os.sep+now+'.zip'
# os.sep is used instead of hard-coding the directory separator: it is '/'
# on Linux/Unix, '\\' on Windows and ':' on classic Mac OS. Using os.sep
# rather than a literal character keeps the program portable across systems.
# invoke the archiver command to compress the files
zip_command="C:\Progra~1\WinRAR\Rar.exe a %s %s" %(target,' '.join(source))
print zip_command
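# Editor's note (illustrative alternative, not in the original script): the
# same backup could be written portably with the standard library, e.g.
#   import zipfile
#   with zipfile.ZipFile(target, 'w', zipfile.ZIP_DEFLATED) as zf:
#       for root, dirs, files in os.walk(source[0]):
#           for f in files:
#               zf.write(os.path.join(root, f))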
if os.system(zip_command)==0:
print 'successful backup to',target
else:
print 'backup FAILED' | UTF-8 | Python | false | false | 1,430 | py | 27 | backup_ver1_2.py | 26 | 0.70334 | 0.693517 | 0 | 44 | 22.136364 | 75 |
JongKyuHong/learn_python | 11,519,102,290,300 | 78d9051eb1e886a3867d608197c07d4abcf4b0f9 | e4964dbee8cf3c54814688ede60197ba13050836 | /알고리즘/20210201/큰수의 법칙.py | 95e0104acbfe18ade2734d324250cc1543565dde | []
| no_license | https://github.com/JongKyuHong/learn_python | ec8813283b67fbe1c39d8b295dc6795cb27415ed | d89f3f8590e33a31d32b734c8f645e9807a8328f | refs/heads/master | 2023-05-13T01:07:03.358444 | 2021-06-03T12:56:07 | 2021-06-03T12:56:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n,m,k = map(int,input().split())
array = list(map(int,input().split()))
array.sort()
first = array[n-1]
second = array[n-2]
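# Editor's note (illustrative, not in the original submission): the optimal
# sequence repeats the pattern [first]*k + [second], so the answer can also
# be computed in O(1):
#   count_first = (m // (k + 1)) * k + m % (k + 1)
#   answer = count_first * first + (m - count_first) * second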
total = 0  # avoid shadowing the built-in sum()
while True:
for i in range(k):
if m == 0:
break
        total += first
m -= 1
if m == 0:
break
    total += second
m -= 1
print(total) | UTF-8 | Python | false | false | 308 | py | 237 | 큰수의 법칙.py | 208 | 0.487013 | 0.464286 | 0 | 17 | 17.176471 | 38 |
TTOFFLINE-LEAK/ttoffline | 4,294,967,317,469 | b308fc891c85842ea05eba07c593b9d2e16fdc21 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/avatar/ToontownAvatarUtils.py | 6f275d5cddeec0cd91a71ca3ca636227bf691208 | [
"MIT"
]
| permissive | https://github.com/TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | from panda3d.core import CollisionTube, CollisionNode
from libotp.nametag.NametagGroup import *
from toontown.pets import Pet
from toontown.suit import SuitDNA, Suit
from toontown.toon import NPCToons, Toon, ToonDNA
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
def createToon(toonId, x=0, y=0, z=0, h=0, p=0, r=0, parent=render, anim='neutral', LOD=1000, isDisguised=False, suitType='f', coll=True):
newToon = NPCToons.createLocalNPC(toonId)
if not newToon:
newToon = NPCToons.createLocalNPC(1)
newToon.head = newToon.find('**/__Actor_head')
if isDisguised:
newToon.putOnSuit(suitType, False, False)
if coll:
collTube = CollisionTube(0, 0, 0.5, 0, 0, 4, 2)
collNode = CollisionNode('suit')
collNode.addSolid(collTube)
newToon.attachNewNode(collNode)
else:
newToon.useLOD(LOD)
if coll:
newToon.initializeBodyCollisions('toon')
newToon.setPosHpr(x, y, z, h, p, r)
newToon.reparentTo(parent)
newToon.loop(anim)
return newToon
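
# Illustrative usage (editor's addition; the toon id and pose are
# hypothetical values):
#   npc = createToon(2001, x=0, y=10, z=0, h=180, anim='neutral')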
def createUniqueToon(name, dna, hat, glasses, backpack, shoes, x=0, y=0, z=0, h=0, p=0, r=0, parent=render, anim='neutral', LOD=1000, isDisguised=False, suitType='f', suitDept='c', isWaiter=False, isRental=False, coll=True, colorType=NametagGroup.CCNonPlayer, cogLevels=(0, 0, 0, 0, 0), cheesyEffect=ToontownGlobals.CENormal, nametagStyle=100):
newToon = Toon.Toon()
newToon.setName(name)
newToon.setPickable(0)
newToon.setPlayerType(colorType)
if nametagStyle == 100:
font = loader.loadFont(TTLocalizer.InterfaceFont)
else:
font = loader.loadFont(TTLocalizer.NametagFonts[nametagStyle])
newToon.nametag.setFont(font)
newDNA = ToonDNA.ToonDNA()
newDNA.newToonFromProperties(*dna)
newToon.setDNAString(newDNA.makeNetString())
newToon.applyCheesyEffect(cheesyEffect, 0)
newToon.head = newToon.find('**/__Actor_head')
newToon.setHat(*hat)
newToon.setBackpack(*backpack)
newToon.setGlasses(*glasses)
newToon.setShoes(*shoes)
if isDisguised:
if isWaiter:
cogType = 4
else:
cogType = 0
newToon.cogLevels = []
for l in cogLevels:
newToon.cogLevels.append(l)
if cogType in ToontownGlobals.PutOnSuitRental or isRental:
index = ToontownGlobals.CogDepts.index(suitDept)
newToon.putOnSuit(index, cogType=cogType, rental=True)
else:
newToon.putOnSuit(suitType, cogType=cogType, rental=isRental)
if coll:
collTube = CollisionTube(0, 0, 0.5, 0, 0, 4, 2)
collNode = CollisionNode('suit')
collNode.addSolid(collTube)
newToon.attachNewNode(collNode)
else:
newToon.useLOD(LOD)
if coll:
newToon.initializeBodyCollisions('toon')
newToon.setPosHpr(x, y, z, h, p, r)
newToon.reparentTo(parent)
newToon.loop(anim)
return newToon
def createCog(cogType, x=0, y=0, z=0, h=0, p=0, r=0, isSkelecog=False, isWaiter=False, isVirtual=False, isSkeleRevive=False, colorType=NametagGroup.CCSuit, anim='neutral', parent=render, name=None, dept=None, level=None, coll=True):
newCog = Suit.Suit()
newCog.dna = SuitDNA.SuitDNA()
newCog.dna.newSuit(cogType)
newCog.setDNA(newCog.dna)
newCog.setPlayerType(colorType)
newCog.setPickable(0)
    level = level if level is not None else newCog.getActualLevel()
if isWaiter:
newCog.makeWaiter()
if isSkelecog:
newCog.makeSkeleton()
newCog.setName(TTLocalizer.Skeleton)
if isVirtual:
newCog.makeVirtual()
if isSkeleRevive:
level = '%s%s' % (level, TTLocalizer.SkeleRevivePostFix)
    if name is not None:
newCog.setName(name)
if dept is False:
nameInfo = TTLocalizer.SuitBaseNameWithoutDept % {'name': newCog._name, 'level': level}
else:
        nameInfo = TTLocalizer.SuitBaseNameWithLevel % {'name': newCog._name, 'dept': dept if dept is not None else newCog.getStyleDept(),
         'level': level}
newCog.setPosHpr(x, y, z, h, p, r)
newCog.reparentTo(parent)
newCog.loop(anim)
newCog.setDisplayName(nameInfo)
if coll:
collTube = CollisionTube(0, 0, 0.5, 0, 0, 4, 2)
collNode = CollisionNode('suit')
collNode.addSolid(collTube)
newCog.attachNewNode(collNode)
return newCog
def createDoodle(name, head, ears, nose, tail, body, color, colorScale, eyes, gender, x=0, y=0, z=0, h=0, p=0, r=0, parent=render, coll=True):
doodle = Pet.Pet()
doodle.setDNA([head, ears, nose, tail, body, color, colorScale, eyes, gender])
doodle.setName(name)
doodle.setPickable(0)
doodle.reparentTo(parent)
doodle.setPosHpr(x, y, z, h, p, r)
doodle.enterNeutralHappy()
if coll:
doodle.initializeBodyCollisions('pet')
return doodle | UTF-8 | Python | false | false | 4,925 | py | 717 | ToontownAvatarUtils.py | 715 | 0.663147 | 0.647919 | 0 | 125 | 38.408 | 344 |
SND96/twine-stories | 2,894,808,002,918 | bc50720391c5086c771adf0306351dc663960b73 | 7ba7a992293ccf405d604010cc5c8a2b6ec660e1 | /parser.py | 121339065cebb5bda922ba1a4de5447330763a77 | []
| no_license | https://github.com/SND96/twine-stories | ceaf31ab37eb12ffe5d61350004e406d5e8ebdb8 | 7f5d60f45bafc95dfff7874ac4ba042b5c3e6ff2 | refs/heads/master | 2021-01-23T04:39:40.686501 | 2017-08-29T15:15:04 | 2017-08-29T15:15:04 | 92,935,553 | 4 | 1 | null | false | 2017-08-24T10:43:51 | 2017-05-31T10:38:24 | 2017-07-18T13:04:42 | 2017-08-24T10:43:51 | 10,334 | 0 | 0 | 0 | Python | null | null | def make_file(node):
with open('AROWF-recently.txt', 'r') as f:
num_line = 0
fname ="Start"
#Variables used to signal the start of the parsing
start = 0
begin = 0
#Storing the options and the next node
next_node = [""]*3
ccline = [""]*3
node = ":: "+node+"\n"
question = 0
#For storing the question statement
statement = ""
for line in f.readlines():
alpha = 0
if(question):
statement += line
# question = 0
if(line[0] == ":" and start == 1):
break
if(line == node):
start = 1
question = 1
elif(start!=1):
continue
if line[0]!='[':
continue
question = 0
statement = statement[:statement.rfind('\n')]
length = len(line)
# print(line)
alpha = 0
nodes = 0
initial = 0
for i in range (length):
if (line[i] == '.'):
nodes = i
break
if line[i].isalpha():
alpha = 1
if (alpha):
ccline[num_line] += line[i]
for i in range (nodes,length):
if(line[i] == ']'):
break
if(initial == 2):
next_node[num_line] += line[i]
if(initial==1 and line[i].isspace()==0):
next_node[num_line] += line[i]
initial = 2
if(line[i] == '>'):
initial = 1
num_line += 1
return(next_node, num_line, statement, ccline)
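
# Illustrative usage (editor's addition; the passage name is hypothetical):
#   next_nodes, count, statement, options = make_file("Start")
# returns the linked passage names, the number of options found, the passage
# text, and the option labels parsed from 'AROWF-recently.txt'.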
| UTF-8 | Python | false | false | 1,711 | py | 113 | parser.py | 48 | 0.406195 | 0.391584 | 0 | 65 | 25.246154 | 56 |
HugoPelletier/redis-training | 8,624,294,335,124 | c869f9c4a2a5a8129c305a60c83bfee926e7384f | c6862ce38474732e18bb1a407e9312a2a60fafbf | /sorting/sorting.py | 4f8fabd20dfb9cf99ede1af72ee75ba48ed0adc6 | []
| no_license | https://github.com/HugoPelletier/redis-training | bbd815a1226d67a48af09b35b6c5b66799dbeee5 | 10526ac99812489b6dfd430ba5a548ea78642bd8 | refs/heads/master | 2021-08-29T04:57:58.109906 | 2017-12-13T13:02:29 | 2017-12-13T13:02:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import redis
def main():
r = redis.client.StrictRedis(db=0)
r.flushdb()
r.sadd('products', 'product:12345678')
r.sadd('products', 'product:22287343')
r.sadd('products', 'product:3333849')
r.hmset('product:12345678', {
'title': 'This is the title of product 12345678',
'regular_price_ca': '1235',
'sale_price_ca': '1000',
'position': "450"
})
r.hmset('product:22287343', {
'title': 'This is the title of product 22287343',
'regular_price_ca': '1500',
'sale_price_ca': '1200',
'position': "90"
})
r.hmset('product:3333849', {
'title': 'This is the title of product 3333849',
'regular_price_ca': '100',
'sale_price_ca': '10',
'position': "3500"
})
print r.sort('products', by='*->position', start=0, num=1)
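
    # Editor's addition (illustrative): a get-pattern can return hash fields
    # alongside the sort, where '#' stands for the element itself, e.g.
    # print r.sort('products', by='*->position', get=['#', '*->title'])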
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 895 | py | 6 | sorting.py | 5 | 0.541899 | 0.427933 | 0 | 38 | 22.578947 | 62 |
pzivich/Presentations | 3,865,470,569,007 | 9f1fa7b423d37ec8aee8ef3c52637509d473eebf | e03bda20ecdc11224b4c6a2aa2614aeb32281aaf | /BIRS-2022/code/didactic_example.py | 9f740909a6a20c1b2c204c54cf8c89fdcdc147ae | [
"CC-BY-4.0"
]
| permissive | https://github.com/pzivich/Presentations | 01ea9b7dacbe3bedea54447d7950ba446f324cc0 | d149f6ac71ef09deffa11a8395b2b1a9403f9530 | refs/heads/master | 2023-06-26T13:03:43.411511 | 2023-06-16T21:57:02 | 2023-06-16T21:57:02 | 344,915,434 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from delicatessen import MEstimator
from delicatessen.estimating_equations import ee_logistic_regression
from delicatessen.utilities import inverse_logit
from dgm import generate_data_1, generate_x, generate_data, generate_data_2, generate_data_3
np.random.seed(13659321)
# Creating generic W,V distribution plot
width = 0.30 # the width of the bars: can also be len(x) sequence
fig, ax = plt.subplots(figsize=(6.5, 4.5))
ax.bar([1, 2, 3],
[0.25, 0.75, 0.65],
width, color='blue', edgecolor='k', label=r'$W$')
ax.bar([1.3, 2.3, 3.3],
[0.2, 0.2, 0.35],
width, color='cyan', edgecolor='k', label=r'$V$')
plt.xticks([1.15, 2.15, 3.15],
[r'$S=1$', r'$S=2$', r'$S=3$'])
plt.yticks([0, 0.25, 0.5, 0.75, 1.0])
plt.ylabel("Population Proportion")
plt.legend()
plt.tight_layout()
plt.savefig("figure_didactic_popprops.png", format='png', dpi=300)
plt.close()
# Calculating the true value
pop1 = generate_x(generate_data_1(n=1000000))
truth = np.mean(pop1['X'])
print(truth)
# print(np.mean(pop1['X*']))
d = generate_data(n1=1500, n2=750, n3=200)
d.to_csv("exdat.csv")
d.info()
# print(d.describe())
# sensitivity = np.mean(d.loc[(d['S'] == 3) & (d['X'] == 1), 'X*'])
# specificity = np.mean(d.loc[(d['S'] == 3) & (d['X'] == 0), 'X*'])
# print(sensitivity)
# print(specificity)
# print(np.mean(d.loc[d['S'] == 2, 'X*']))
s = np.asarray(d['S'])
x = np.asarray(d['X'])
xm = np.asarray(d['X*'])
d['C'] = 1
w = np.asarray(d[['C', 'W']])
p = np.where(d['S'] == 1, 1, np.nan)
p = np.where(d['S'] == 2, 0, p)
def psi_approach_2(theta):
data = d.loc[d['S'] == 2].copy()
return data['X*'] - theta
def psi_approach_3(theta):
data = d.loc[d['S'] == 3].copy()
return data['X'] - theta
def psi_approach_4(theta):
sens = np.where(s == 3, (x == 1)*(xm-theta[0]), 0)
spec = np.where(s == 3, (x == 0)*((1-xm)-theta[1]), 0)
s_model = ee_logistic_regression(theta[4:],
X=w, y=p)
s_model = np.nan_to_num(s_model, copy=False, nan=0.)
pi_s = inverse_logit(np.dot(w, theta[4:]))
weight = pi_s / (1 - pi_s)
mu_2 = np.where(s == 2, (xm - theta[2])*weight, 0)
mu_1 = np.ones(s.shape)*theta[3]*(theta[0] + theta[1] - 1) - (theta[2] + theta[1] - 1)
return np.vstack([sens[None, :],
spec[None, :],
mu_2[None, :],
mu_1[None, :],
s_model])
estr1 = MEstimator(psi_approach_2, init=[0.5])
estr1.estimate(solver='lm')
estr2 = MEstimator(psi_approach_3, init=[0.5])
estr2.estimate(solver='lm')
estr3 = MEstimator(psi_approach_4, init=[0.5, 0.5, 0.5, 0.5, 0., 0.])
estr3.estimate(solver='lm')
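
# Editor's note (illustrative): besides point estimates, MEstimator exposes
# the empirical sandwich variance, e.g.
#   print(estr3.theta)     # [sens, spec, mu_2, mu_1, beta_0, beta_1]
#   print(estr3.variance)  # full sandwich covariance matrix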
# Creating plot of results from a single data set
plt.vlines(truth, 0.5, 4.5, colors='gray', linestyles='--')
plt.scatter(estr1.theta[0], 3, s=100, color='blue')
plt.hlines(3, estr1.confidence_intervals()[0][0], estr1.confidence_intervals()[0][1], colors='blue')
plt.scatter(estr2.theta[0], 2, s=100, color='green')
plt.hlines(2, estr2.confidence_intervals()[0][0], estr2.confidence_intervals()[0][1], colors='green')
plt.scatter(estr3.theta[3], 1, s=100, color='k')
plt.hlines(1, estr3.confidence_intervals()[3][0], estr3.confidence_intervals()[3][1], colors='k')
plt.xlim([0.25, 0.75])
plt.xlabel(r"$\hat{\mu}$")
plt.ylim([0.5, 4.5])
plt.yticks([1, 2, 3, 4], ["Approach 4", "Approach 3", "Approach 2", "Approach 1"])
plt.tight_layout()
plt.savefig("didactic_example.png", format='png', dpi=300)
plt.close()
reps = 2000
bias_a2, cover_a2 = [], []
bias_a3, cover_a3 = [], []
bias_a4, cover_a4 = [], []
for i in range(reps):
d = generate_data(n1=1500, n2=750, n3=200)
s = np.asarray(d['S'])
x = np.asarray(d['X'])
xm = np.asarray(d['X*'])
d['C'] = 1
w = np.asarray(d[['C', 'W']])
p = np.where(d['S'] == 1, 1, np.nan)
p = np.where(d['S'] == 2, 0, p)
# Approach 1
# no analysis
# Approach 2
mest = MEstimator(psi_approach_2, init=[0.5])
mest.estimate(solver='lm')
est = mest.theta[0]
ci = mest.confidence_intervals()[0]
bias_a2.append(est - truth)
if ci[0] < truth < ci[1]:
cover_a2.append(1)
else:
cover_a2.append(0)
# Approach 3
mest = MEstimator(psi_approach_3, init=[0.5])
mest.estimate(solver='lm')
est = mest.theta[0]
ci = mest.confidence_intervals()[0]
bias_a3.append(est - truth)
if ci[0] < truth < ci[1]:
cover_a3.append(1)
else:
cover_a3.append(0)
# Approach 4
mest = MEstimator(psi_approach_4, init=[0.5, 0.5, 0.5, 0.5, 0., 0.])
mest.estimate(solver='lm')
est = mest.theta[3]
ci = mest.confidence_intervals()[3]
bias_a4.append(est - truth)
if ci[0] < truth < ci[1]:
cover_a4.append(1)
else:
cover_a4.append(0)
print(np.mean(bias_a2))
print(np.mean(cover_a2))
print(np.mean(bias_a3))
print(np.mean(cover_a3))
print(np.mean(bias_a4))
print(np.mean(cover_a4))
fig, ax = plt.subplots(figsize=(6.5, 4.5))
# Reference line
ax.hlines([0], [0], [5], colors='gray', linestyles='--', zorder=1)
# Drawing violin plot results
parts = ax.violinplot([bias_a2, bias_a3, bias_a4], positions=[2, 3, 4],
showmeans=True, widths=0.35)
parts['bodies'][0].set_zorder(2)
for pc in parts['bodies']:
pc.set_color("blue")
for partname in ('cbars', 'cmins', 'cmaxes', 'cmeans'):
vp = parts[partname]
vp.set_edgecolor('blue')
ax2 = ax.twinx()
ax2.hlines([0.95], [0], [5], colors='gray', linestyles=':', zorder=1)
ax2.plot([2, 3, 4],
[np.mean(cover_a2), np.mean(cover_a3), np.mean(cover_a4)],
'D', color='mediumblue', markersize=7, zorder=3)
plt.xticks([1, 2, 3, 4],
["Approach 1", "Approach 2", "Approach 3", "Approach 4"])
plt.xlim([0.5, 4.5])
ax.set_ylim([-0.25, 0.25])
ax.set_ylabel(r"$\hat{\mu} - \mu$")
ax2.set_ylim([0, 1])
ax2.set_ylabel("95% CI Coverage")
ax.spines['top'].set_visible(False)
ax2.spines['top'].set_visible(False)
plt.tight_layout()
plt.savefig("figure_didactic_results.png", format='png', dpi=300)
plt.close()
| UTF-8 | Python | false | false | 6,138 | py | 24 | didactic_example.py | 14 | 0.586999 | 0.527045 | 0 | 206 | 28.796117 | 101 |
round5/round5.github.io | 9,723,805,994,619 | add112d927aecfc9d498f3911fe2e173b8233b15 | 16a804b88ce5e8623d967693ce27c636fab04758 | /Supporting_Documentation/Scripts/Parameters/main/summarize.py | 51af32b90ddff085234a53911bc51afe42947b09 | []
| no_license | https://github.com/round5/round5.github.io | b6a9b47e997e52223c6486a48a119a0c22388119 | 5e511b50bbf2727858b827f77ae045d61a690269 | refs/heads/master | 2021-07-25T01:31:59.705180 | 2020-04-10T18:21:31 | 2020-04-10T18:21:31 | 141,458,867 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from __future__ import division
import sys
import os
import argparse
# Path hack.
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0,currentdir + "/parameter_definitions")
from chosen_parameter_sets import getParameterSets
for paramSet in getParameterSets():
paramSet.analyze()
print "================================================================================"
print "%.80s" % ("===== Round5 %s ============================================================================" % paramSet.name)
print "================================================================================"
print
print paramSet
print
| UTF-8 | Python | false | false | 742 | py | 35 | summarize.py | 13 | 0.48248 | 0.477089 | 0 | 22 | 32.727273 | 132 |
eikei54/practice_scripts | 8,426,725,853,547 | 8667727162b0159086b0e8a77d7ddf8f347c3258 | 732224935ea72bf1b0773c512dee15907de7dda0 | /99_wd_work/invert_bin.py | 09ff1e817936c17edc7fca8b589e9598eb481413 | []
| no_license | https://github.com/eikei54/practice_scripts | c6bfe50fa53ef5e6dd85c0e120b0cdc74ac5cd4c | 261d939efc4cd5b0bc60cb20931f826cba787f3e | refs/heads/master | 2021-07-24T17:14:45.442429 | 2020-04-19T09:38:21 | 2020-04-19T09:38:21 | 150,737,909 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | with open("ccb7_rid_77.bin", "rb") as binary_file:
# Read the whole file at once
data = binary_file.read()
    print(bytes(~b & 0xFF for b in data))  # invert every byte (~ is unsupported on bytes) | UTF-8 | Python | false | false | 131 | py | 39 | invert_bin.py | 22 | 0.633588 | 0.610687 | 0 | 4 | 32 | 50 |
wwh51/python_snippet | 18,184,891,565,447 | 3da5ad06907d38c8e94fe39b5ed9cd31f4e04368 | 2973f4df93fac606f2e368d5de9a44d19653d92a | /webcam.py | af8d939a18cdcfffe8c678774e543da16bce9115 | []
| no_license | https://github.com/wwh51/python_snippet | fce31899a887fd9cebd57a31f00a4907a32c2a6d | 0e7354c769e19c39b13e1809d539b912273180a6 | refs/heads/master | 2021-01-17T07:26:09.586088 | 2016-06-01T07:46:00 | 2016-06-01T07:46:00 | 15,489,443 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import cv2.cv as cv
import time
import datetime
import winsound
from PIL import Image, ImageDraw, ImageFont
# Camera 0 is the integrated web cam on my netbook
camera_port = 0
# Number of frames to throw away while the camera adjusts to light levels when it first starts
ramp_frames = 20
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
# Captures a single image from the camera and returns it in PIL format
def get_image(cam):
# read is the easiest way to get a full image out of a VideoCapture object.
retval, im = cam.read()
return im
def add_num(file_jpg):
img = Image.open(file_jpg)
draw = ImageDraw.Draw(img)
# myfont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', size=40)
# fillcolor = "#ff0000"
width, height = img.size
draw.text((40, 40), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
img.save(file_jpg, "jpeg")
del draw
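
# Illustrative usage (editor's addition; the path is hypothetical):
#   add_num(r'c:\wwh\0.jpg')  # stamps the capture with the current date/time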
# Ramp the camera - these frames will be discarded and are only used to allow v4l2
# to adjust light levels, if necessary
print("Taking image...")
# Take the actual image we want to keep
file_index = 0
while file_index < 1:
for i in xrange(1):
winsound.PlaySound('bell.wav', winsound.SND_FILENAME)
camera = cv2.VideoCapture(0)
    camera.set(3, 640)  # property 3 = frame width
    camera.set(4, 480)  # property 4 = frame height
for i in xrange(ramp_frames):
temp = get_image(camera)
camera_capture = get_image(camera)
#file = r'\\MEL-STORAGE1\share\dwang\pre\test_1.png'
file_jpg = r'c:\wwh\{0}.jpg'.format(file_index)
file_index = file_index + 1
cv2.imwrite(file_jpg, camera_capture)
add_num(file_jpg)
del(camera)
im = Image.open(file_jpg, "jpeg")
im.show()
time.sleep(10)
# cv.NamedWindow("camera", 1)
# capture = cv.CaptureFromCAM(0)
# for i in xrange(30):
# img = cv.QueryFrame(capture)
# cv.ShowImage("camera", img)
# cv.DestroyAllWindows()
# You'll want to release the camera, otherwise you won't be able to create a new
# capture object until your script exits
| UTF-8 | Python | false | false | 2,125 | py | 9 | webcam.py | 8 | 0.668235 | 0.648 | 0 | 69 | 29.797101 | 92 |
jeezs/tredly | 4,174,708,256,022 | 202b65ac4a6fd2e1a717125849c176dba1364a7f | b3ad1f46a8f071f680ffa06d7d4ba98df9bcf08d | /components/tredly-libs/python-common/objects/tredly/resolvconffile.py | 6fd2b43d14d72ac9ec4ff124643c1ccefb9e1f5d | [
"MIT"
]
| permissive | https://github.com/jeezs/tredly | 3c77998bbee1ca7447b0f4746c41a224f3d87301 | eaf53e7274dd294bfdf93ea02fb934fdf340c50e | refs/heads/master | 2017-12-18T17:23:10.164693 | 2016-08-18T05:27:17 | 2016-08-18T05:27:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # A class to represent an Unbound File
import os.path
import re
class ResolvConfFile:
# Constructor
def __init__(self, filePath = '/etc/resolv.conf', search = [], servers = []):
self.filePath = filePath
self.search = search
self.nameservers = servers
    # Action: reads the resolv.conf file and stores the parsed values on self
#
# Pre: this object exists
# Post: data has been read if file exists
#
# Params: clear - whether or not to clear this object when re-reading
#
# Return: True if success, False otherwise
def read(self, clear = True):
# only read in the data if the file actually exists
if (self.fileExists()):
            # if clear is set then clear the arrays first
if (clear):
self.search = []
self.nameservers = []
# file exists so process it
with open(self.filePath) as resolvConf:
for line in resolvConf:
# strip off leading and following whitespace
line = line.strip()
# ignore empty lines
if (len(line) > 0):
lineList = line.split()
if (lineList[0] == "search"):
                            # remove the first element as we aren't interested in it
del lineList[0]
# set the linelist as the search list
self.search = lineList
elif (lineList[0] == "nameserver"):
# append the second element to the search list
self.nameservers.append(lineList[1])
return True
else:
return False
    # Action: checks whether the resolv.conf file exists or not
#
# Pre:
# Post:
#
# Params:
#
# Return: True if exists, False otherwise
def fileExists(self):
return os.path.isfile(self.filePath)
# Action: writes out a resolv.conf file
#
# Pre: this object exists
# Post: this object has been written to self.filePath in the resolv.conf format
#
# Params:
#
# Return: True if successful, False otherwise
def write(self):
try:
with open(self.filePath, 'w') as resolvConf:
searchLine = 'search'
for search in self.search:
# append the dns search path to the variable
searchLine = searchLine + ' ' + search
# print it to the file
print(searchLine, file=resolvConf)
# loop over the nameservers, printing one per line to the file
for nameserver in self.nameservers:
# write the line to the file
print('nameserver ' + nameserver, file=resolvConf)
return True
except IOError:
return False
return False
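
# Illustrative usage (editor's addition; paths and values are hypothetical):
#   rc = ResolvConfFile('/tmp/resolv.conf', search=['example.com'],
#                       servers=['8.8.8.8'])
#   rc.write()   # emit the file
#   rc.read()    # parse it back into rc.search / rc.nameservers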
| UTF-8 | Python | false | false | 3,121 | py | 116 | resolvconffile.py | 99 | 0.50817 | 0.506568 | 0 | 90 | 33.633333 | 83 |
ponson/DL_Book | 2,963,527,455,069 | 5773798aeac46e83a201a6288bd132f4b194fd9c | 85d83cac21585e70ee7414bc52444965e7ebfe2f | /src/05_11_tf_serving_client.py | 47daea14e7493a9cf125920dd4cc09832ce956b4 | []
| no_license | https://github.com/ponson/DL_Book | fddb1048483227d746b1d29fd71cb0fabf47200f | 70572583658d3f0362d27330a56b9589ad9d9281 | refs/heads/main | 2023-08-17T17:44:44.589620 | 2021-10-20T01:56:02 | 2021-10-20T01:56:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import numpy as np
import requests
from skimage import io
from skimage.transform import resize
uploaded_file='./myDigits/4.png'
image1 = io.imread(uploaded_file, as_gray=True)
# resize the image to (28, 28)
image_resized = resize(image1, (28, 28), anti_aliasing=True)
# insert a leading dimension for the batch size
X1 = image_resized.reshape(1,28,28,1)
# invert the colors
X1 = np.abs(1-X1)
# convert the prediction input to JSON format
data = json.dumps({
"instances": X1.tolist()
})
# call the TensorFlow Serving REST API
headers = {"content-type": "application/json"}
json_response = requests.post(
'http://localhost:8501/v1/models/MLP:predict',
data=data, headers=headers)
# parse the prediction results
predictions = np.array(json.loads(json_response.text)['predictions'])
print(np.argmax(predictions, axis=-1))
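
# Editor's note (illustrative): for this MNIST-style model, `predictions`
# should be a (1, 10) array of class probabilities, e.g.
#   assert predictions.shape == (1, 10)
# so the argmax over the last axis is the predicted digit.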
| UTF-8 | Python | false | false | 848 | py | 145 | 05_11_tf_serving_client.py | 15 | 0.671392 | 0.635309 | 0 | 29 | 24.758621 | 69 |
jaskonzhou/aliex | 13,881,334,335,888 | bff61906eeba6e102e2593fa49629ca01fc47ff8 | ea37859cfe97f71865a2312efbfc659f1801978e | /ajk_es/ajk_es/settings.py | be920b49459fb88fb3b211d0b0f408ed8c7120cf | []
| no_license | https://github.com/jaskonzhou/aliex | f10a4ef6a7900e60ba83e738b351cf676452ccaa | db0b3ebc3a3b02a113c7ef848c5bfd68daab215b | refs/heads/master | 2020-04-14T02:31:29.813750 | 2018-12-30T12:58:06 | 2018-12-30T12:58:06 | 163,584,085 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for ajk_es project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ajk_es'
SPIDER_MODULES = ['ajk_es.spiders']
NEWSPIDER_MODULE = 'ajk_es.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ajk_es (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 4.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3047.4 Safari/537.36',
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ajk_es.middlewares.AjkEsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# #'ajk_es.middlewares.MyCustomDownloaderMiddleware': 543,
# # 'ajk_es.middlewares.UserAgentMiddleware': 543,
# #'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 511,
# # 'ajk_es.middlewares.IPPOOLS': 540,
#
# }
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'ajk_es.pipelines.AjkEsPipeline': 300,
'ajk_es.pipelines.AjkesPipelinejson': 400,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# IP pool settings
#http://www.daxiangdaili.com/web?tid=556562604570496
IPPOOL=[
{"ippaddr": "125.118.144.161:61234"},
{"ippaddr": "118.254.153.3:3128"},
{"ippaddr": "114.227.131.232:6666"},
{"ippaddr": "125.109.195.80:31000"},
{"ippaddr": "118.254.148.172:3128"},
{"ippaddr": "115.213.224.28:44998"},
{"ippaddr": "110.88.247.180:35545"},
{"ippaddr": "101.201.209.207:7777"},
{"ippaddr": "60.174.74.40:8118"},
{"ippaddr": "59.58.222.204:25721"},
{"ippaddr": "60.175.196.97:30272"},
{"ippaddr": "1.192.230.7:45053"},
{"ippaddr": "124.235.144.136:80"},
{"ippaddr": "121.231.168.163:6666"},
{"ippaddr": "123.180.69.246:8010"},
{"ippaddr": "60.182.179.101:22019"},
{"ippaddr": "14.112.76.126:61234"},
{"ippaddr": "117.69.231.170:61234"},
{"ippaddr": "110.73.6.189:8123"},
{"ippaddr": "175.155.247.78:808"},
{"ippaddr": "182.88.190.245:8123"},
{"ippaddr": "59.51.123.108:3128"},
{"ippaddr": "222.76.187.73:8118"},
{"ippaddr": "123.53.132.243:37922"},
{"ippaddr": "180.121.131.59:3128"},
{"ippaddr": "120.37.165.173:48138"},
{"ippaddr": "182.202.221.60:61234"},
{"ippaddr": "119.190.34.70:80"},
{"ippaddr": "211.159.219.158:80"},
{"ippaddr": "180.121.131.233:808"},
{"ippaddr": "163.177.151.23:80"},
{"ippaddr": "14.215.177.73:80"},
{"ippaddr": "14.215.177.58:80"},
{"ippaddr": "125.121.133.50:808"},
{"ippaddr": "180.149.131.67:80"},
{"ippaddr": "121.8.98.197:80"},
{"ippaddr": "180.97.104.14:80"},
{"ippaddr": "218.59.139.238:80"},
{"ippaddr": "120.199.64.163:8081"},
{"ippaddr": "27.40.155.118:61234"},
{"ippaddr": "114.112.104.223:80"},
{"ippaddr": "112.80.255.32:80"},
{"ippaddr": "180.116.204.110:6666"},
{"ippaddr": "218.107.137.197:8080"},
{"ippaddr": "171.80.88.4:1080"},
{"ippaddr": "123.125.142.40:80"},
{"ippaddr": "49.73.191.52:8118"},
{"ippaddr": "27.217.107.25:8118"},
{"ippaddr": "111.62.243.64:8080"},
{"ippaddr": "115.239.210.42:80"},
{"ippaddr": "112.80.255.21:80"},
{"ippaddr": "163.177.151.162:80"},
{"ippaddr": "121.8.98.198:80"},
{"ippaddr": "222.76.187.147:8118"},
{"ippaddr": "49.81.251.123:8118"},
{"ippaddr": "123.125.115.86:80"},
{"ippaddr": "222.85.5.50:61234"},
{"ippaddr": "49.79.194.13:61234"},
{"ippaddr": "180.212.140.84:8118"},
{"ippaddr": "220.181.163.231:80"},
{"ippaddr": "14.118.254.34:6666"},
{"ippaddr": "218.26.227.108:80"}
]
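
# Editor's note (illustrative): to actually route requests through the pool,
# the commented DOWNLOADER_MIDDLEWARES block above would be enabled, e.g.:
# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 511,
#     'ajk_es.middlewares.IPPOOLS': 540,
# }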
| UTF-8 | Python | false | false | 5,929 | py | 49 | settings.py | 29 | 0.676853 | 0.514435 | 0 | 164 | 35.115854 | 126 |
rhondamak/course-2018-spr-proj | 18,537,078,864,418 | e61f0cae95c5c86ff2186f07134f194754ae1599 | e9833af3cc5dd04a711277141b4450c180452b69 | /aoconno8_dmak1112_ferrys/getClosestMBTAStops.py | 8c2b29c45e389074275655a834a2524ebfb5cdd1 | []
| no_license | https://github.com/rhondamak/course-2018-spr-proj | 8bb182a0865eaa4b0f78439dbb7b8e4244cb7117 | c7a43805695bc7529119734a629e13c0266fe0e8 | refs/heads/master | 2021-05-02T07:23:51.934229 | 2018-07-23T13:49:09 | 2018-07-23T13:49:09 | 120,826,584 | 0 | 0 | null | true | 2018-07-23T13:49:10 | 2018-02-08T22:36:57 | 2018-05-04T02:26:33 | 2018-07-23T13:49:09 | 251,696 | 0 | 0 | 0 | HTML | false | null | import urllib.request
import json
import dml
import prov.model
import datetime
import uuid
from tqdm import tqdm
import rtree
import shapely.geometry
import numpy as np
class getClosestMBTAStops(dml.Algorithm):
    '''
    Returns the closest N MBTA stops (N = num_mbta_stops) for each alcohol license.
    '''
contributor = 'aoconno8_dmak1112_ferrys'
reads = ['aoconno8_dmak1112_ferrys.mbta', 'aoconno8_dmak1112_ferrys.alc_licenses']
writes = ['aoconno8_dmak1112_ferrys.closest_mbta_stops']
@staticmethod
def execute(trial = False):
startTime = datetime.datetime.now()
print("Getting closest MBTA stops to every alcohol license...")
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('aoconno8_dmak1112_ferrys', 'aoconno8_dmak1112_ferrys')
api_key = dml.auth['services']['googlegeocoding']['key']
# mbta
num_mbta_stops = 3
mbta = repo.aoconno8_dmak1112_ferrys.mbta.find()
projected_mbta = getClosestMBTAStops.project(mbta, lambda t: (t['attributes']['latitude'], t['attributes']['longitude'], t['attributes']['name']))
# alc
alc = repo.aoconno8_dmak1112_ferrys.alc_licenses.find()
projected_alc = getClosestMBTAStops.project(alc, lambda t: (t['License Number'], t['Street Number'] + ' ' + t['Street Name'] + ' ' + str(t['Suffix']) + ' ' + t['City'], t['Business Name'], t['DBA']))
if trial:
            # algorithm wasn't working well on trial because the mbta stops
# and alcohol licenses were so far away from each other
# so I just picked a small subset of close ones
projected_alc = [["678", (42.3516079, -71.080906), "Business Name", "Name"]]
alc_lat = projected_alc[0][1][0]
alc_long = projected_alc[0][1][1]
projected_mbta = [[42.350067, -71.078068, "Copley"], [42.348227, -71.075493, "Back Bay"], [42.349224, -71.080600, "Ring Rd @ Boylston St"]]
index = rtree.index.Index()
for i in tqdm(range(len(projected_mbta))):
lat = projected_mbta[i][0]
lon = projected_mbta[i][1]
index.insert(i, shapely.geometry.Point(lon, lat).bounds)
cache = {}
mbta_dist = []
for alc_entry in tqdm(projected_alc):
if not trial:
alc_address = alc_entry[1].replace(' ', '+')
if alc_address in cache:
alc_lat = cache[alc_address][0]
alc_long = cache[alc_address][1]
else:
                    alc_url = 'https://maps.googleapis.com/maps/api/geocode/json?address=' + alc_address + '+MA&key=' + api_key
response = urllib.request.urlopen(alc_url).read().decode("utf-8")
google_json = json.loads(response)
try:
alc_lat = google_json["results"][0]["geometry"]["location"]['lat']
alc_long = google_json["results"][0]["geometry"]["location"]['lng']
cache[alc_address] = (alc_lat,alc_long)
except IndexError:
continue
# get x nearest mbta stops from the alc license
            try:
                nearest = index.nearest((alc_long,alc_lat,alc_long,alc_lat), num_results=num_mbta_stops)
            except TypeError:
                continue  # skip this license; `nearest` would be undefined below
mbta_coords = []
for point in nearest:
mbta_coords += [projected_mbta[point]]
mbta_dist.append({
"alc_license": alc_entry[0],
"alc_coord":(alc_lat, alc_long),
"alc_name": alc_entry[2] if (type(alc_entry[3]) != str and np.isnan(alc_entry[3])) else alc_entry[3],
"mbta_coords":(mbta_coords)
})
repo.dropCollection("closest_mbta_stops")
repo.createCollection("closest_mbta_stops")
repo['aoconno8_dmak1112_ferrys.closest_mbta_stops'].insert_many(mbta_dist)
repo['aoconno8_dmak1112_ferrys.closest_mbta_stops'].metadata({'complete':True})
print(repo['aoconno8_dmak1112_ferrys.closest_mbta_stops'].metadata())
repo.logout()
endTime = datetime.datetime.now()
return {"start":startTime, "end":endTime}
@staticmethod
def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):
'''
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
'''
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('aoconno8_dmak1112_ferrys', 'aoconno8_dmak1112_ferrys')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.
doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.
doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.
doc.add_namespace('geocode', 'https://maps.googleapis.com/maps/api/geocode')
this_script = doc.agent('alg:aoconno8_dmak1112_ferrys#getClosestMBTAStops', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})
licenses = doc.entity('dat:aoconno8_dmak1112_ferrys#alc_licenses', {prov.model.PROV_LABEL:'alc_licenses', prov.model.PROV_TYPE:'ont:DataSet'})
mbta_stops = doc.entity('dat:aoconno8_dmak1112_ferrys#mbta', {prov.model.PROV_LABEL:'mbta', prov.model.PROV_TYPE:'ont:DataSet'})
geocode_locations = doc.entity('geocode:json', {'prov:label':'Google Geocode API', prov.model.PROV_TYPE:'ont:DataResource'})
get_mbta_dist = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)
doc.wasAssociatedWith(get_mbta_dist, this_script)
doc.usage(get_mbta_dist, licenses, startTime, None, {prov.model.PROV_TYPE: 'ont:Computation'})
doc.usage(get_mbta_dist, mbta_stops, startTime, None, {prov.model.PROV_TYPE: 'ont:Computation'})
doc.usage(get_mbta_dist, geocode_locations, startTime, None,
{prov.model.PROV_TYPE:'ont:Retrieval', 'ont:Query':'?address=$&key=$'})
closest_mbta_stops = doc.entity('dat:aoconno8_dmak1112_ferrys#closest_mbta_stops', {prov.model.PROV_LABEL: 'Alcohol Licenses and MBTA Stop Locations', prov.model.PROV_TYPE: 'ont:DataSet'})
doc.wasAttributedTo(closest_mbta_stops, this_script)
doc.wasGeneratedBy(closest_mbta_stops, get_mbta_dist, endTime)
doc.wasDerivedFrom(closest_mbta_stops, licenses, get_mbta_dist, get_mbta_dist, get_mbta_dist)
doc.wasDerivedFrom(closest_mbta_stops, mbta_stops, get_mbta_dist, get_mbta_dist, get_mbta_dist)
doc.wasDerivedFrom(closest_mbta_stops, geocode_locations, get_mbta_dist, get_mbta_dist, get_mbta_dist)
repo.logout()
return doc
def project(R, p):
return [p(t) for t in R]
#getClosestMBTAStops.execute()
#doc = getClosestMBTAStops.provenance()
#print(doc.get_provn())
#print(json.dumps(json.loads(doc.serialize()), indent=4))
## eof
| UTF-8 | Python | false | false | 7,521 | py | 153 | getClosestMBTAStops.py | 102 | 0.606568 | 0.5833 | 0 | 159 | 46.295597 | 208 |
614674490/cloud_robotics_win10 | 11,690,900,996,189 | dbb57fffd274e57fefd39648deedcd3cac47d7b5 | ea96cef0945cfe18fb5596748c0b9009d72b95d2 | /tfenvs/tftest.py | a274e2c09c70eff95e0364e8387e28291d720a21 | []
| no_license | https://github.com/614674490/cloud_robotics_win10 | 04fb0f473af6d24270e94615782b3409896ba8eb | f9d3e27005d9f757f57de08cb46a60b7dab619d5 | refs/heads/main | 2023-06-29T12:16:26.756606 | 2021-07-22T04:48:56 | 2021-07-22T04:48:56 | 388,008,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as if
message = tf.constant('Welcome to the exciting world of Deep Neural Networks!')
# run the computation graph
with tf.Session() as sess:
print(sess.run(message).decode())
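
# Editor's note (illustrative): this uses the TensorFlow 1.x Session API;
# the eager (TF 2.x) equivalent would simply be
#   print(message.numpy().decode())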
| UTF-8 | Python | false | false | 186 | py | 61 | tftest.py | 20 | 0.744318 | 0.744318 | 0 | 5 | 34.2 | 79 |