repo_name stringlengths 7 111 | __id__ int64 16.6k 19,705B | blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 151 | content_id stringlengths 40 40 | detected_licenses sequence | license_type stringclasses 2 values | repo_url stringlengths 26 130 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 42 | visit_date timestamp[ns] | revision_date timestamp[ns] | committer_date timestamp[ns] | github_id int64 14.6k 687M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 12 values | gha_fork bool 2 classes | gha_event_created_at timestamp[ns] | gha_created_at timestamp[ns] | gha_updated_at timestamp[ns] | gha_pushed_at timestamp[ns] | gha_size int64 0 10.2M ⌀ | gha_stargazers_count int32 0 178k ⌀ | gha_forks_count int32 0 88.9k ⌀ | gha_open_issues_count int32 0 2.72k ⌀ | gha_language stringlengths 1 16 ⌀ | gha_archived bool 1 class | gha_disabled bool 1 class | content stringlengths 10 2.95M | src_encoding stringclasses 5 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 10 2.95M | extension stringclasses 19 values | num_repo_files int64 1 202k | filename stringlengths 4 112 | num_lang_files int64 1 202k | alphanum_fraction float64 0.26 0.89 | alpha_fraction float64 0.2 0.89 | hex_fraction float64 0 0.09 | num_lines int32 1 93.6k | avg_line_length float64 4.57 103 | max_line_length int64 7 931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
zhaopingsun/RadarGUI | 10,943,576,716,043 | 42f0f788efd42236758e6a1a2946d1d92bcee6c9 | 06e877c541be92a0ecaeb1ece7fd8556ad99a664 | /PyRadar.py | da329631a234e47e5d424fca80add1f627839d29 | [] | no_license | https://github.com/zhaopingsun/RadarGUI | ded352e4e000dd0c3d3e2fdaac48b3f4221c5aba | 06fbf90aa89f3ed8325337248268859b76634973 | refs/heads/master | 2020-04-08T11:17:47.778466 | 2018-08-01T22:56:56 | 2018-08-01T22:56:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from mayavi import mlab
class Radar:
def __init__(self, path, name):
self.AbsolutePath = path + name
file = open(self.AbsolutePath, 'rb')
file.seek(0)
self.RawData = np.array([int(i) for i in file.read()])
self.Name = name[:-4]
self.Count = int(len(self.RawData) / 2432)
self.RawArray = self.RawData.reshape(self.Count, 2432)
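        # Each 2432-byte record appears to be one radial of a CINRAD-SA/SB (NEXRAD-style)
        # Level II base-data file; that format is an assumption inferred from the fixed
        # record size and the byte offsets used below.  Two-byte header fields are decoded
        # as little-endian 16-bit integers: value = low_byte + 256 * high_byte.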
        self.NumberOfElevation = [self.RawArray[i][44] + self.RawArray[i][45] * 256 for i in range(0, self.Count)]  # number of elevation cuts
        self.StartOfReflectivity = [self.RawArray[i][46] + self.RawArray[i][47] * 256 for i in
                                    range(0, self.Count)]  # start range of the first gate
        self.StartOfSpeed = [self.RawArray[i][48] + self.RawArray[i][49] * 256 for i in range(0, self.Count)]
        self.StepOfReflectivity = [self.RawArray[i][50] + self.RawArray[i][51] * 256 for i in
                                   range(0, self.Count)]  # gate (range-bin) length
        self.StepOfSpeed = [self.RawArray[i][52] + self.RawArray[i][53] * 256 for i in range(0, self.Count)]
        self.NumberOfReflectivity = [self.RawArray[i][54] + self.RawArray[i][55] * 256 for i in
                                     range(0, self.Count)]  # number of gates
        self.NumberOfSpeed = [self.RawArray[i][56] + self.RawArray[i][57] * 256 for i in range(0, self.Count)]
        self.PointerOfReflectivity = [self.RawArray[i][64] + self.RawArray[i][65] * 256 for i in
                                      range(0, self.Count)]  # pointer to the data block
        self.PointerOfSpeed = [self.RawArray[i][66] + self.RawArray[i][67] * 256 for i in range(0, self.Count)]
        self.PointerOfSpectralWidth = [self.RawArray[i][66] + self.RawArray[i][67] * 256 for i in range(0, self.Count)]
        self.ResolutionOfSpeed = [self.RawArray[i][70] + self.RawArray[i][71] * 256 for i in
                                  range(0, self.Count)]  # velocity resolution
        self.vcp = [self.RawArray[i][72] + self.RawArray[i][73] * 256 for i in
                    range(0, self.Count)]  # VCP 11: precipitation, 16; 21: precipitation, 14; 31: clear air, 8; 32: clear air, 7.
        self.Elevation = [(self.RawArray[i][42] + 256 * self.RawArray[i][43]) / 8 * 180 / 4096 for i in
                          range(0, self.Count)]  # elevation angle
        self.Azimuth = [(self.RawArray[i][36] + 256 * self.RawArray[i][37]) / 8 * 180 / 4096 for i in
                        range(0, self.Count)]  # azimuth angle
self.Storage = self.getStorage()
self.AllInfo = self.getAllInfo()
self.x, self.y, self.z, self.r = self.getXyzr()
self.AllInfo = self.getAllInfo()
self.space_info = self.get_space_info()
self.elevation_list = self.get_elevation_list()
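    # getStorage() groups every radial into three [elevation, azimuth, values, ranges]
    # blocks (reflectivity, velocity, spectral width).  Raw codes 0 and 1 (commonly the
    # no-data / range-folded flags) are stored as 0; other codes use the usual scaling:
    # dBZ = (code - 2) / 2 - 32, velocity = (code - 2) / 2 - 63.5 (or code - 2 - 127 at
    # the coarse velocity resolution), spectral width = (code - 2) / 2 - 63.5.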
def getStorage(self):
Storage = [[
            [0, 0, [], []],  # reflectivity, range
            [0, 0, [], []],  # velocity, range
            [0, 0, [], []]  # spectral width, range
] for i in range(0, self.Count)]
for i in range(0, self.Count):
Storage[i][0][0] = self.Elevation[i]
Storage[i][1][0] = self.Elevation[i]
Storage[i][2][0] = self.Elevation[i]
Storage[i][0][1] = self.Azimuth[i]
Storage[i][1][1] = self.Azimuth[i]
Storage[i][2][1] = self.Azimuth[i]
for j in range(0, self.NumberOfReflectivity[i]):
if self.RawArray[i][self.PointerOfReflectivity[i] + j] != 0 and self.RawArray[i][
self.PointerOfReflectivity[i] + j] != 1 and (
self.RawArray[i][self.PointerOfReflectivity[i] + j] - 2) / 2 - 32 >= 0:
Storage[i][0][2].append((self.RawArray[i][self.PointerOfReflectivity[i] + j] - 2) / 2 - 32)
else:
Storage[i][0][2].append(0)
Storage[i][0][3].append(self.StartOfReflectivity[i] + j * self.StepOfReflectivity[i])
for j in range(0, self.NumberOfSpeed[i]):
if self.ResolutionOfSpeed[i] == 2:
if self.RawArray[i][self.PointerOfSpeed[i] + j] != 0 and self.RawArray[i][
self.PointerOfSpeed[i] + j]:
Storage[i][1][2].append((self.RawArray[i][self.PointerOfSpeed[i] + j] - 2) / 2 - 63.5)
else:
Storage[i][1][2].append(0)
if self.ResolutionOfSpeed[i] == 4:
if self.RawArray[i][self.PointerOfSpeed[i] + j] != 0 and self.RawArray[i][
self.PointerOfSpeed[i] + j]:
Storage[i][1][2].append(self.RawArray[i][self.PointerOfSpeed[i] + j] - 2 - 127)
else:
Storage[i][1][2].append(0)
Storage[i][1][3].append(self.StartOfSpeed[i] + j * self.StepOfSpeed[i])
for j in range(0, self.NumberOfSpeed[i]):
if self.RawArray[i][self.PointerOfSpectralWidth[i] + j] != 0 and self.RawArray[i][
self.PointerOfSpectralWidth[i] + j] != 1:
Storage[i][2][2].append((self.RawArray[i][self.PointerOfSpectralWidth[i] + j] - 2) / 2 - 63.5)
else:
Storage[i][2][2].append(0)
Storage[i][2][3].append(self.StartOfSpeed[i] + j * self.StepOfSpeed[i])
return Storage
def get_space_info(self):
        AllInfo_ = [[], [], [], []]  # elevation, azimuth, range, reflectivity
for i in self.Storage:
for j in range(0, int(len(i[0][2]))):
                if 1:  # filter disabled: i[0][2][j] > 0 would drop zero reflectivity; zero entries keep the matrix shape
                    AllInfo_[0].append(i[0][0])  # elevation
                    AllInfo_[1].append(i[0][1])  # azimuth
                    AllInfo_[3].append(i[0][2][j])  # reflectivity factor
                    AllInfo_[2].append(i[0][3][j])  # range
AllInfo_[0].append(0)
AllInfo_[1].append(0)
AllInfo_[2].append(0)
AllInfo_[3].append(75)
        while (len(AllInfo_[0])) % 460 != 0:  # pad with [0,0,0,0] entries until the length is a multiple of 460
AllInfo_[0].append(0)
AllInfo_[1].append(0)
AllInfo_[2].append(0)
AllInfo_[3].append(0)
return AllInfo_
def getAllInfo(self):
        AllInfo_ = [[], [], [], []]  # elevation, azimuth, range, reflectivity
for i in self.Storage:
            # if i[0][0] <= 1 and i[0][0] >= 0:  # restrict the elevation range
            if 1:
                for j in range(0, int(len(i[0][2]))):
                    if 1:  # filter disabled: i[0][2][j] > 0 would drop zero reflectivity; zero entries keep the matrix shape
                        AllInfo_[0].append(i[0][0])  # elevation
                        AllInfo_[1].append(i[0][1])  # azimuth
                        AllInfo_[3].append(i[0][2][j])  # reflectivity factor
                        AllInfo_[2].append(i[0][3][j])  # range
AllInfo_[0].append(0)
AllInfo_[1].append(0)
AllInfo_[2].append(0)
AllInfo_[3].append(75)
        while (len(AllInfo_[0])) % 460 != 0:  # pad with [0,0,0,0] entries until the length is a multiple of 460
AllInfo_[0].append(0)
AllInfo_[1].append(0)
AllInfo_[2].append(0)
AllInfo_[3].append(0)
return AllInfo_
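    # getXyzr() converts every (elevation, azimuth, range) sample to Cartesian
    # coordinates with a flat-earth approximation:
    #   x = r * cos(elevation) * cos(azimuth)
    #   y = r * cos(elevation) * sin(azimuth)
    #   z = r * sin(elevation)
    # where r is the range stored in AllInfo[2] and the angles are in degrees.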
def getXyzr(self):
Info_1 = np.array(self.AllInfo)
x = Info_1[2] * np.cos(np.deg2rad(Info_1[0])) * np.cos(np.deg2rad(Info_1[1]))
y = Info_1[2] * np.cos(np.deg2rad(Info_1[0])) * np.sin(np.deg2rad(Info_1[1]))
z = Info_1[2] * np.sin(np.deg2rad(Info_1[0]))
r = Info_1[3]
return x, y, z, r
def draw(self):
x, y, z, r = self.x, self.y, self.z, self.r
plt.style.use('dark_background')
plt.subplot(1, 1, 1)
plt.title(self.Name)
plt.contourf(x.reshape(int(len(x) / 460), 460), y.reshape(int(len(y) / 460), 460),
r.reshape(int(len(z) / 460), 460), cmap='jet') # contourf jet gray
plt.colorbar()
plt.savefig('C:/data/gui/temp/' + self.Name, dpi=300)
plt.close()
def grey(self):
x, y, z, r = self.x, self.y, self.z, self.r
plt.style.use('dark_background')
plt.subplot(1, 1, 1)
plt.title(self.Name)
plt.contourf(x.reshape(int(len(x) / 460), 460), y.reshape(int(len(y) / 460), 460),
r.reshape(int(len(z) / 460), 460), cmap='gist_gray') # contourf jet gray
plt.colorbar()
plt.savefig('C:/data/img/Z9592' + self.Name, dpi=300)
plt.close()
def get_elevation_list(self):
if self.vcp[0] == 11:
return [0.5, 1.45, 2.4, 3.35, 4.3, 5.2, 6.2, 7.5, 8.7, 10.0, 12.0, 14.0, 16.7, 19.5]
if self.vcp[0] == 12:
return [0.5, 0.9, 1.3, 1.8, 2.4, 3.1, 4.0, 5.1, 6.4, 8.0, 10.0, 12.5, 15.6, 19.5]
if self.vcp[0] == 21:
return [0.5, 1.45, 2.4, 3.35, 4.3, 6.0, 9.9, 14.6, 19.5]
if self.vcp[0] == 31:
return [0.5, 1.5, 2.5, 3.5, 3.5]
    # plot a PPI (constant-elevation scan) for the requested elevation angle
def ppi(self, elevation):
        AllInfo = [[], [], [], []]  # elevation, azimuth, range, reflectivity
for i in self.Storage:
            if elevation - 0.5 <= i[0][0] <= elevation + 0.5:  # select the requested elevation range
                for j in range(0, int(len(i[0][2]))):
                    if 1:  # filter disabled: i[0][2][j] > 0 would drop zero reflectivity; zero entries keep the matrix shape
                        AllInfo[0].append(i[0][0])  # elevation
                        # print(i[0][0])
                        AllInfo[1].append(i[0][1])  # azimuth
                        AllInfo[3].append(i[0][2][j])  # reflectivity factor
                        AllInfo[2].append(i[0][3][j])  # range
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(0)
AllInfo[3].append(75)
        while (len(AllInfo[0])) % 460 != 0:  # pad with [0,0,0,0] entries until the length is a multiple of 460
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(0)
AllInfo[3].append(0)
Info_1 = np.array(AllInfo)
x = Info_1[2] * np.cos(np.deg2rad(Info_1[0])) * np.cos(np.deg2rad(Info_1[1]))
y = Info_1[2] * np.cos(np.deg2rad(Info_1[0])) * np.sin(np.deg2rad(Info_1[1]))
z = Info_1[2] * np.sin(np.deg2rad(Info_1[0]))
r = Info_1[3]
plt.style.use('dark_background')
plt.subplot(1, 1, 1)
plt.title(self.Name)
plt.tricontourf(x, y, r, cmap='jet') # contourf jet gray
plt.colorbar()
plt.savefig('C:/data/gui/temp/ppi_ref/' + self.Name + '_ppi_' + str(elevation) + '.png', dpi=300)
plt.close()
def rhi(self, azimuth):
        AllInfo = [[], [], [], []]  # elevation, azimuth, range, reflectivity
for i in self.Storage:
            if azimuth - 0.5 <= i[0][1] <= azimuth + 0.5:  # select the requested azimuth range
                for j in range(0, int(len(i[0][2]))):
                    if 1:  # filter disabled: i[0][2][j] > 0 would drop zero reflectivity; zero entries keep the matrix shape
                        AllInfo[0].append(i[0][0])  # elevation
                        # print(i[0][0])
                        AllInfo[1].append(i[0][1])  # azimuth
                        AllInfo[3].append(i[0][2][j])  # reflectivity factor
                        AllInfo[2].append(i[0][3][j])  # range
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(0)
AllInfo[3].append(75)
        while (len(AllInfo[0])) % 460 != 0:  # pad with [0,0,0,0] entries until the length is a multiple of 460
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(0)
AllInfo[3].append(0)
Info_1 = np.array(AllInfo)
y = Info_1[2] * np.cos(np.deg2rad(Info_1[0]))
z = Info_1[2] * np.sin(np.deg2rad(Info_1[0]))
r = Info_1[3]
plt.style.use('dark_background')
plt.subplot(1, 1, 1)
plt.title(self.Name)
plt.tricontourf(y, z, r, cmap='jet') # contourf jet gray
plt.colorbar()
plt.savefig('C:/data/gui/temp/rhi_ref/' + self.Name + '_rhi_' + str(azimuth) + '.png', dpi=300)
plt.close()
def points(self):
x = []
y = []
z = []
r = []
for i in range(len(self.r)):
if 70 > self.r[i] > 0:
x.append(np.sqrt(self.x[i]))
y.append(np.sqrt(self.y[i]))
z.append(np.sqrt(self.z[i]))
r.append(self.r[i])
points = mlab.points3d(x, y, z, r, colormap='jet', scale_factor=.25)
mlab.show()
# quick PPI plot of the 0.5° elevation cut
def ppi(absolute_path):
Name = absolute_path[-46:-4]
file = open(absolute_path, 'rb')
file.seek(0)
RawData = np.array([int(i) for i in file.read()])
Count = int(len(RawData) / 2432)
RawArray = RawData.reshape(Count, 2432)
    Elevation = [(RawArray[i][42] + 256 * RawArray[i][43]) / 8 * 180 / 4096 for i in range(0, Count)]  # elevation angle
    Azimuth = [(RawArray[i][36] + 256 * RawArray[i][37]) / 8 * 180 / 4096 for i in range(0, Count)]  # azimuth angle
    PointerOfReflectivity = [RawArray[i][64] + RawArray[i][65] * 256 for i in range(0, Count)]  # pointer to the data block
    StartOfReflectivity = [RawArray[i][46] + RawArray[i][47] * 256 for i in range(0, Count)]  # start range of the first gate
    StepOfReflectivity = [RawArray[i][50] + RawArray[i][51] * 256 for i in range(0, Count)]  # gate (range-bin) length
    AllInfo = [[], [], [], []]  # elevation, azimuth, reflectivity, range
NumberOfReflectivity = []
for i in range(Count):
if 0 < Elevation[i] < 1:
NumberOfReflectivity = int(RawArray[i][54] + RawArray[i][55] * 256)
for j in range(NumberOfReflectivity):
AllInfo[0].append(Elevation[i])
AllInfo[1].append(Azimuth[i])
reflectivity = (RawArray[i][PointerOfReflectivity[i] + j] - 2) / 2 - 32
if reflectivity != 0 and reflectivity != 1 and reflectivity >= 0:
AllInfo[2].append(reflectivity)
else:
AllInfo[2].append(0)
AllInfo[3].append(StartOfReflectivity[i] + j * StepOfReflectivity[i])
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(75)
AllInfo[3].append(0)
    while (len(AllInfo[0])) % 460 != 0:  # pad with [0,0,0,0] entries until the length is a multiple of 460
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(0)
AllInfo[3].append(0)
Info_1 = np.array(AllInfo)
x = Info_1[3] * np.cos(np.deg2rad(Info_1[0])) * np.cos(np.deg2rad(Info_1[1]))
y = Info_1[3] * np.cos(np.deg2rad(Info_1[0])) * np.sin(np.deg2rad(Info_1[1]))
r = Info_1[2]
plt.style.use('dark_background')
plt.subplot(1, 1, 1)
plt.title(Name)
plt.contourf(x.reshape(int(len(x) / 460), 460), y.reshape(int(len(y) / 460), 460),
r.reshape(int(len(r) / 460), 460), cmap='jet') # contourf jet gray
plt.colorbar()
# plt.show()
plt.savefig('C:/data/gui/temp/animation/' + Name, dpi=300)
plt.close()
def test(absolute_path):
Name = absolute_path[-46:-4]
file = open(absolute_path, 'rb')
file.seek(0)
RawData = np.array([int(i) for i in file.read()])
Count = int(len(RawData) / 2432)
RawArray = RawData.reshape(Count, 2432)
    Elevation = [(RawArray[i][42] + 256 * RawArray[i][43]) / 8 * 180 / 4096 for i in range(0, Count)]  # elevation angle
    Azimuth = [(RawArray[i][36] + 256 * RawArray[i][37]) / 8 * 180 / 4096 for i in range(0, Count)]  # azimuth angle
    PointerOfReflectivity = [RawArray[i][64] + RawArray[i][65] * 256 for i in range(0, Count)]  # pointer to the data block
    StartOfReflectivity = [RawArray[i][46] + RawArray[i][47] * 256 for i in range(0, Count)]  # start range of the first gate
    StepOfReflectivity = [RawArray[i][50] + RawArray[i][51] * 256 for i in range(0, Count)]  # gate (range-bin) length
    AllInfo = [[], [], [], []]  # elevation, azimuth, reflectivity, range
NumberOfReflectivity = []
for i in range(Count):
if 5 < Elevation[i] < 8:
print(Elevation[i])
NumberOfReflectivity = int(RawArray[i][54] + RawArray[i][55] * 256)
for j in range(NumberOfReflectivity):
AllInfo[0].append(Elevation[i])
AllInfo[1].append(Azimuth[i])
reflectivity = (RawArray[i][PointerOfReflectivity[i] + j] - 2) / 2 - 32
if reflectivity != 0 and reflectivity != 1 and reflectivity >= 0:
AllInfo[2].append(reflectivity)
else:
AllInfo[2].append(0)
AllInfo[3].append(StartOfReflectivity[i] + j * StepOfReflectivity[i])
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(75)
AllInfo[3].append(0)
    while (len(AllInfo[0])) % 460 != 0:  # pad with [0,0,0,0] entries until the length is a multiple of 460
AllInfo[0].append(0)
AllInfo[1].append(0)
AllInfo[2].append(0)
AllInfo[3].append(0)
Info_1 = np.array(AllInfo)
x = Info_1[3] * np.cos(np.deg2rad(Info_1[0])) * np.cos(np.deg2rad(Info_1[1]))
y = Info_1[3] * np.cos(np.deg2rad(Info_1[0])) * np.sin(np.deg2rad(Info_1[1]))
r = Info_1[2]
plt.style.use('dark_background')
plt.subplot(1, 1, 1)
plt.title(Name)
plt.tricontourf(x, y, r, cmap='jet') # tripcolor
plt.colorbar()
# plt.show()
plt.savefig('C:/data/gui/temp/animation/' + Name, dpi=300)
plt.close()
| UTF-8 | Python | false | false | 17,434 | py | 4 | PyRadar.py | 2 | 0.50957 | 0.448791 | 0 | 359 | 45.420613 | 120 |
Faiyaz42/Resume-Projects | 7,799,660,656,349 | af0031013a45d2e26b944075ed9f751ef90151d1 | f07914af8d8abef96f851691477cd242db495434 | /Hangman/WordGuess.py | 0e2401f6e9a88534d4913dab5957132b70bc2324 | [
"Apache-2.0"
] | permissive | https://github.com/Faiyaz42/Resume-Projects | a126e9cd8e1c9859689426076dd6bae21f54c846 | 9d6e5597aba6672ad7bd20d02e4b7cf7d4654fcd | refs/heads/main | 2023-03-28T17:38:37.819818 | 2021-03-30T18:54:16 | 2021-03-30T18:54:16 | 330,059,753 | 1 | 1 | null | false | 2021-02-13T23:29:28 | 2021-01-16T01:27:21 | 2021-02-07T17:56:35 | 2021-02-13T23:29:28 | 38,896 | 0 | 0 | 0 | Python | false | false | import random
from SecretWord import SecretWord
class WordGuess:
def __init__(self, wordDic):
self.words_dict = wordDic
self.guess_words = [] #constructor,initiation
self.guesses = 0
self.random_word = ''
self.current_guess = ''
def play(self):
""" Plays out a single full game of Word Guess """ #play game
self.random_word = self.chooseSecretWord() #choose random word
print('A secret word has been randomly chosen!')
acontainer = SecretWord() #container(instance) to hold random word
sorted_container = SecretWord() #sorted container or instance
acontainer.setWord(self.random_word) #make linked list
sorted_container.setWord(self.random_word) # '' ''
string1 = str(acontainer) #str of original random word
string2 = sorted_container.sort()
string2 = str(sorted_container) #str of sorted word
find_distance = self.editDistance(string1,string2,len(string1),len(string2)) #find edit distance
alloted_guesses = 2*find_distance #find the alloted number of guesse (*2)
if alloted_guesses < 5:
alloted_guesses = 5
elif alloted_guesses > 15:
alloted_guesses = 15
self.guesses = int(alloted_guesses)
while self.guesses > 0 and acontainer.isSolved() == False:
print('You have %d guesses remaining' % (self.guesses)) #if user hasnt guessed it yet loop
acontainer.printProgress() #print progress
self.getGuess() #get guess
acontainer.update(self.current_guess) #update
if self.current_guess not in string1 and self.current_guess != '*': #if wrong guess
self.guesses = self.guesses - 1 #deduct
if self.guesses > 0 and acontainer.isSolved() == True: #if successfully solved
print('You solved the puzzle!')
print('The secret word was: %s ' % (str(acontainer)))
elif self.guesses == 0: #if failed
print('You have run out of guesses\nGame Over')
print('The secret word was: %s ' % (str(acontainer)))
self.guess_words = []
self.guesses = 0 #reset
self.random_word = ''
self.current_guess = ''
def chooseSecretWord(self):
""" Chooses the secret word that will be guessed """ #choose a random word from the dict
item = random.choice(list(self.words_dict))
return str(item )
def editDistance(self, s1, s2,length1,length2): # edit distance with length1 of string1 and length2 of string 2 ,for later recursion
""" Recursively returns the total number of insertions and deletions required to convert S1 into S2 """
if length1 == 0: #if first string is empty,return second string value since its being totally transferred
return length2
if length2 == 0: #vice versa
return length1
if s1[length1-1]==s2[length2-1]:
return self.editDistance(s1,s2,length1-1,length2-1)
        # recursively find the distance for each operation and take the minimum
return 1 + min(self.editDistance(s1, s2, length1, length2-1), # Insert
self.editDistance(s1, s2, length1-1, length2)) # Remove
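    # Note: this distance counts only insertions and deletions (no substitutions), so it
    # equals len(s1) + len(s2) - 2 * LCS(s1, s2); e.g. editDistance("ab", "ba", 2, 2) == 2.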
def getGuess(self):
""" Queries the user to guess a character in the secret word """
ask = True #ask loop
while ask:
user_input = input('Enter a character that has not been guessed or * for a hint: ')
self.current_guess = str(user_input)
if user_input == '*': #if asked for hint
hint = self.words_dict[self.random_word]
print('Hint: %s' % (hint)) #show hint and deduct 1
self.guesses = self.guesses - 1
ask = False
elif self.current_guess not in self.guess_words: #if guess is not repeated
self.guess_words.append(self.current_guess) #add to guesses list
ask = False
elif user_input in self.guess_words: #if guess is repeated
print('Invalid guess. You have already guessed this letter.')
| UTF-8 | Python | false | false | 5,378 | py | 42 | WordGuess.py | 13 | 0.477501 | 0.465787 | 0 | 91 | 57.967033 | 165 |
kingbar1990/labs | 7,533,372,671,335 | 3d5177889b1d9e13f27a17e566775ede24106596 | 3bd7c46a7bb2da9bfaf4a7dc3e34591e54211f8d | /Lab2.py | 55b5377421becfa51aeaa42269bbd6a7c1673779 | [] | no_license | https://github.com/kingbar1990/labs | a5d997aa06c59a608e1913990abf1c87b70311b8 | c1b4a55b76fbe1d927fd3ac7f779a5cb8e4ac681 | refs/heads/master | 2020-04-06T18:43:15.427704 | 2018-10-24T16:30:59 | 2018-10-24T16:30:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import exp, cos, sin, pi
import random
import time
import sys
n = int(raw_input('Enter n(10000): ') or 10000)
mu = float(raw_input('Enter mu(5): ') or 5)
k = float(raw_input('Enter k(5): ') or 5)
m = 50
maximum = 0
def fx(x):
return (1 - x) * x
def fy(y, mu):
return exp(-mu * y)
def fz(z, k):
return 1 + cos(2 * pi * k * z)
def func(x, y, z, mu, k):
return fx(x) * fy(y, mu) * fz(z, k)
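# The integrand factorizes, so the integral over the unit cube is
#   I = (integral of x*(1-x) dx) * (integral of exp(-mu*y) dy) * (integral of 1 + cos(2*pi*k*z) dz)
#     = 1/6 * (1 - exp(-mu)) / mu * (1 + sin(2*pi*k) / (2*pi*k)),
# each factor taken over [0, 1]; analytical() below evaluates exactly this closed form.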
def analytical():
"""Analytical method"""
i_x = 1 / 6.0
i_y = (-1 / float(mu)) * (exp(-mu) - 1)
i_z = (1 + 1/(2*pi*k) * sin(2*pi*k))
print '\n' + "Analytical value: {}".format(i_x * i_y * i_z)
def rectangle():
"""Rectangle method"""
start = time.time()
h = 1/float(n)
x, y, z, i_x, i_y, i_z = h/2, h/2, h/2, 0, 0, 0
for _ in xrange(n):
i_x += fx(x)
x += h
i_y += fy(y, mu)
y += h
i_z += fz(z, k)
z += h
delta_time = time.time() - start
print '\n' + "Rectangle value: {}".format((i_x * h) * (i_y * h) * (i_z * h)) +\
'\n' + "time processing: {}".format(delta_time)
def simple():
"""Simple calculation of integral"""
start = time.time()
arr = [0]*m
mi, d, s = 0, 0, 0
for i in xrange(m):
for _ in xrange(n):
s += func(random.random(), random.random(), random.random(), mu, k)
arr[i] = s/n
mi += arr[i]
s = 0
for j in xrange(m):
d += (arr[j] - mi/m)**2
delta_time = time.time() - start
print '\n' + "Simple method: {}".format(mi / m) + \
'\n' + "time processing: {}".format(delta_time) + \
'\n' + "dispersion: {}".format(d / m) + \
'\n' + "laboriousness: {}".format(d / m * delta_time)
def find_min_max():
"""Finding function maximum"""
def f(x, y, z):
return (1 - x) * x * exp(-mu * y) * (1 + cos(2 * pi * k * z))
max_func = - sys.maxint - 1
min_func = sys.maxint
maximal_x, maximal_y, maximal_z = None, None, None
minimal_x, minimal_y, minimal_z = None, None, None
for i in xrange(1000000):
randx, randy, randz = random.random(), random.random(), random.random()
result = f(randx, randy, randz)
max_func = max(max_func, result)
if max_func == result:
maximal_x, maximal_y, maximal_z = randx, randy, randz
min_func = min(min_func, result)
if min_func == result:
minimal_x, minimal_y, minimal_z = randx, randy, randz
global maximum
maximum = max_func
print '\n' + "Maximal (x, y):", (maximal_x, maximal_y, maximal_z)
print "Max func value:", max_func, '\n'
print "Minimal (x, y):", (minimal_x, minimal_y, minimal_z)
print "Min func value:", min_func
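# neyman() is a hit-or-miss (acceptance-rejection) estimator: each trial draws a point
# (x, y, z) in the unit cube and a threshold uniform in [0, maximum]; the fraction of
# hits times the bounding-box volume (maximum * 1) estimates the integral.  Like
# simple(), it repeats m batches of n samples to report a mean, dispersion and
# laboriousness (dispersion times running time).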
def neyman():
"""Neyman calculation of integral"""
start = time.time()
arr = [0]*m
mi, d, s = 0, 0, 0
for i in xrange(m):
for _ in xrange(n):
if func(random.random(), random.random(), random.random(), mu, k) > random.random()*maximum:
s += 1
arr[i] = (s/float(n))*maximum*1
mi += arr[i]
s = 0
for j in xrange(m):
d += (arr[j] - mi/m)**2
delta_time = time.time() - start
print '\n' + "Neyman method: {}".format(mi / m) + \
'\n' + "time processing: {}".format(delta_time) + \
'\n' + "dispersion: {}".format(d / m) + \
'\n' + "laboriousness: {}".format(d / m * delta_time)
analytical(), rectangle(), simple(), find_min_max(), neyman()
| UTF-8 | Python | false | false | 3,501 | py | 4 | Lab2.py | 3 | 0.502142 | 0.484719 | 0 | 126 | 26.785714 | 104 |
ralf-meyer/RLSVRD | 19,353,122,663,798 | 66e48d9b66db49e44b7bbc54184956f1bdac321e | 6f81cc8e67475b23b5b343dc14c55b6227ee49c8 | /__init__.py | 68a9d0e4d6a382c68824b2ef371f961a8601c04d | [] | no_license | https://github.com/ralf-meyer/RLSVRD | 232778c185c3e460f81f5c0c7f97512b5ef19026 | 10ce8567ec521c71d8ab4dcdc5685ef1840b6c4d | refs/heads/master | 2021-04-27T17:56:12.614967 | 2018-05-15T14:23:47 | 2018-05-15T14:23:47 | 122,330,560 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from RLSVRD import RLSVRD
from IRWLS_SVR import IRWLS_SVR
| UTF-8 | Python | false | false | 58 | py | 6 | __init__.py | 4 | 0.827586 | 0.827586 | 0 | 2 | 28 | 31 |
LouPlus/jobplus3-12 | 17,257,178,633,714 | b69c31159bb3e39526506030e8808f92138a6890 | 556b48bb805a1be3609c844fc4a251b83a893817 | /app/config.py | 29fc0fd77ff5e0babf04fd2b018879bf9b35a500 | [] | no_license | https://github.com/LouPlus/jobplus3-12 | 3a48fea7001dda32f2756113b51215c1e14f9a9f | bce7eec354187d0ce69621d5cb16cd8a3012e00d | refs/heads/master | 2021-05-13T20:43:28.162737 | 2018-01-30T16:20:57 | 2018-01-30T16:20:57 | 116,917,555 | 0 | 3 | null | false | 2018-01-30T16:20:58 | 2018-01-10T06:29:39 | 2018-01-15T14:26:06 | 2018-01-30T16:20:58 | 28 | 0 | 3 | 2 | Python | false | null | # 域名 和 端口 可根据个人环境改变
DOMAIN_NAME = 'root@localhost'
PORT = '3306'
class BaseConfig(object):
SECRET_KEY = 'wubba lubba dub dub'
INDEX_PER_PAGE = 9
class DevelopementConfig(BaseConfig):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://' + DOMAIN_NAME + ':' + PORT + '/jobplus?charset=utf8'
class ProductionConfig(BaseConfig):
DEBUG = False
class TestingConfig(BaseConfig):
DEBUG = False
configs = {
'development':DevelopementConfig,
'production': ProductionConfig,
'testing': TestingConfig
} | UTF-8 | Python | false | false | 542 | py | 13 | config.py | 8 | 0.723735 | 0.712062 | 0 | 28 | 17.392857 | 98 |
seokzin/Algorithm_Python | 5,703,716,607,205 | 84b0a76f0a736c6b8ad362db4c4883575495f81a | d17522373f7c82d7f16807a34aa90369dca85621 | /Code/SWEA/1966-숫자를 정렬하자.py | 814e0e7b0bbf90da78eec681270da22c742d7358 | [] | no_license | https://github.com/seokzin/Algorithm_Python | 30f15d120a73132ebfc0d55629eab6db2e24eaec | 8a6dbe19dd3613fa2e5db544db69183053dc1f5b | refs/heads/master | 2022-11-15T00:18:11.063138 | 2021-12-27T16:52:30 | 2021-12-27T16:56:20 | 276,327,838 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def selection_sort(s):
if s:
x = min(s)
s.remove(x)
return [x] + selection_sort(s)
else:
return []
for tc in range(1, int(input())+1):
n = int(input())
arr = list(map(int, input().split()))
print(f'#{tc}', *selection_sort(arr))
# 재귀적 선택정렬 직접 구현해봄 | UTF-8 | Python | false | false | 328 | py | 356 | 1966-숫자를 정렬하자.py | 355 | 0.513245 | 0.506623 | 0 | 16 | 17.9375 | 41 |
FabienCharmet/ImpactEval | 807,453,879,054 | 18ab0b5080dff7c527a56a46afc93be683f44700 | ab551cad61f922203918367b681afa3624ac4af5 | /ImpactEval.py | 84c1e818c1ee954b69256557df07576a87847cab | [] | no_license | https://github.com/FabienCharmet/ImpactEval | 115a1a7f6b845e801be66e72d69947be8c020f73 | e976c1acd2bcf4159f9a9dd667f38b39745174f6 | refs/heads/master | 2023-01-15T18:35:46.226873 | 2020-11-21T08:46:13 | 2020-11-21T08:46:13 | 297,141,005 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Spyder editor
"""
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import itertools
import random
G=nx.DiGraph()
Gfunc=nx.DiGraph()
"""
IMPORT FUNCTION OF THE RESOURCE GRAPH
"""
# Rarray = [[0,1,0.1],[1,0,0.7],[0,2,0.4],[2,0,0.1],[0,3,0.2],[3,0,0.1],
# [1,3,0.6],[3,1,0.9],[2,3,0.9],[3,2,0.5]]
# SEarray = [[4,1,0.3],[5,2,0.9]]
# BFarray = [[0,6,0.9],[0,7,0.1],[1,6,0.6],[6,8,0.8],[7,8,0.7]]
Rarray = [[0,1,0.8],[1,0,0.6]]
SEarray = [[2,0,0.8],[3,0,0.8]]
BFarray = [[0,4,0.7],[1,4,0.6]]
Rnodes = set()
for g in Rarray:
Rnodes.add(g[0])
SEnodes = set()
for se in SEarray:
SEnodes.add(se[0])
# Calculating which nodes are Business Resource nodes
# i.e. nodes from Rarray (resource graph) connected to BFarray (business graph)
set_infranodes = set((x[0] for x in Rarray)).union((x[1] for x in Rarray))
BRnodes = set_infranodes.intersection((x[0] for x in BFarray))
# print(BRnodes)
BFnodes = set()
for bf in BFarray:
BFnodes.add(bf[1])
BFnodes = sorted(BFnodes)
Gtemp=Rarray
Gtemp+=SEarray
Gtemp+=BFarray
# print(Gnodes)
# print(SEnodes)
# print(BFnodes)
# print(BRnodes)
for a in Gtemp:
G.add_edge(a[0],a[1],weight=a[2])
for a in BFarray:
Gfunc.add_edge(a[0],a[1],weight=a[2])
"""
IMPORT FUNCTION OF THE TRANSITION MATRICES
"""
# np.random.seed()
"""
COMPUTING THE IMPACT ON A TARGET NODE
"""
def compute_impact_proba(ntimes):
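    # Monte Carlo estimate: each of the ntimes trials draws one uniform number per edge
    # (var_sampling) and treats an edge as "active" when the draw is <= its weight.  A
    # business resource node counts as impacted if some path from a shock event uses only
    # active edges; business function nodes are then impacted through active edges from
    # already-impacted nodes.  counter/ntimes approximates the impact probability, and
    # shock-event nodes are reported with probability 1.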
counter_array=[0]*G.number_of_nodes()
# np.random.seed(42)
for i in range(ntimes):
var_sampling = []
for i in Gtemp:
var_sampling.append([i[0],i[1],np.random.rand()])
# print(var_sampling)
number_of_ticks=0
tick_array=[0]*G.number_of_nodes()
# Checking if business resources are impacted
for brsource in BRnodes:
varcont=True
# print("\n\n")
for sesource in SEnodes:
paths = nx.all_simple_paths(G, source=sesource, target=brsource)
pathlist = list(paths)
random.shuffle(pathlist)
# print(type(pathlist))
for p in pathlist:
# print(p)
ind=0
path_array=[0] * len(p)
path_array[0]=1
while(ind<len(p)-1):
proba = np.random.rand()
proba = (x for x in var_sampling if x[0]==p[ind] and x[1]==p[ind+1])
proba = list(proba)
proba = proba[0][2]
# print(G[p[ind]][p[ind+1]]["weight"])
# print(str(proba) + " " + str(G[p[ind]][p[ind+1]]["weight"]) + " " + str(proba<=G[p[ind]][p[ind+1]]["weight"]))
if(proba<=G[p[ind]][p[ind+1]]["weight"]):
path_array[ind+1]=1
# else:
# print(str(proba) + " " + str(G[p[ind]][p[ind+1]]["weight"]) + " " + str(proba<=G[p[ind]][p[ind+1]]["weight"]))
# print(ind)
# print(G[p[ind]][p[ind+1]]["weight"])
ind+=1
if(0 not in path_array):
# print(p)
# print(path_array)
# print("success for br: " + str(brsource) + " and se: " + str(sesource))
varcont=False
if(tick_array[brsource]==0):
# if(brsource==1):
# print(path_array)
tick_array[brsource]=1
counter_array[brsource]+=1
number_of_ticks+=1
break
else:
print("error")
if(varcont==False):
break
for bfsource in BFnodes:
varcont=True
for brsource in BRnodes:
if(tick_array[brsource]==1):
paths = nx.all_simple_paths(G, source=brsource, target=bfsource)
pathlist=list(paths)
random.shuffle(pathlist)
gen = (p for p in pathlist if len(p)==2)
# gen = (p for p in list(paths))
for p in gen:
ind=0
path_array=[0] * len(p)
path_array[0]=tick_array[brsource]
# print(p)
while(ind<len(p)-1):
proba = np.random.rand()
proba = (x for x in var_sampling if x[0]==p[ind] and x[1]==p[ind+1])
proba = list(proba)
proba = proba[0][2]
if(proba<=G[p[ind]][p[ind+1]]["weight"]):
path_array[ind+1]=1
# print(ind)
# print(G[p[ind]][p[ind+1]]["weight"])
ind+=1
if(0 not in path_array):
# print(path_array)
# print(p)
# print("success for bf: " + str(bfsource) + " and br: " + str(brsource))
varcont=False
if(tick_array[bfsource]==0):
tick_array[bfsource]=1
counter_array[bfsource]+=1
number_of_ticks+=1
else:
print("error")
break
if(varcont==False):
break
if(number_of_ticks>len(BFnodes + list(BRnodes))):
print("error")
for bftarget in BFnodes:
if(tick_array[bfsource]==1):
break
varcont=True
for bfsource in BFnodes:
if(tick_array[bfsource]==1) and (bfsource != bftarget):
paths = nx.all_simple_paths(G, source=bfsource, target=bftarget )
pathlist=list(paths)
random.shuffle(pathlist)
gen = (p for p in pathlist if len(p)==2)
for p in gen:
path_array=[0] * len(p)
path_array[0]=tick_array[bfsource]
# print(p)
proba = np.random.rand()
proba = (x for x in var_sampling if x[0]==p[ind] and x[1]==p[ind+1])
proba = list(proba)
proba = proba[0][2]
if(proba<=G[p[0]][p[1]]["weight"]):
path_array[1]=1
# print(ind)
# print(G[p[ind]][p[ind+1]]["weight"])
if(0 not in path_array):
# print(path_array)
# print(p)
# print("success for bf: " + str(bfsource) + " and br: " + str(brsource))
varcont=False
if(tick_array[bftarget]==0):
tick_array[bftarget]=1
counter_array[bftarget]+=1
number_of_ticks+=1
else:
print("error")
break
if(varcont==False):
break
proba_array = [x / ntimes for x in counter_array]
for i in SEnodes:
proba_array[i]=1.0
print(proba_array)
def verbose_compute_impact_proba():
ntimes=1
counter_array=[0]*G.number_of_nodes()
# np.random.seed(42)
print("Evaluating each random variable\n")
for i in range(ntimes):
var_sampling = []
for i in Gtemp:
var_sampling.append([i[0],i[1],np.random.rand()])
print("Current state: \n")
print(var_sampling)
# print(var_sampling)
number_of_ticks=0
tick_array=[0]*G.number_of_nodes()
# Checking if business resources are impacted
print("\nChecking if resource nodes are impacted\n")
for brsource in BRnodes:
print("Evaluating resource node: " + str(brsource) + "\n")
varcont=True
# print("\n\n")
for sesource in SEnodes:
print("Evaluating the impact of shock event " + str(sesource) + " on node " + str(brsource))
paths = nx.all_simple_paths(G, source=sesource, target=brsource)
pathlist = list(paths)
print("List of paths between shock event" + str(sesource)+ " and node " + str(brsource))
print(pathlist)
random.shuffle(pathlist)
# print(type(pathlist))
for p in pathlist:
print("\nEvaluating impact of SE: " + str(sesource) + " on node: " + str(brsource) + " via path: " + str(p))
# print(p)
ind=0
path_array=[0] * len(p)
path_array[0]=1
while(ind<len(p)-1):
proba = np.random.rand()
proba = (x for x in var_sampling if x[0]==p[ind] and x[1]==p[ind+1])
proba = list(proba)
proba = proba[0][2]
# print(G[p[ind]][p[ind+1]]["weight"])
# print(str(proba) + " " + str(G[p[ind]][p[ind+1]]["weight"]) + " " + str(proba<=G[p[ind]][p[ind+1]]["weight"]))
if(proba<=G[p[ind]][p[ind+1]]["weight"]):
path_array[ind+1]=1
# else:
# print(str(proba) + " " + str(G[p[ind]][p[ind+1]]["weight"]) + " " + str(proba<=G[p[ind]][p[ind+1]]["weight"]))
# print(ind)
# print(G[p[ind]][p[ind+1]]["weight"])
ind+=1
print("Instantiation of random variables in path: " + str(p))
print(path_array)
if(0 not in path_array):
print("SE: " + str(sesource) + " has impacted node: " + str(brsource) + ". No need for further checks.\n")
# print(p)
# print(path_array)
# print("success for br: " + str(brsource) + " and se: " + str(sesource))
varcont=False
if(tick_array[brsource]==0):
# if(brsource==1):
# print(path_array)
tick_array[brsource]=1
counter_array[brsource]+=1
number_of_ticks+=1
break
else:
print("error")
if(varcont==False):
break
print("SE: " + str(sesource) + " has not impacted node: " + str(brsource) + ". Continuing checks for next SE.\n")
for bfsource in BFnodes:
print("Evaluating Business Function node: " + str(bfsource) + "\n")
varcont=True
print("For each resource nodes connected to "+ str(bfsource))
for brsource in BRnodes:
if(tick_array[brsource]==1):
print("Evaluating impact of resource node " + str(brsource) + " on node " + str(bfsource))
paths = nx.all_simple_paths(G, source=brsource, target=bfsource)
pathlist=list(paths)
random.shuffle(pathlist)
gen = (p for p in pathlist if len(p)==2)
# gen = (p for p in list(paths))
for p in gen:
ind=0
path_array=[0] * len(p)
path_array[0]=tick_array[brsource]
# print(p)
while(ind<len(p)-1):
proba = np.random.rand()
proba = (x for x in var_sampling if x[0]==p[ind] and x[1]==p[ind+1])
proba = list(proba)
proba = proba[0][2]
if(proba<=G[p[ind]][p[ind+1]]["weight"]):
path_array[ind+1]=1
# print(ind)
# print(G[p[ind]][p[ind+1]]["weight"])
ind+=1
print("Instantiation of random variables in path: " + str(p))
print(path_array)
if(0 not in path_array):
print("Node " + str(brsource) + " has impacted node: " + str(bfsource) + ". No need for further checks.\n")
# print(path_array)
# print(p)
# print("success for bf: " + str(bfsource) + " and br: " + str(brsource))
varcont=False
if(tick_array[bfsource]==0):
tick_array[bfsource]=1
counter_array[bfsource]+=1
number_of_ticks+=1
else:
print("error")
break
if(varcont==False):
break
print("Node " + str(brsource) + " has not impacted node: " + str(bfsource) + ". Continuing checks for next SE.\n")
if(number_of_ticks>len(BFnodes + list(BRnodes))):
print("error")
for bftarget in BFnodes:
if(tick_array[bfsource]==1):
break
print("Evaluating Business Function node " + str(bftarget) + "\n")
varcont=True
for bfsource in BFnodes:
if(tick_array[bfsource]==1) and (bfsource != bftarget):
paths = nx.all_simple_paths(G, source=bfsource, target=bftarget )
pathlist=list(paths)
if(len(pathlist)>0):
print("Evaluating impact of business node " + str(bfsource) + " on node " + str(bftarget))
else:
print("There are no business function nodes impacting node " + str(bftarget))
break
gen = (p for p in pathlist if len(p)==2)
for p in gen:
print(p)
path_array=[0] * len(p)
path_array[0]=tick_array[bfsource]
# print(p)
proba = np.random.rand()
proba = (x for x in var_sampling if x[0]==p[ind] and x[1]==p[ind+1])
proba = list(proba)
proba = proba[0][2]
if(proba<=G[p[0]][p[1]]["weight"]):
path_array[1]=1
# print(ind)
# print(G[p[ind]][p[ind+1]]["weight"])
print("Instantiation of random variables in path: " + str(p))
print(path_array)
if(0 not in path_array):
print("BF: " + str(bfsource) + " has impacted node: " + str(bftarget) + ". No need for further checks.\n")
# print(path_array)
# print(p)
# print("success for bf: " + str(bfsource) + " and br: " + str(brsource))
varcont=False
if(tick_array[bftarget]==0):
tick_array[bftarget]=1
counter_array[bftarget]+=1
number_of_ticks+=1
else:
print("error")
break
if(varcont==False):
break
print("BF: " + str(bfsource) + " has not impacted node: " + str(bftarget) + ". Continuing checks for next SE.\n")
proba_array = [x / ntimes for x in counter_array]
for i in SEnodes:
proba_array[i]=1.0
print("Final array after one iteration:")
print(proba_array)
def verbose_inclusion_exclusion():
proba_array=[0]*G.number_of_nodes()
print("Evaluating all possible target nodes\n")
for bf in BFnodes + list(BRnodes):
print("Evaluating node: " + str(bf))
# print("\n\n\n")
list_of_paths = []
list_of_proba = []
bfproba=0
sbfproba=""
print("Evaluating all possible shock events\n")
for se in SEnodes:
print("Evaluating shock event: " + str(se))
paths = nx.all_simple_paths(G, source=se, target=bf)
temp_paths = nx.all_simple_paths(G, source=se, target=bf)
print("List of paths between shock event " + str(se)+ " and node " + str(bf))
print(list(temp_paths))
for p in list(paths):
list_of_paths.append(list(p))
probaset = set()
for ind in range(0,len(p)-1):
probaset=probaset.union({tuple([p[ind],p[ind+1],G[p[ind]][p[ind+1]]["weight"]])})
# print([p[ind],p[ind+1],G[p[ind]][p[ind+1]]["weight"]])
# print(p)
# print(probaset)
list_of_proba+=[probaset]
# print(list_of_proba)
# print("\n\n")
# for i in range(len(list_of_paths)):
# print(list_of_paths[i])
# print(list_of_proba[i])
# print("\n\n")
print("\n Aggregating all paths between all shock events and node " + str(bf))
print(list_of_paths)
print("\n For each path, generating a set containing all probabilities of variables in the path")
print(list_of_proba)
print("Generating all possible probability sets related to path combinations of size 1 to " + str(len(list_of_proba)))
for i in range(1,len(list_of_proba)+1):
proof_combination = list(itertools.combinations(list_of_proba,i))
# print(len(proof_combination))
# print(len(proof_combination[0]))
# print(proof_combination)
# temp = proof_combination[0]
for comb in proof_combination:
combset = set()
for elem in comb:
# print(set(elem))
combset = combset.union(set(elem))
print("\nCurrent combination : " +str(combset))
# print("Source: " + str(bf))
# print(combset)
proba=1
sproba=""
print("\n Computing path probabilities by multiplying all probabilities in " + str(combset))
for probaelem in combset:
# print(probaelem[2])
proba*=probaelem[2]
sproba+=" * " + str(probaelem[2])
#
print("\n Path probability: " + str(sproba[2:]) + " = " + str(round(proba,3)))
# print(elem)
# print(proba)
# print(bfproba)
bfproba+=((-1)**(i+1))*proba
sbfproba+=" + ((-1)**("+str(i+1)+"))*"+str(round(proba,3))
print("\n Probability for node " + str(bf) + ": \n" + sbfproba[3:] + " = " + str(round(bfproba,3)))
# print(len(list_of_proba))
# print(len(list_of_paths))
# print("\n\n")
proba_array[bf]=bfproba
for i in SEnodes:
proba_array[i]=1.0
print("\nProbability array:")
print(proba_array)
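# inclusion_exclusion() computes the same impact probabilities exactly: every simple path
# from a shock event to the target is the event "all of its edges are active", and
# P(at least one path active) is expanded by inclusion-exclusion over subsets of paths.
# Each term multiplies the probabilities of the distinct edges in the subset (via set
# union), which correctly handles paths that share edges and are therefore dependent.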
def inclusion_exclusion():
proba_array=[0]*G.number_of_nodes()
for bf in BFnodes + list(BRnodes):
# print("\n\n\n")
list_of_paths = []
list_of_proba = []
bfproba=0
for se in SEnodes:
paths = nx.all_simple_paths(G, source=se, target=bf )
for p in list(paths):
list_of_paths.append(list(p))
probaset = set()
for ind in range(0,len(p)-1):
probaset=probaset.union({tuple([p[ind],p[ind+1],G[p[ind]][p[ind+1]]["weight"]])})
# print([p[ind],p[ind+1],G[p[ind]][p[ind+1]]["weight"]])
# print(p)
# print(probaset)
list_of_proba+=[probaset]
# print(list_of_proba)
# print("\n\n")
# for i in range(len(list_of_paths)):
# print(list_of_paths[i])
# print(list_of_proba[i])
# print("\n\n")
for i in range(1,len(list_of_proba)+1):
proof_combination = list(itertools.combinations(list_of_proba,i))
# print(len(proof_combination))
# print(len(proof_combination[0]))
# temp = proof_combination[0]
for comb in proof_combination:
combset = set()
for elem in comb:
# print(set(elem))
combset = combset.union(set(elem))
# print("Source: " + str(bf))
# print(combset)
proba=1
for probaelem in combset:
# print(probaelem[2])
proba*=probaelem[2]
# print(elem)
# print(proba)
# print(bfproba)
bfproba+=((-1)**(i+1))*proba
# print(len(list_of_proba))
# print(len(list_of_paths))
# print("\n\n")
proba_array[bf]=bfproba
for i in SEnodes:
proba_array[i]=1.0
print(proba_array)
# nx.draw(G)
labeldict={}
for i in range(G.number_of_nodes()):
labeldict[i]=str(i)
# nx.draw(G,labels=labeldict,with_labels=True)
# plt.show()
sampling = [10**x for x in range(3,4)]
# sampling = [1]
for i in sampling:
compute_impact_proba(i)
verbose_compute_impact_proba()
verbose_inclusion_exclusion()
#inclusion_exclusion()
# print(BFnodes)
# print(BRnodes)
# a = set({tuple([0,1]),tuple([0,4]),tuple([0,3]),tuple([0,2]),tuple([0,2])})
# b = set({tuple([1,1]),tuple([1,4]),tuple([1,3]),tuple([1,2]),tuple([0,2])})
# c = set({tuple([2,1]),tuple([2,4]),tuple([2,3]),tuple([2,2]),tuple([1,2])})
# a=a.union({tuple([0,5])})
# for x in list(itertools.combinations(a,2)):
# print(x)
# d = set.union(a,b,c)
# print(d)
# for x in list(itertools.combinations(d,2)):
# print(x)
# nx.draw(G)
# plt.savefig("simple_path.png") # save as png
# plt.draw() # display
# print("Nodes of graph: ")
# print(G.nodes())
# print("Edges of graph: ")
# # [print(G.get_edge_data(*e)) for e in G.edges]
# for e in G.edges:
# print(G.get_edge_data(*e)['weight'])
# print(e)
| UTF-8 | Python | false | false | 23,316 | py | 1 | ImpactEval.py | 1 | 0.437916 | 0.423118 | 0 | 566 | 40.190813 | 148 |
Mumbaikar007/Code | 12,816,182,420,545 | 09aedb3c3130d28e63b4f9ef5bdc447f343af387 | dee4f176db476b621a208e1e9c8562740228c84c | /CoursesCodes/PythonGUI/basicsOfTkinter.py | dbf7f5b90266d9e9e37ed8ca7ccad4cb5d641a11 | [] | no_license | https://github.com/Mumbaikar007/Code | 7a62f2bcb9e01c06d3d83370f78298a76f94ee87 | b55ee260f3e4e33fb59ba6ed35c2ee29443bbc11 | refs/heads/master | 2021-09-27T05:31:00.973615 | 2018-11-06T10:42:31 | 2018-11-06T10:42:31 | 104,623,021 | 6 | 8 | null | false | 2018-10-31T15:55:05 | 2017-09-24T06:10:27 | 2018-10-30T17:15:53 | 2018-10-31T15:41:01 | 8,054 | 4 | 7 | 0 | Makefile | false | null |
from tkinter import *
a = Tk()
a.title("My First Window")
a.mainloop()
| UTF-8 | Python | false | false | 75 | py | 206 | basicsOfTkinter.py | 174 | 0.64 | 0.64 | 0 | 7 | 9.571429 | 26 |
rahulmahato46/leetcode-june20 | 2,645,699,899,507 | 9a8b242b4651e10e6b319f7e6e36eb675d1fab27 | f67447f374c4a4e8889e02b4614eba0e05ea9544 | /queue-construction-by-height-part2.py | 3273b5a955bc61f7d630871a88f2252550df9c72 | [] | no_license | https://github.com/rahulmahato46/leetcode-june20 | 6519cd55ae12f8e699797588a0652da3c747e5ab | 2f378037e5c234cf4a806082d6192485338cd97c | refs/heads/master | 2022-10-02T16:31:30.334070 | 2020-06-06T14:34:54 | 2020-06-06T14:34:54 | 268,527,999 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:
new_arr = []
people = sorted(people, key= lambda x:(-x[0],x[1]))
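        # Greedy reconstruction: sort by height descending (ties by smaller k), then
        # insert each [h, k] at index k.  Everyone placed earlier is at least as tall,
        # so the count of taller-or-equal people in front of [h, k] stays exactly k.
        # e.g. [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]] -> [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]]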
for var in people:
new_arr.insert(var[1],var)
return new_arr | UTF-8 | Python | false | false | 261 | py | 3 | queue-construction-by-height-part2.py | 3 | 0.563218 | 0.551724 | 0 | 7 | 36.428571 | 75 |
lanzhou2012/Object-detection | 7,060,926,280,841 | e174df36f21abe5971bcd0d7d8d4e36341e6745e | 9fd4467dcd715b6b33c4e0cb24656c82a592ac1d | /main/my_method/validation.py | 201aaf3d5fbfd60f3bdbb6d10a388f0bbf57a03e | [] | no_license | https://github.com/lanzhou2012/Object-detection | 911711c17070c2a6d677099bc4796bb2446b7aa4 | 11d506051c274484bea31335a1ec5e12569f9719 | refs/heads/master | 2020-03-05T22:59:31.088966 | 2017-03-27T07:15:36 | 2017-03-27T07:15:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import keras
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
import tensorflow as tf
import os
import operator
import itertools
from collections import Counter
import pickle
from ssd import SSD300
from ssd_utils import BBoxUtility
import pascal_VOC
def numDups(a, b):
if len(a)>len(b):
a,b = b,a
a_count = Counter(a)
b_count = Counter(b)
return sum(min(b_count[ak], av) for ak,av in a_count.items())
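# numDups counts common elements with multiplicity (a Counter intersection),
# e.g. numDups([1, 1, 2], [1, 2, 2]) == 2; checker() below uses it to count how many
# predicted labels match the ground-truth labels of an image.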
def load_model():
# matplotlib inline
plt.rcParams['figure.figsize'] = (8, 8)
plt.rcParams['image.interpolation'] = 'nearest'
np.set_printoptions(suppress=True)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
set_session(tf.Session(config=config))
voc_classes = ['Aeroplane', 'Bicycle', 'Bird', 'Boat', 'Bottle',
'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',
'Dog', 'Horse','Motorbike', 'Person', 'Pottedplant',
'Sheep', 'Sofa', 'Train', 'Tvmonitor']
NUM_CLASSES = len(voc_classes) + 1
input_shape=(300, 300, 3)
model = SSD300(input_shape, num_classes=NUM_CLASSES)
model.load_weights('../data/weights_SSD300.hdf5', by_name=True)
bbox_util = BBoxUtility(NUM_CLASSES)
return model, bbox_util
def validates_images(model, bbox_util):
inputs = []
images = []
files = []
for filename in os.listdir('../data/VOC2007/JPEGImages'):
if filename.endswith('.jpg'):
files.append(filename)
b =0
for filename in sorted(files):
if b < 3:
img_path = '../data/VOC2007/JPEGImages/' + filename
img = image.load_img(img_path, target_size=(300, 300))
img = image.img_to_array(img)
images.append(imread(img_path))
inputs.append(img.copy())
b += 1
inputs = preprocess_input(np.array(inputs))
preds = model.predict(inputs, batch_size=1, verbose=1)
results = bbox_util.detection_out(preds)
return results, img
def process_images(results):
image_list = []
for i in range(len(results)):
a_list = []
# Parse the outputs.
det_label = results[i][:, 0]
det_conf = results[i][:, 1]
det_xmin = results[i][:, 2]
det_ymin = results[i][:, 3]
det_xmax = results[i][:, 4]
det_ymax = results[i][:, 5]
# Get detections with confidence higher than 0.4, as it gives the highest % accuracy of labels.
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.4]
# Format of a_list: confidence, labels, xmin, ymin, xmax, ymax
a_list.append(det_conf[top_indices])
a_list.append(det_label[top_indices].tolist())
a_list.append(det_xmin[top_indices])
a_list.append(det_ymin[top_indices])
a_list.append(det_xmax[top_indices])
a_list.append(det_ymax[top_indices])
image_list.append(a_list)
return image_list
def checker(image_list, img):
for i in range(len(image_list)):
for j in range(image_list[i][0].shape[0]):
if j < 1:
xmin = int(round(image_list[i][2][j] * img.shape[1]))
ymin = int(round(image_list[i][3][j] * img.shape[0]))
xmax = int(round(image_list[i][4][j] * img.shape[1]))
ymax = int(round(image_list[i][5][j] * img.shape[0]))
print(xmin, ymin, xmax, ymax)
print(image_list[i][2][j], image_list[i][3][j], image_list[i][4][j], image_list[i][5][j])
input('1')
with open('../data/VOC2007.pkl', 'rb') as read:
x = pickle.load(read)
sorted_x = sorted(x.items(), key=operator.itemgetter(0))
number_correct = 0
total = 0
extras = 0
a = 0
for i, j in enumerate(sorted_x):
if a < 3:
print(j[1])
list_of_one_hot = [[k for k, int1 in enumerate(a_list[3:]) if int1 == 1.0] for a_list in j[1]]
list_of_one_hot = list(itertools.chain.from_iterable(list_of_one_hot))
# This counts how many labels there are in total
total += len(list_of_one_hot)
# This counts how many labels are correct
similarity = numDups(image_list[i][1], list_of_one_hot)
number_correct +=similarity
# This counts how many extra labels are identified
extras += len(image_list[1][i])
a+=1
return number_correct, total, extras
if __name__ == '__main__':
model, bbox_util = load_model()
results, img = validates_images(model, bbox_util)
image_list = process_images(results)
number_correct, total, extras = checker(image_list, img)
if extras > total:
extras -= total
else:
extras = 0
percentage = (number_correct - extras)/total*100
print('Total: {}\nCorrect: {}\nExtras: {}\nPercentage: {}%'.format(total, number_correct, extras, percentage))
| UTF-8 | Python | false | false | 5,234 | py | 3 | validation.py | 1 | 0.588269 | 0.570501 | 0 | 151 | 33.662252 | 114 |
lewbenj/qbb2018-answers | 19,292,993,120,608 | 9e6cd6aac38cebf5e32f2bb8fc3ef76cd800561e | 7809ad3224f25b41b5be2c3bf572eb69701b1a17 | /week8-lab/motif.py | a4460f6586853a93655691ef88474dac6f7753d2 | [] | no_license | https://github.com/lewbenj/qbb2018-answers | 3346ca5d2785c987d8fbba0740ace224d7a2be39 | e845c928b22450cd8ed26c211420bb293e7043b6 | refs/heads/master | 2020-03-27T09:06:04.647369 | 2018-12-24T21:40:29 | 2018-12-24T21:40:29 | 146,313,771 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import sys
import matplotlib.pyplot as plt
#Usage: ./motif.py ER4_peaks.narrowPeak.bed
Bed = open(sys.argv[1])
Percentage = []
for count, line in enumerate(Bed):
"Skip the header"
    if line.startswith("#"):
        continue
    else:
        fields = line.rstrip("\r\n").split("\t")
        #print(fields)
        sm = float(fields[3])  # motif start
        em = float(fields[4])  # motif end
        sp = float(fields[10])  # sequence start
        ep = float(fields[11])  # sequence end
        #print(sm,em,sp,ep)
        percentage = abs((sm-sp)/(ep-sp))  # motif position relative to the sequence length
Percentage.append(percentage)
fig, ax = plt.subplots()
fig.set_size_inches(12, 9)
plt.hist(Percentage, bins=60, color="red")
ax.set_xlabel("Relative position in the sequences")
ax.set_ylabel("How often the motifs are detected")
fig.suptitle("Top 100 motifs - Week 8 - Density plot")
fig.savefig("Motifs.png")
plt.tight_layout()
plt.close(fig) | UTF-8 | Python | false | false | 862 | py | 37 | motif.py | 35 | 0.642691 | 0.62181 | 0 | 36 | 22.972222 | 54 |
n-kimberly/Playground_Python | 34,359,763,233 | 9bbb388ecbf25e2d0c4c6ce889878a271e58111a | 6db5887ee3f7236c5ec3f55c32db887a061f8060 | /hb/w1/d4_markov-ffs/n-grams.py | 623b9fdff8cd99d7cc1f5b51e0f68c0a16adc315 | [] | no_license | https://github.com/n-kimberly/Playground_Python | f43851410071f2e5bb3256495d6398e478b358ab | fad99fdb690e0eeb3f2c9141e70bf4a9a33a2aef | refs/heads/master | 2020-03-29T14:14:53.271813 | 2018-10-02T04:39:03 | 2018-10-02T04:39:03 | 141,058,022 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Further Study
Do any of the following:
Replace our dummy file with your own. Pass in this file from the command line, using sys.argv.
The longer the sequence of words to the left of the arrow, the closer it becomes to the original text, as well as valid English, because there are fewer and fewer random successors. Here, we use n_grams (word pairs) and a successor, but we could use trigrams or n-grams (sequences of n words). The longer the n-gram, the closer you get to the source text.
Modify the program to allow any number of words to use as keys so you can easily choose the size of your n-gram used in your chain rather than always using bi-grams.
Begin on a capital letter and end only at an instance of sentence punctuation.
See what happens when you mix two different authors together as a single source. This often works best when they have somewhat similar writing styles; trying to combine Dr. Seuss and the Bible probably wouldn’t work as well as combining two Jane Austen books.
i.e.
>> python3 n-grams.py pe.txt sorority-speech.txt kant.txt
"""
import sys
import random
import markov_helpers as mh
def make_chains(text_string, n):
chains = {}
words = text_string.split()
current_index = 0
remaining_index = len(words)-1
    while current_index < len(words)-(n+1):
        n_gram = tuple(words[current_index:current_index+n])
        subsequent_word = words[current_index+n]  # the word immediately after the n-gram
        current_index += 1
        remaining_index -= 1
if n_gram in chains:
chains[n_gram].append(subsequent_word)
else:
chains[n_gram] = [subsequent_word]
return chains
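# make_chains maps each n-gram (a tuple of n consecutive words) to the list of words that
# follow it; e.g. with n = 2 the text "a b c d e" gives
# {('a', 'b'): ['c'], ('b', 'c'): ['d']}.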
n_gram = 3
input_text = ""
for i in range(len(sys.argv[1:])):
print(sys.argv[i+1])
input_text += mh.open_and_read_file(sys.argv[i+1])
print(input_text)
chains = make_chains(input_text, n_gram)
for chain in chains:
print(chain,":",chains[chain])
random_text = mh.make_text(chains, n_gram)
print(random_text)
| UTF-8 | Python | false | false | 1,978 | py | 241 | n-grams.py | 206 | 0.702935 | 0.697874 | 0 | 58 | 33.068966 | 355 |
foxreymann/Violent-Python-Examples | 9,285,719,335,126 | 7345c6f5a2500994b348de9369dea92b01bf4ced | 6c4824d50ec1d302a37edaafc2abc82b3a7153df | /Chapter-1/iteration.py | f8ddffbc25bac66b25597978b124a074d89ad73c | [] | no_license | https://github.com/foxreymann/Violent-Python-Examples | 223414bd410af54d3987898188c4365ca0682549 | 1f74792f1ec55f91569981c24cf44384ef396518 | refs/heads/master | 2021-08-06T06:25:09.375291 | 2017-11-03T16:53:06 | 2017-11-03T16:53:06 | 109,045,939 | 0 | 0 | null | true | 2017-10-31T19:57:01 | 2017-10-31T19:56:59 | 2017-10-26T22:38:30 | 2012-12-23T19:08:49 | 844 | 0 | 0 | 0 | null | false | null | import socket
def retBanner(ip, port):
try:
socket.setdefaulttimeout(2)
s = socket.socket()
s.connect((ip,port))
banner = s.recv(1024)
return banner.decode().strip('\n')
except:
return
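# retBanner grabs the first 1024 bytes the service sends after connecting (its banner);
# checkVulns below simply substring-matches that banner against the version string this
# script treats as vulnerable.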
def checkVulns(banner):
if "vsFTPd 3.0.3" in banner:
return "vsFTPd is vulnerable"
else:
return "FTP Server is not vulnerable"
def main():
ports = [21, 22, 80]
for host in range(109, 112):
ip = '100.109.237.' + str(host)
for port in ports:
banner = retBanner(ip, port)
if banner:
print(ip + ':' + str(port) + ': ' + banner)
print(checkVulns(banner))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 737 | py | 4 | iteration.py | 3 | 0.522388 | 0.483039 | 0 | 30 | 23.566667 | 59 |
nekazino/pythonlab1 | 17,446,157,175,005 | 8345b90b387b177f4554bbaf8886e59a0a6b06db | 9130af7f40de20a5fcf9eecfed747df1e4f84a0d | /src/program.py | b13a9b768647865255b830b0b91c07107032f2e7 | [] | no_license | https://github.com/nekazino/pythonlab1 | 9e222081ecc393e75a117a7bea37d3cac7b896ba | a094fe24d91b9c521bb45a4e77be3b84bf2f5ab3 | refs/heads/master | 2019-04-09T14:50:28.232033 | 2017-05-06T19:00:32 | 2017-05-06T19:00:32 | 89,696,201 | 1 | 0 | null | false | 2017-05-06T19:00:33 | 2017-04-28T10:34:23 | 2017-05-04T20:30:30 | 2017-05-06T19:00:33 | 10 | 0 | 0 | 0 | Python | null | null | '''
program
=======
Provides a kivy.App implementation which create main program window.
'''
import kivy
kivy.require('1.6.0')
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from abstractdeptreefactory import *
from treewidget import TreeWidget
import threading
class Program(App):
'''kivy.App implementation which create main program window.
'''
def __init__(self, dep_tree_factory):
if not isinstance(dep_tree_factory, AbstractDepTreeFactory):
raise TypeError("dep_tree_factory must be an AbstractDepTreeFactory implementation.")
super(Program, self).__init__()
self.factory = dep_tree_factory;
def build(self):
'''Initializes gui components.
'''
parent = BoxLayout(padding=10, orientation="vertical")
tb_package_name = TextInput(size_hint=(1, 0.05), multiline=False, hint_text="package name")
btn_get_deps = Button(text="Build", size_hint=(1, 0.05))
trv_deps = TreeWidget(size_hint=(1, 0.85), hide_root=True)
lbl_status = Label(text="ready", size_hint=(1, 0.05))
def load_dep():
lbl_status.text = "building..."
try:
trv_deps.load(self.factory.create(tb_package_name.text))
except Exception as e:
btn = Button(text="OK")
popup = Popup(
title=str(e),
content=btn,
auto_dismiss=False,
size_hint=(None, None),
size=(350, 100)
)
btn.bind(on_press=popup.dismiss)
popup.open()
lbl_status.text = "ready"
btn_get_deps.bind(on_press=lambda instance: threading.Thread(target=load_dep).start())
parent.add_widget(tb_package_name)
parent.add_widget(btn_get_deps)
parent.add_widget(trv_deps)
parent.add_widget(lbl_status)
return parent
| UTF-8 | Python | false | false | 2,129 | py | 10 | program.py | 6 | 0.596524 | 0.583842 | 0 | 65 | 31.753846 | 99 |
motyliak/motor | 6,004,364,311,599 | d1132f6b5bcf618c7a21c68db0911f66fdb1b21e | cae3f975267a5f410daede1394ca8a0558c94b2c | /main.py | 3467eb2f82db0efc9e17f6d2dd58b58caf88887d | [] | no_license | https://github.com/motyliak/motor | c0bfecb4a9e62c7d7e3224686b2b649bd38da3d0 | e64fcbcddcffe26e0aaadde3183b2be2a6cffa03 | refs/heads/master | 2022-11-07T20:37:43.839236 | 2020-06-23T21:23:18 | 2020-06-23T21:23:18 | 273,765,453 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def motor_off():
pins.digital_write_pin(DigitalPin.P0, 0)
basic.show_icon(IconNames.NO)
def on_button_pressed_a():
global remote_control
remote_control = False
motor_on()
strip.rotate(1)
strip.show()
input.on_button_pressed(Button.A, on_button_pressed_a)
def on_button_pressed_b():
global remote_control
motor_off()
remote_control = True
input.on_button_pressed(Button.B, on_button_pressed_b)
def motor_on():
pins.digital_write_pin(DigitalPin.P0, 1)
basic.show_icon(IconNames.YES)
strip: neopixel.Strip = None
remote_control = False
pins.digital_write_pin(DigitalPin.P0, 1)
pins.set_pull(DigitalPin.P1, PinPullMode.PULL_UP)
basic.show_icon(IconNames.HAPPY)
remote_control = True
strip = neopixel.create(DigitalPin.P2, 8, NeoPixelMode.RGB)
strip.show_rainbow(1, 360)
strip.show()
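# Main loop: when remote control is enabled, the motor turns on while pin P1 reads low (button pressed) and off otherwise.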
def on_forever():
strip.show()
if remote_control:
if pins.digital_read_pin(DigitalPin.P1) == 0:
motor_on()
else:
motor_off()
basic.forever(on_forever)
| UTF-8 | Python | false | false | 1,034 | py | 2 | main.py | 2 | 0.68472 | 0.669246 | 0 | 41 | 24.219512 | 59 |
flashiam/wodo-backend | 1,348,619,737,609 | e3b8a0fcda0e199e08cbf270bdb3f6cb0fcd67ce | 43c61d5186ffe1e0ca1fba27ac71a860af3f1712 | /wodo/migrations/0016_auto_20201127_0929.py | 68bfb172e738c74a78b0ea858cfdbc49d294012a | [] | no_license | https://github.com/flashiam/wodo-backend | 00e4b7b15dc60458419a86c04c4fff30bb0028a0 | a020f7ab91410a03b921488d990d8013970b862f | refs/heads/master | 2023-06-05T17:12:05.944944 | 2020-12-04T06:49:52 | 2020-12-04T06:49:52 | 383,648,984 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2020-11-27 09:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wodo', '0015_auto_20201127_0928'),
]
operations = [
migrations.AlterField(
model_name='dutydenials',
name='user',
field=models.ForeignKey(default='shiva12', on_delete=django.db.models.deletion.SET_DEFAULT, to='wodo.appuser', to_field='username', verbose_name='username'),
),
migrations.AlterField(
model_name='filtercache',
name='userF',
field=models.ForeignKey(default='shiva12', on_delete=django.db.models.deletion.SET_DEFAULT, to='wodo.appuser', verbose_name='User'),
),
migrations.AlterField(
model_name='saved',
name='userS',
field=models.ForeignKey(default='shiva12', on_delete=django.db.models.deletion.SET_DEFAULT, to='wodo.appuser', to_field='username', verbose_name='User'),
),
migrations.AlterField(
model_name='transaction',
name='userT',
field=models.ForeignKey(default='shiva12', on_delete=django.db.models.deletion.SET_DEFAULT, to='wodo.appuser', to_field='username', verbose_name='User'),
),
migrations.AlterField(
model_name='workrating',
name='userR',
field=models.ForeignKey(default='shiva12', on_delete=django.db.models.deletion.SET_DEFAULT, to='wodo.appuser', to_field='username', verbose_name='User'),
),
]
| UTF-8 | Python | false | false | 1,598 | py | 54 | 0016_auto_20201127_0929.py | 34 | 0.62015 | 0.594493 | 0 | 39 | 39.974359 | 169 |
LsxnH/ATLAS | 10,892,037,068,817 | 32c6ec1b144bde546142b7b664057afcd5892ce6 | 6b83126f9d0507129e732d559c63de21d7e3da6e | /smartScripts/checkEntries.py | 73c9d35c68109200acf9d38408f25b209313ce04 | [] | no_license | https://github.com/LsxnH/ATLAS | 51773ee6496417eae0e211b443d31e3882003581 | b1573bc0f4456c330749570ec7881e3f812615e6 | refs/heads/master | 2020-05-27T13:27:03.574661 | 2019-06-24T07:59:16 | 2019-06-24T07:59:16 | 31,648,326 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import os, fnmatch
import re
import sys
import ROOT
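# Sum the number of entries of the "nominal" tree over every *.root file in the directory given as the first argument.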
def main():
listOfFiles = os.listdir(sys.argv[1])
pattern = "*.root"
ntotal = 0
for ifile in listOfFiles:
if fnmatch.fnmatch(ifile, pattern):
inputfile = ROOT.TFile(sys.argv[1]+"/"+ifile,"READ")
nominal = inputfile.Get("nominal")
print ifile, " entries: ", nominal.GetEntries()
ntotal += nominal.GetEntries()
print "total entries: ", ntotal
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 540 | py | 8 | checkEntries.py | 3 | 0.581481 | 0.575926 | 0 | 21 | 24.714286 | 64 |
matibilkis/cartpole-tf2 | 7,662,221,676,487 | 14a043409180d2f101ba192a6231b7a94943cf35 | 1d2650d3e0e295635ed9e82e9decf0b34dc319be | /main.py | 291f73b77e7f3d2367894c87ff6a5dd3109a12e7 | [] | no_license | https://github.com/matibilkis/cartpole-tf2 | 1a62a14ddf2f167ae50f296de967a7a18b236733 | c26457f1a18e4c1f614cec99800124d1506de9f2 | refs/heads/master | 2020-09-03T17:37:46.658348 | 2019-11-04T14:37:17 | 2019-11-04T14:37:17 | 219,523,231 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import gym
import keras
import datetime as dt
import tensorflow as tf
import random
import numpy as np
import math
from tensorflow.keras.layers import Dense
from tqdm import tqdm
from gym import wrappers
STORE_PATH = '/run'
MAX_EPSILON = 1
MIN_EPSILON = 0.01
LAMBDA = 0.0005
GAMMA = 0.95
BATCH_SIZE = 40
TAU = 0.08
RANDOM_REWARD_STD = 1.0
# env = gym.make("MountainCar-v0")
state_size = 4
env = gym.make("CartPole-v1")
num_actions = env.action_space.n
class Memory():
def __init__(self, max_memory):
self._max_memory = max_memory
self._samples = []
def add_sample(self, sample):
self._samples.append(sample)
if len(self._samples) > self._max_memory:
self._samples.pop(0)
def sample(self, no_samples):
if no_samples > len(self._samples):
return random.sample(self._samples, len(self._samples))
else:
return random.sample(self._samples, no_samples)
@property
def num_samples(self):
return len(self._samples)
memory = Memory(50000)
class QN(tf.keras.Model):
def __init__(self):
super(QN,self).__init__()
self.l1 = Dense(30, input_shape=(4,), kernel_initializer='random_uniform',
bias_initializer='random_uniform')
self.l2 = Dense(35, kernel_initializer='random_uniform',
bias_initializer='random_uniform')
# self.l21 = Dense(90, kernel_initializer='random_uniform',
# bias_initializer='random_uniform')
self.l3 = Dense(num_actions, kernel_initializer='random_uniform',
bias_initializer='random_uniform')
def call(self, input):
feat = tf.nn.relu(self.l1(input))
feat = tf.nn.relu(self.l2(feat))
# feat = tf.nn.relu(self.l21(feat))
value = self.l3(feat)
return value
def choose_action(state, primary_network, eps):
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
        state = np.expand_dims(np.array(state),axis=0)  # otherwise this throws an error
return np.argmax(primary_network(state))
def train(primary_network, memory, target_network):
if memory.num_samples < BATCH_SIZE*3:
return 0
else:
batch = memory.sample(BATCH_SIZE)
states = np.array([val[0] for val in batch])
actions = np.array([val[1] for val in batch])
rewards = np.array([val[2] for val in batch])
next_states = np.array([(np.zeros(state_size)
if val[3] is None else val[3]) for val in batch])
prim_qt = primary_network(np.expand_dims(states,axis=0)) # Q_t[s,a]
prim_qtp1 = primary_network(np.expand_dims(next_states,axis=0)) #Q_{t+1}[s_{t+1},a_{t+1}]
updates = rewards
valid_idxs = np.array(next_states).sum(axis=1) != 0
batch_idxs = np.arange(BATCH_SIZE)
opt_q_tp1_eachS = np.argmax(np.squeeze(prim_qtp1.numpy()), axis=1) # Argmax a_{t+1} Q_{t+1} [ s_{t+1}, a_{t+1}]
q_from_target = target_network(np.expand_dims(next_states, axis=0)) #Q^{target} [ s, a]
updates[valid_idxs] += GAMMA*np.squeeze(q_from_target.numpy())[valid_idxs, opt_q_tp1_eachS[valid_idxs]] # update = r + \gamma Q[s_{t+1}, a^{*}_{t+1}]; with a^{*}_{t+1} = ArgMax Q_{s_t+1, a_t+1}
###### In the disc case... a_t = \beta_1 .... > Q[\beta_1] -> Q^{target}[n_1, \beta_1; \beta_2^{*}] with \beta_2^{*} = ArgMax Q[n_1, \beta1, \BB2]
#consequences: for each state in the first layer, the action will be someone.
target_q = np.squeeze(prim_qt.numpy())
target_q[batch_idxs, actions] = updates
with tf.device("/cpu:0"):
with tf.GradientTape() as tape:
tape.watch(primary_network.trainable_variables)
predicted_q = primary_network(states)
target_q = np.expand_dims(target_q,axis=0)
loss = tf.keras.losses.MSE(predicted_q, target_q)
loss = tf.reduce_mean(loss)
grads = tape.gradient(loss, primary_network.trainable_variables)
optimizer.apply_gradients(zip(grads, primary_network.trainable_variables))
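    # soft-update: move each target-network weight a fraction TAU toward the primary network (Polyak averaging)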
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t*(1-TAU) + e*TAU)
return loss
save=True
#
for agent in range(1):
env = gym.make("CartPole-v0")
env = wrappers.Monitor(env, './videos/' + str(2) + '/')
primary_network = QN()
target_network = QN()
optimizer = tf.keras.optimizers.Adam(lr=0.01)
num_episodes = 200
eps = 1
render = False
# train_writer = tf.summary.create_file_writer("summarie/1")
steps = 0
rews=[]
times=[]
for i in range(num_episodes):
state = env.reset()
cnt=0
avg_loss=0
while True:
# env.render()
action = choose_action(state, primary_network, eps)
next_state, reward, done, info = env.step(action)
reward = np.random.normal(1.0, RANDOM_REWARD_STD)
if cnt==300:
done = True
if done:
next_state = None
memory.add_sample((state, action, reward, next_state))
loss = train(primary_network, memory, target_network)
avg_loss += loss
state = next_state
steps +=1
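            # decay the exploration rate exponentially from MAX_EPSILON towards MIN_EPSILON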
eps = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON)*np.exp(- LAMBDA*steps)
if done:
avg_loss /= cnt
print(f"Episode: {i}, Reward: {cnt}, avg loss: {avg_loss:.3f}, eps: {eps:.3f}")
rews.append(cnt)
times.append(i+1)
# with train_writer.as_default():
# tf.summary.scalar('reward', cnt, step=i)
# tf.summary.scalar('avg loss', avg_loss, step=i)
break
cnt += 1
if save:
np.save("data"+str(agent),np.array([times, rews]), allow_pickle=True)
| UTF-8 | Python | false | false | 6,001 | py | 22 | main.py | 9 | 0.575571 | 0.556741 | 0 | 173 | 33.687861 | 201 |
luoluo/TestDataGenerator | 6,270,652,252,250 | 0cb4962eb344d4dbc4498cf7d89dbe83b3960e83 | d56cc09d758149cbaef30e59908df5e509209daf | /exectutor.py | c9f6f1519d925f21845b9856b2d9017e6a0b9a36 | [] | no_license | https://github.com/luoluo/TestDataGenerator | e4a4cd6ba5edca3a639fc681b8d747a365f1a753 | fec7ab70ca9f3b32dd582712c644508cd587b72a | refs/heads/master | 2016-09-06T09:39:04.364948 | 2014-08-01T08:05:30 | 2014-08-01T08:05:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from myConfig import MyConfig
from generator import *
class Exectutor():
def __init__(self):
pass
def run(self):
testDataGenerator = TestDataGenerator()
testDataGenerator.loadDescriptionFromFIle("generate.cfg")
testDataGenerator.generate()
| UTF-8 | Python | false | false | 280 | py | 12 | exectutor.py | 8 | 0.692857 | 0.692857 | 0 | 9 | 30.111111 | 65 |
github/codeql | 13,348,758,363,030 | 89d5197ecbae5dacfee8b96debd685a55a645db0 | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/library-tests/frameworks/django-orm/testapp/tests.py | 6b1bcb7e83c37e8ff2ddcfad617790a1663589ee | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | https://github.com/github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | false | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | 2023-09-14T08:53:44 | 2023-09-14T18:02:59 | 281,478 | 6,371 | 1,465 | 964 | CodeQL | false | false | import importlib
import re
import pytest
# Create your tests here.
def discover_save_tests():
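    # each save test in orm_tests is a test_<name>_init/_store/_load triple; collect <name> from the *_load functions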
mod = importlib.import_module("testapp.orm_tests")
test_names = []
for name in dir(mod):
m = re.match("test_(save.*)_load", name)
if not m:
continue
name = m.group(1)
test_names.append(name)
return test_names
def discover_load_tests():
mod = importlib.import_module("testapp.orm_tests")
test_names = []
for name in dir(mod):
m = re.match("test_(load.*)", name)
if not m:
continue
name = m.group(1)
if name == "load_init":
continue
test_names.append(name)
return test_names
@pytest.mark.django_db
@pytest.mark.parametrize("name", discover_save_tests())
def test_run_save_tests(name):
mod = importlib.import_module("testapp.orm_tests")
init_func = getattr(mod, f"test_{name}_init", None)
store_func = getattr(mod, f"test_{name}_store", None)
load_func = getattr(mod, f"test_{name}_load", None)
if init_func:
init_func()
store_func()
load_func()
has_run_load_init = False
@pytest.fixture
def load_test_init():
from .orm_tests import test_load_init
test_load_init()
@pytest.mark.django_db
@pytest.mark.parametrize("name", discover_load_tests())
def test_run_load_tests(load_test_init, name):
mod = importlib.import_module("testapp.orm_tests")
load_func = getattr(mod, f"test_{name}", None)
load_func()
assert getattr(mod, "TestLoad").objects.count() == 10
@pytest.mark.django_db
def test_mymodel_form_save():
from .orm_form_test import MyModel, MyModelForm
import uuid
text = str(uuid.uuid4())
form = MyModelForm(data={"text": text})
form.save()
obj = MyModel.objects.last()
assert obj.text == text
@pytest.mark.django_db
def test_none_all():
from .orm_form_test import MyModel
MyModel.objects.create(text="foo")
assert len(MyModel.objects.all()) == 1
assert len(MyModel.objects.none().all()) == 0
assert len(MyModel.objects.all().none()) == 0
@pytest.mark.django_db
def test_orm_inheritance():
from .orm_inheritance import (save_physical_book, save_ebook, save_base_book,
fetch_book, fetch_physical_book, fetch_ebook,
PhysicalBook, EBook,
)
base = save_base_book()
physical = save_physical_book()
ebook = save_ebook()
fetch_book(base.id)
fetch_book(physical.id)
fetch_book(ebook.id)
fetch_physical_book(physical.id)
fetch_ebook(ebook.id)
try:
fetch_physical_book(base.id)
except PhysicalBook.DoesNotExist:
pass
try:
fetch_ebook(ebook.id)
except EBook.DoesNotExist:
pass
@pytest.mark.django_db
def test_poly_orm_inheritance():
from .orm_inheritance import (poly_save_physical_book, poly_save_ebook, poly_save_base_book,
poly_fetch_book, poly_fetch_physical_book, poly_fetch_ebook,
PolyPhysicalBook, PolyEBook,
)
base = poly_save_base_book()
physical = poly_save_physical_book()
ebook = poly_save_ebook()
poly_fetch_book(base.id, test_for_subclass=False)
poly_fetch_book(physical.id)
poly_fetch_book(ebook.id)
poly_fetch_physical_book(physical.id)
poly_fetch_ebook(ebook.id)
try:
poly_fetch_physical_book(base.id)
except PolyPhysicalBook.DoesNotExist:
pass
try:
poly_fetch_ebook(ebook.id)
except PolyEBook.DoesNotExist:
pass
| UTF-8 | Python | false | false | 3,504 | py | 27,332 | tests.py | 11,432 | 0.63984 | 0.637557 | 0 | 143 | 23.503497 | 96 |
Parkyunhwan/BaekJoon | 19,524,921,348,316 | 0f6e4aa765ab25c701382bef01c34b3c3c096659 | c3432a248c8a7a43425c0fe1691557c0936ab380 | /21_04/13/1197_최소스패닝트리.py | 2559f831fbdfe6172a7933360822f395f5faebba | [] | no_license | https://github.com/Parkyunhwan/BaekJoon | 13cb3af1f45212d7c418ecc4b927f42615b14a74 | 9a882c568f991c9fed3df45277f091626fcc2c94 | refs/heads/master | 2022-12-24T21:47:47.052967 | 2022-12-20T16:16:59 | 2022-12-20T16:16:59 | 232,264,447 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | V, E = map(int, input().split())
parent = [x for x in range(V + 1)]
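# disjoint-set (union-find) with path compression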
def find_parent(parent, x):
if parent[x] != x:
parent[x] = find_parent(parent, parent[x])
return parent[x]
def union(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a > b:
parent[a] = b
else:
parent[b] = a
graph = []
result = 0
for _ in range(E):
a, b, cost = map(int, input().split())
graph.append([cost, a, b])
graph.sort()
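# Kruskal's MST: take edges in increasing weight order, keeping those that join two different components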
for cost, a, b in graph:
if find_parent(parent, a) != find_parent(parent, b):
union(parent, a, b)
result += cost
print(result) | UTF-8 | Python | false | false | 627 | py | 425 | 1197_최소스패닝트리.py | 413 | 0.548644 | 0.545455 | 0 | 31 | 19.258065 | 56 |
k6project/scene3d | 19,164,144,076,984 | cef70f8324564892a738a17695a54d7ff1a2d9f5 | af8c31763223f3634ef329ec5725d9f881e4f649 | /python/s3dexport.py | 94e6944a66a7e12dea2d09d4eb938de2c038ad6f | [] | no_license | https://github.com/k6project/scene3d | af2d4236d44bf6b31bfdb82e808b7ed7a29632c5 | f5ac65d8d7004a795879f3e191e9fe8efb9526c9 | refs/heads/master | 2020-03-26T14:42:57.837876 | 2019-03-18T14:17:37 | 2019-03-18T14:17:37 | 145,001,289 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | bl_info = {
"name": "Scene3D binary format",
"category": "Import-Export",
"support": "TESTING"
}
import bpy
import bmesh
import s3dconv
from bpy.props import StringProperty
from bpy_extras.io_utils import ExportHelper
class S3DExport(bpy.types.Operator, ExportHelper):
bl_idname = "export_scene.s3d"
bl_label = "Export for Scene3D"
bl_options = {'PRESET'}
filename_ext = ".s3d"
filter_glob = StringProperty(default="*.s3d", options={"HIDDEN"})
def execute(self, context):
s3dconv.begin(self.filepath)
scene = context.scene
for obj in scene.objects:
if obj.type == "MESH":
mesh = obj.to_mesh(context.scene, True, settings='RENDER', calc_tessface=False)
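                # triangulate a temporary BMesh copy so every exported face has exactly three vertices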
tmp = bmesh.new()
tmp.from_mesh(mesh)
bmesh.ops.triangulate(tmp, faces=tmp.faces)
tmp.to_mesh(mesh)
tmp.free()
mesh.calc_normals_split()
for face in mesh.polygons:
indices = []
for li in face.loop_indices:
loop = mesh.loops[li]
v = mesh.vertices[loop.vertex_index]
vdata = [ v.co.x, v.co.y, v.co.z ] + [loop.normal.x, loop.normal.y, loop.normal.z]
indices.append(s3dconv.add_vertex(vdata))
s3dconv.add_face(indices)
s3dconv.end()
return {"FINISHED"}
def menu_func_export(self, context):
self.layout.operator("export_scene.s3d", text="Scene3D (.s3d)")
def register():
bpy.utils.register_class(S3DExport)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(S3DExport)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
| UTF-8 | Python | false | false | 1,864 | py | 62 | s3dexport.py | 23 | 0.575644 | 0.56706 | 0 | 54 | 33.518519 | 106 |
MichaelT2828/Unit_1 | 16,372,415,370,225 | 01f57d57569d9134bc8c2dbde33e857efeaac3e3 | c70e91646853a5e6732fcb56f5238cd5da840cee | /least common multiple.py | e224d673a4a8ea7f05b82b38af599a71be11239a | [] | no_license | https://github.com/MichaelT2828/Unit_1 | 48bbdcc7b80e53372bbb0ea7350a2c37c87ea564 | d47e06972b4a2b43306be8f1fa85d1af76d1afaa | refs/heads/main | 2023-09-05T22:38:25.696152 | 2021-10-28T05:44:51 | 2021-10-28T05:44:51 | 398,943,742 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def lcm(a, b, c):
#find biggest value
    if a >= b and a >= c:
        biggest = a
    elif b >= a and b >= c:
        biggest = b
    else:
        biggest = c
while(True):
if (biggest % a == 0) and (biggest % b == 0) and (biggest % c == 0):
lcm = biggest
break
biggest += 1
return biggest
print(lcm(18, 4, 7))
| UTF-8 | Python | false | false | 395 | py | 14 | least common multiple.py | 5 | 0.455696 | 0.432911 | 0 | 18 | 20.944444 | 76 |
google-research/language | 15,453,292,334,085 | 8d2de083ec705cba9d43eea3398d9d3abcceff5b | ab40571d5051ad53c0f205fa797ba36eac516d06 | /language/bert_extraction/steal_bert_classifier/data_generation/build_aux_membership.py | 7b7c0907a3afc445d961d81595eda52df4c44c9f | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | https://github.com/google-research/language | e941b1a92ab46d40d8d03bb0c314905cb6902ce2 | ac9447064195e06de48cc91ff642f7fffa28ffe8 | refs/heads/master | 2023-08-24T23:10:13.207294 | 2023-05-25T20:47:18 | 2023-05-25T22:29:27 | 153,201,352 | 1,567 | 371 | Apache-2.0 | false | 2023-07-06T23:03:15 | 2018-10-16T00:58:14 | 2023-07-05T08:17:26 | 2023-07-06T23:03:14 | 6,047 | 1,468 | 339 | 85 | Python | false | false | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Construct auxiliary membership classification test sets (RANDOM, SHUFFLE) to check the generalization of the classifier (Section 6.1)."""
import random
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("membership_dev_data", None,
"File with original membership classification dev data")
flags.DEFINE_string(
"random_membership_dev_data", None,
"membership classification dev data built from RANDOM scheme")
flags.DEFINE_string("aux_path", None,
"Path to output the auxiliary membership datasets")
FLAGS = flags.FLAGS
def main(_):
with gfile.Open(FLAGS.membership_dev_data, "r") as f:
orig_dev_data = f.read().strip().split("\n")
orig_dev_header = orig_dev_data[0]
orig_dev_data = orig_dev_data[1:]
true_data_membership = []
for point in orig_dev_data:
if point.split("\t")[-1] == "true":
true_data_membership.append(point.split("\t")[1:])
random.shuffle(true_data_membership)
combined_data = []
# shuffle both premise and hypothesis of the original dev data to create
# "fake" examples
for point in true_data_membership:
combined_data.append(point)
premise_tokens = point[0].split()
hypo_tokens = point[1].split()
random.shuffle(premise_tokens)
random.shuffle(hypo_tokens)
fake_point = [" ".join(premise_tokens), " ".join(hypo_tokens), "fake"]
combined_data.append(fake_point)
random.shuffle(combined_data)
final_split = "\n".join(
[orig_dev_header] +
["%d\t%s" % (i, "\t".join(x)) for i, x in enumerate(combined_data)])
gfile.MakeDirs(FLAGS.aux_path + "/shuffle")
with gfile.Open(FLAGS.aux_path + "/shuffle/dev.tsv", "w") as f:
f.write(final_split)
with gfile.Open(FLAGS.random_membership_dev_data, "r") as f:
random_dev_data = f.read().strip().split("\n")
random_dev_data = random_dev_data[1:]
fake_data_membership = []
for point in random_dev_data:
if point.split("\t")[-1] == "fake":
fake_data_membership.append(point.split("\t")[1:])
# combine the "true" examples from the original membership dev set with "fake"
# examples from the RANDOM dev set
combined_data = true_data_membership + fake_data_membership
random.shuffle(combined_data)
final_split = "\n".join(
[orig_dev_header] +
["%d\t%s" % (i, "\t".join(x)) for i, x in enumerate(combined_data)])
gfile.MakeDirs(FLAGS.aux_path + "/random")
with gfile.Open(FLAGS.aux_path + "/random/dev.tsv", "w") as f:
f.write(final_split)
if __name__ == "__main__":
app.run(main)
| UTF-8 | Python | false | false | 3,237 | py | 956 | build_aux_membership.py | 809 | 0.676552 | 0.670065 | 0 | 103 | 30.427184 | 140 |
ekorkut/hackerrank | 7,129,645,743,679 | efce8bf0c1c720df441f68f370240048c36c3e2c | 4ded0f0206710baa717f4e538e60800bc5cea857 | /algorithms/strings/sherlock_and_valid_strings/main.py | f22cff156e6e90a6e8ba6a25336da07a9107a5e4 | [] | no_license | https://github.com/ekorkut/hackerrank | c8f7456bf2f7a0812c60016ca82200380de93a77 | fdcde54a421ebc5c8a007c5f050971fafb727b33 | refs/heads/master | 2018-10-10T18:33:22.900269 | 2018-06-24T14:10:02 | 2018-06-24T14:10:02 | 82,475,484 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/opt/bb/bin/bbpy2.7
input_str = raw_input().strip()
char_to_freq = {}
for c in input_str:
if c in char_to_freq:
char_to_freq[c] += 1
else:
char_to_freq[c] = 1
freqs = [char_to_freq[x] for x in char_to_freq]
distinct_freqs = set(freqs)
print freqs
print distinct_freqs
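# "YES" if every character occurs equally often, or if removing one occurrence of a single character makes all counts equal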
if len(distinct_freqs) == 1:
print "YES"
elif len(distinct_freqs) > 2:
print "NO"
else:
small_freq = min(distinct_freqs)
small_count = freqs.count(small_freq)
large_freq = max(distinct_freqs)
large_count = freqs.count(large_freq)
if (small_freq == 1) and (small_count == 1):
print "YES"
elif (large_freq-small_freq == 1) and (large_count == 1):
print "YES"
else:
print "NO"
| UTF-8 | Python | false | false | 744 | py | 47 | main.py | 35 | 0.591398 | 0.577957 | 0 | 30 | 23.633333 | 61 |
cal-poly-dxhub/familycaresurveytool | 17,738,214,945,505 | 96a381909fae26be5f25c4ac76dec449009d5dca | e8442d6fe7b77880f532e882c9d80b324683e3fc | /lambda/PostResult/db_config.py | 6d5534b224192e4e002d2ea7252d1d0731acbf69 | [
"Apache-2.0"
] | permissive | https://github.com/cal-poly-dxhub/familycaresurveytool | 12b86ccbc6074af33e360df7f80338572f503cf3 | 2adac2af91abdc3d6bd7dc5b85e1801ca4071687 | refs/heads/master | 2023-01-10T19:31:00.452654 | 2020-08-03T21:53:05 | 2020-08-03T21:53:05 | 240,388,053 | 0 | 0 | null | false | 2023-01-06T02:32:59 | 2020-02-13T23:25:03 | 2020-08-03T21:53:08 | 2023-01-06T02:32:58 | 6,891 | 0 | 0 | 21 | Python | false | false | RDS_HOST=''
RDS_USER=''
RDS_PW=''
RDS_DB=''
| UTF-8 | Python | false | false | 44 | py | 16 | db_config.py | 12 | 0.545455 | 0.545455 | 0 | 4 | 10 | 11 |
mylekiller/NLPProject | 13,237,089,231,672 | 370ff87b8a55f7b1c3557ef90be199c3451e8bdd | d3194b506bde8b33a331e99348fd8c488b0cbe66 | /align.py | bedd7bad3226e9d7422eebda78fe64f8336123e7 | [] | no_license | https://github.com/mylekiller/NLPProject | a014aec336ed852036dccd703a739aef5b39b812 | 226a8cbeab9bbf3cdcbe26c19f15c325386b4288 | refs/heads/master | 2020-04-11T10:05:57.322113 | 2018-12-14T08:26:30 | 2018-12-14T08:26:30 | 161,703,223 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy
import sys
# -*- coding: utf-8 -*-
params = dict()
ewordtypes = set()
fwordtypes = set()
counts = dict()
outputfile = 'align.out'
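# Train IBM Model 1-style lexical translation probabilities with EM, then write the most likely alignment for each sentence pair to align.out.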
def EM(trainfile):
log_likelihood = 0
counts = dict()
    # E-step: accumulate expected alignment counts and the corpus log-likelihood
with open(trainfile) as file:
for line in file:
e = line.strip().split('\t')[1]
f = line.strip().split('\t')[0]
ewords = e.strip().split(' ')
fwords = f.strip().split(' ')
log_product = 1
for j in range(len(fwords)):
summation = 0
for i in range(len(ewords) + 1):
eword = ''
fword = fwords[j]
if i == 0:
eword = 'NULL'
else:
eword = ewords[i - 1]
summation += params[eword][fword]
log_product *= (1.0/(len(ewords) + 1))*(summation)
log_product *= (1./100.)
log_likelihood += numpy.log(log_product)
for j in range(len(fwords)):
total = 0.0
for word in ewords:
total += params[word][fwords[j]]
for i in range(len(ewords) + 1):
eword = ''
fword = fwords[j]
if i == 0:
eword = 'NULL'
else:
eword = ewords[i - 1]
if fword not in counts:
counts[fword] = dict()
if eword not in counts[fword]:
counts[fword][eword] = params[eword][fword] / total
else:
counts[fword][eword] += params[eword][fword] / total
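    # M-step: renormalize the expected counts into updated translation probabilities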
for e in ewordtypes:
total = 0
for fp in params[e]:
total += counts[fp][e]
for f in params[e]:
params[e][f] = counts[f][e]/total
return log_likelihood
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) != 1:
print("Wrong number of arguments. Expected: 1")
exit(1)
trainfile = args[0]
#read in file
with open(trainfile) as file:
for line in file:
e = line.strip().split('\t')[1]
f = line.strip().split('\t')[0]
ewords = e.strip().split(' ')
ewords.append('NULL')
for eword in ewords:
for fword in f.strip().split(' '):
ewordtypes.add(eword)
fwordtypes.add(fword)
if eword not in params:
params[eword] = dict()
if fword not in params[eword]:
params[eword][fword] = 0.0
#initialize to uniform
for e in params:
total = 0.0
for f in params[e]:
total += 1.0
for f in params[e]:
params[e][f] = 1.0/total
print("Training model")
for iteration in range(10):
print("Starting iteration: {}".format(iteration))
log_likelihood = EM(trainfile)
print("The log_likelihood was: {}".format(log_likelihood))
print("Testing model from train.zh-en and writing to align.out")
with open(outputfile, 'w') as outfile, open(trainfile) as infile:
for line in infile:
e = line.strip().split('\t')[1]
f = line.strip().split('\t')[0]
ewords = e.strip().split(' ')
ewords.append('NULL')
fwords = f.strip().split(' ')
for j in range(len(fwords)):
best = None
for i in range(len(ewords)):
if not best or params[ewords[i]][fwords[j]] > best[2]:
best = (j, i, params[ewords[i]][fwords[j]])
if not best[1] == len(ewords) - 1:
outfile.write('{}-{} '.format(best[0],best[1]))
else:
outfile.write('{}-{} '.format(best[0], '_' ))
outfile.write('\n')
| UTF-8 | Python | false | false | 3,062 | py | 11 | align.py | 7 | 0.583932 | 0.568583 | 0 | 123 | 23.886179 | 66 |
Sudarsan-Sridharan/shift-python | 360,777,299,039 | 4b1c2002561ba3dc82e50bb2c101fc56275d3960 | 08727db800af02da3d7b0f3ec6921b0670bed898 | /demo/demo.py | 221cd64c062285dc3387a425509e4d98c1f8f8af | [] | no_license | https://github.com/Sudarsan-Sridharan/shift-python | bc65510656a9f03d960f98d158ad5a93b5248dc5 | 0eadb1d2b6c1dd108f2387355d1af7405bc057ff | refs/heads/master | 2020-06-03T08:10:18.376394 | 2019-06-08T03:40:42 | 2019-06-08T03:40:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import shift
import sys
import time
def demo01(trader):
"""
This method submits a limit buy order by indicating order type, symbol, size, and limit price.
:param trader:
:return:
"""
limit_buy = shift.Order(shift.Order.LIMIT_BUY, "AAPL", 1, 10.00)
trader.submitOrder(limit_buy)
return
def demo02(trader):
"""
This method submits 2 limit buy orders by indicating order type, symbol, size, and limit price.
:param trader:
:return:
"""
aapl_limit_buy = shift.Order(shift.Order.LIMIT_BUY, "AAPL", 10, 10.00)
trader.submitOrder(aapl_limit_buy)
xom_limit_buy = shift.Order(shift.Order.LIMIT_BUY, "XOM", 10, 10.00)
trader.submitOrder(xom_limit_buy)
return
def demo03(trader):
"""
This method prints the local bid order book for corresponding symbols.
:param trader:
:return:
"""
print("AAPL:")
print(" Price\t\tSize\t Dest\t\tTime")
for order in trader.getOrderBook("AAPL", shift.OrderBookType.LOCAL_BID):
print("%7.2f\t\t%4d\t%6s\t\t%19s" %
(order.price, order.size, order.destination, order.time))
print()
print("XOM:")
print(" Price\t\tSize\t Dest\t\tTime")
for order in trader.getOrderBook("XOM", shift.OrderBookType.LOCAL_BID):
print("%7.2f\t\t%4d\t%6s\t\t%19s" %
(order.price, order.size, order.destination, order.time))
def demo04(trader):
"""
This method prints all current waiting orders information.
:param trader:
:return:
"""
print("Symbol\t\t\t\tType\t Price\t\tSize\tExecuted\tID\t\t\t\t\t\t\t\t\t\t\t\t\t\t Status\t\tTimestamp")
for order in trader.getWaitingList():
print("%6s\t%16s\t%7.2f\t\t%4d\t\t%4d\t%36s\t%23s\t\t%26s" %
(order.symbol, order.type, order.price, order.size,
order.executed_size, order.id, order.status, order.timestamp))
return
def demo05(trader):
"""
This method cancels all the orders in the waiting list.
:param trader:
:return:
"""
print("Symbol\t\t\t\tType\t Price\t\tSize\tExecuted\tID\t\t\t\t\t\t\t\t\t\t\t\t\t\t Status\t\tTimestamp")
for order in trader.getWaitingList():
print("%6s\t%16s\t%7.2f\t\t%4d\t\t%4d\t%36s\t%23s\t\t%26s" %
(order.symbol, order.type, order.price, order.size,
order.executed_size, order.id, order.status, order.timestamp))
print()
print("Waiting list size: " + str(trader.getWaitingListSize()))
print("Canceling all pending orders...", end=" ")
# trader.cancelAllPendingOrders() also works
for order in trader.getWaitingList():
trader.submitCancellation(order)
i = 0
while trader.getWaitingListSize() > 0:
i += 1
print(i, end=" ")
time.sleep(1)
print()
print("Waiting list size: " + str(trader.getWaitingListSize()))
return
def demo06(trader):
"""
This method shows how to submit market buy orders.
:param trader:
:return:
"""
aapl_market_buy = shift.Order(shift.Order.MARKET_BUY, "AAPL", 1)
trader.submitOrder(aapl_market_buy)
xom_market_buy = shift.Order(shift.Order.MARKET_BUY, "XOM", 1)
trader.submitOrder(xom_market_buy)
return
def demo07(trader):
"""
This method provides information on the structure of PortfolioSummary and PortfolioItem objects:
getPortfolioSummary() returns a PortfolioSummary object with the following data:
1. Total Buying Power (totalBP)
2. Total Shares (totalShares)
3. Total Realized Profit/Loss (totalRealizedPL)
4. Timestamp of Last Update (timestamp)
getPortfolioItems() returns a dictionary with "symbol" as keys and PortfolioItem as values,
with each providing the following information:
1. Symbol (getSymbol())
2. Shares (getShares())
3. Price (getPrice())
4. Realized Profit/Loss (getRealizedPL())
5. Timestamp of Last Update (getTimestamp())
:param trader:
:return:
"""
print("Buying Power\tTotal Shares\tTotal P&L\tTimestamp")
print("%12.2f\t%12d\t%9.2f\t%26s" % (trader.getPortfolioSummary().getTotalBP(),
trader.getPortfolioSummary().getTotalShares(),
trader.getPortfolioSummary().getTotalRealizedPL(),
trader.getPortfolioSummary().getTimestamp()))
print()
print("Symbol\t\tShares\t\tPrice\t\t P&L\tTimestamp")
for item in trader.getPortfolioItems().values():
print("%6s\t\t%6d\t%9.2f\t%9.2f\t%26s" %
(item.getSymbol(), item.getShares(), item.getPrice(), item.getRealizedPL(), item.getTimestamp()))
return
def demo08(trader):
"""
This method shows how to submit market sell orders.
:param trader:
:return:
"""
aapl_market_sell = shift.Order(shift.Order.MARKET_SELL, "AAPL", 1)
trader.submitOrder(aapl_market_sell)
xom_market_sell = shift.Order(shift.Order.MARKET_SELL, "XOM", 1)
trader.submitOrder(xom_market_sell)
return
def demo09(trader):
"""
This method prints all submitted orders information.
:param trader:
:return:
"""
print("Symbol\t\t\t\tType\t Price\t\tSize\tExecuted\tID\t\t\t\t\t\t\t\t\t\t\t\t\t\t Status\t\tTimestamp")
for order in trader.getSubmittedOrders():
if order.executed_size == order.size:
price = order.executed_price
else:
price = order.price
print("%6s\t%16s\t%7.2f\t\t%4d\t\t%4d\t%36s\t%23s\t\t%26s" %
(order.symbol, order.type, price, order.size,
order.executed_size, order.id, order.status, order.timestamp))
return
def demo10(trader):
"""
This method prints the global bid order book for a corresponding symbol and type.
:param trader:
:return:
"""
print(" Price\t\tSize\t Dest\t\tTime")
for order in trader.getOrderBook("AAPL", shift.OrderBookType.GLOBAL_BID, 5):
print("%7.2f\t\t%4d\t%6s\t\t%19s" %
(order.price, order.size, order.destination, order.time))
def main(argv):
# create trader object
trader = shift.Trader("democlient")
# connect and subscribe to all available order books
try:
trader.connect("initiator.cfg", "password")
trader.subAllOrderBook()
except shift.IncorrectPassword as e:
print(e)
except shift.ConnectionTimeout as e:
print(e)
# demo01(trader)
# demo02(trader)
# demo03(trader)
# demo04(trader)
# demo05(trader)
# demo06(trader)
# demo07(trader)
# demo08(trader)
# demo09(trader)
# demo10(trader)
# disconnect
trader.disconnect()
return
if __name__ == "__main__":
main(sys.argv)
| UTF-8 | Python | false | false | 6,781 | py | 17 | demo.py | 11 | 0.624982 | 0.602861 | 0 | 241 | 27.136929 | 111 |
zcZhangCheng/3d_vision | 7,318,624,307,881 | fcf35fadafbe104cbe6e5b22f13538c3360d8b40 | 13c587886fc2b8398ed67f96fd6cf3c668e55410 | /visual_map/output_re.py | fa25442a19fd1078e8630c2b1cbd3e9ef19919c3 | [] | no_license | https://github.com/zcZhangCheng/3d_vision | 99288f177e75b7259ec710cf4cf9d469c6657904 | 3555c4d8ddfec6574f1d94bc81e3a443dcee67d1 | refs/heads/master | 2023-03-17T16:06:25.909697 | 2019-07-26T09:43:31 | 2019-07-26T09:43:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
d = sys.argv[1]
out_file=os.path.join(d, 'final_reuslt.txt')
with open(out_file,'w') as fw:
fw.seek(0,0)
file_list = os.listdir(d)
for folder in file_list:
full_addr = os.path.join(d, folder)
if os.path.isdir(full_addr):
if ".bag" in folder:
map_name=folder
file_list1 = os.listdir(full_addr)
for folder1 in file_list1:
full_addr1 = os.path.join(full_addr, folder1)
if os.path.isdir(full_addr1):
if ".bag" in folder1:
loc_name=folder1
print(full_addr1)
if not os.path.isfile(os.path.join(full_addr1, 'raw_match.txt')):
fw.write(map_name+' + '+loc_name+' : '+'failed!!!\n')
continue
os.system('python ./assessLoc.py '+full_addr1+' ./assessConfig.yaml')
with open(os.path.join(full_addr1, 'scale_match.txt'),'r') as fr:
fr.seek(0,0)
re_str = fr.readline()
fw.write(map_name+' + '+loc_name+' : '+re_str+'\n')
| UTF-8 | Python | false | false | 1,318 | py | 143 | output_re.py | 96 | 0.430956 | 0.418058 | 0 | 27 | 46.407407 | 97 |
BomberDim/Python-practice | 16,106,127,401,228 | 66423a5ebd3a8ea657ff70efe986de3159bde8ec | f2348887dbffbd2124376b97c942939e78093e8f | /examples№1/example1.py | 95b7606a0521983aea855b0e92db38fcbd728966 | [] | no_license | https://github.com/BomberDim/Python-practice | f2d0c9051b67a0269b04861c205d268611ed259b | b55baabaed088449dc70cda9f09e706f3614fbf3 | refs/heads/master | 2020-05-19T09:57:42.117307 | 2015-12-13T16:17:06 | 2015-12-13T16:17:06 | 42,327,071 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import cos, pi
x = 1.426
y = -1.220
z = 3.5
a = 2 * cos(x - pi/6)/0.5 + ((1 - cos(2*y))/2)
b = 1 + (2 / (3 + (z**2/5)))
print("a =", a)
print("b =", b)
input()
| UTF-8 | Python | false | false | 224 | py | 84 | example1.py | 84 | 0.455357 | 0.348214 | 0 | 18 | 11.444444 | 46 |
itsolutionscorp/AutoStyle-Clustering | 2,989,297,251,869 | f1bb4d1f9b770ca29b16988737155debc2777e26 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/difference-of-squares/d5d1107acb684775a1fb8d66887d2658.py | 601132579eb0f44eeb770164dbdfbd1e2736bae4 | [] | no_license | https://github.com/itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | true | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | 2016-05-19T22:14:37 | 2016-05-19T22:35:40 | 133,854 | 0 | 0 | 0 | null | null | null | def square_of_sum(n):
return (sum(range(1, n + 1)) ** 2)
def sum_of_squares(n):
return (sum([i ** 2 for i in range(1, n + 1)]))
def difference(n):
return (square_of_sum(n) - sum_of_squares(n))
| UTF-8 | Python | false | false | 209 | py | 54,209 | d5d1107acb684775a1fb8d66887d2658.py | 21,653 | 0.569378 | 0.54067 | 0 | 10 | 19.9 | 51 |
korJAEYOUNGYUN/django-youtube_clone | 4,896,262,734,569 | e98eaea03db7d6145d0bead55c4c9b35d23054b4 | b73762c15cf6b8a835400fbd2a8aeaef35ad50ab | /django_youtube_clone/apps/video/urls.py | 80c29861f912b3ef7d3bdfdb9fa1bc9e86090be0 | [] | no_license | https://github.com/korJAEYOUNGYUN/django-youtube_clone | b1c7df1656e2effa1d6ef0a8f38407a962e1d646 | 0e13a81286cceb578372ecb751accdeab395e9d7 | refs/heads/master | 2022-11-27T21:45:24.257145 | 2020-06-10T03:35:08 | 2020-06-10T03:35:08 | 242,132,236 | 1 | 0 | null | false | 2022-11-22T05:24:05 | 2020-02-21T12:17:01 | 2020-11-27T14:37:13 | 2022-11-22T05:24:01 | 74 | 1 | 0 | 2 | Python | false | false | from django.urls import path
from django_youtube_clone.apps.video import views
urlpatterns = [
path('', views.Home.as_view(), name='home'),
path('videos/upload/', views.Upload.as_view(), name='upload'),
path('search/', views.Search.as_view(), name='search'),
path('videos/<int:id>/edit', views.EditVideo.as_view(), name='edit_video'),
path('videos/<int:pk>/', views.VideoDetail.as_view(), name='video_detail'),
path('videos/<int:pk>/delete', views.DeleteVideo.as_view(), name='delete_video'),
]
| UTF-8 | Python | false | false | 521 | py | 21 | urls.py | 11 | 0.664107 | 0.664107 | 0 | 12 | 42.416667 | 85 |
taichi-dev/taichi-nerfs | 10,161,892,625,963 | 261618bd915febe62817ce73c06786f3fb7d0fd3 | 59a3d1c5739d27df28a7904ec32f908575427641 | /modules/utils.py | d7bac1579736d9e69b018f6ba07eecf54242ac90 | [
"Apache-2.0"
] | permissive | https://github.com/taichi-dev/taichi-nerfs | 8d91e66b29aa9f0712e177c38ee6f257c4288e77 | 50bac170584628df6a7017b77d76e780296f0e6c | refs/heads/main | 2023-07-10T07:01:29.423283 | 2023-05-29T05:53:38 | 2023-05-29T05:53:38 | 610,072,946 | 548 | 37 | Apache-2.0 | false | 2023-05-29T05:53:40 | 2023-03-06T02:58:21 | 2023-05-29T05:43:43 | 2023-05-29T05:53:39 | 157,331 | 459 | 32 | 8 | Python | false | false | import os
import cv2
import torch
import numpy as np
import taichi as ti
from taichi.math import uvec3
data_type = ti.f32
torch_type = torch.float32
MAX_SAMPLES = 1024
NEAR_DISTANCE = 0.01
SQRT3 = 1.7320508075688772
SQRT3_MAX_SAMPLES = SQRT3 / 1024
SQRT3_2 = 1.7320508075688772 * 2
def res_in_level_np(
level_i,
base_res,
log_per_level_scale
):
result = np.ceil(
float(base_res) * np.exp(
float(level_i) * log_per_level_scale
) - 1.0
)
return float(result + 1)
def scale_in_level_np(
base_res,
max_res,
levels,
):
result = np.log(
float(max_res) / float(base_res)
) / float(levels - 1)
return result
def align_to(x, y):
return int((x+y-1)/y)*y
@ti.kernel
def random_initialize(data: ti.types.ndarray()):
for I in ti.grouped(data):
data[I] = (ti.random() * 2.0 - 1.0) * 1e-4
@ti.func
def scalbn(x, exponent):
return x * ti.math.pow(2, exponent)
@ti.func
def calc_dt(t, exp_step_factor, grid_size, scale):
return ti.math.clamp(t * exp_step_factor, SQRT3_MAX_SAMPLES,
SQRT3_2 * scale / grid_size)
@ti.func
def frexp_bit(x):
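    # emulate frexp: recover the binary exponent of x from its IEEE-754 bit pattern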
exponent = 0
if x != 0.0:
# frac = ti.abs(x)
bits = ti.bit_cast(x, ti.u32)
exponent = ti.i32((bits & ti.u32(0x7f800000)) >> 23) - 127
# exponent = (ti.i32(bits & ti.u32(0x7f800000)) >> 23) - 127
bits &= ti.u32(0x7fffff)
bits |= ti.u32(0x3f800000)
frac = ti.bit_cast(bits, ti.f32)
if frac < 0.5:
exponent -= 1
elif frac > 1.0:
exponent += 1
return exponent
@ti.func
def mip_from_pos(xyz, cascades):
mx = ti.abs(xyz).max()
# _, exponent = _frexp(mx)
exponent = frexp_bit(ti.f32(mx)) + 1
# frac, exponent = ti.frexp(ti.f32(mx))
return ti.min(cascades - 1, ti.max(0, exponent))
@ti.func
def mip_from_dt(dt, grid_size, cascades):
# _, exponent = _frexp(dt*grid_size)
exponent = frexp_bit(ti.f32(dt * grid_size))
# frac, exponent = ti.frexp(ti.f32(dt*grid_size))
return ti.min(cascades - 1, ti.max(0, exponent))
@ti.func
def __expand_bits(v):
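    # spread the low bits of v so that two zero bits separate consecutive bits (Morton/Z-order encoding helper)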
v = (v * ti.uint32(0x00010001)) & ti.uint32(0xFF0000FF)
v = (v * ti.uint32(0x00000101)) & ti.uint32(0x0F00F00F)
v = (v * ti.uint32(0x00000011)) & ti.uint32(0xC30C30C3)
v = (v * ti.uint32(0x00000005)) & ti.uint32(0x49249249)
return v
@ti.func
def __morton3D(xyz):
xyz = __expand_bits(xyz)
return xyz[0] | (xyz[1] << 1) | (xyz[2] << 2)
@ti.func
def __morton3D_invert(x):
x = x & (0x49249249)
x = (x | (x >> 2)) & ti.uint32(0xc30c30c3)
x = (x | (x >> 4)) & ti.uint32(0x0f00f00f)
x = (x | (x >> 8)) & ti.uint32(0xff0000ff)
x = (x | (x >> 16)) & ti.uint32(0x0000ffff)
return ti.int32(x)
@ti.kernel
def morton3D_invert_kernel(indices: ti.types.ndarray(ndim=1),
coords: ti.types.ndarray(ndim=2)):
for i in indices:
ind = ti.uint32(indices[i])
coords[i, 0] = __morton3D_invert(ind >> 0)
coords[i, 1] = __morton3D_invert(ind >> 1)
coords[i, 2] = __morton3D_invert(ind >> 2)
def morton3D_invert(indices):
coords = torch.zeros(indices.size(0),
3,
device=indices.device,
dtype=torch.int32)
morton3D_invert_kernel(indices.contiguous(), coords)
ti.sync()
return coords
@ti.kernel
def morton3D_kernel(xyzs: ti.types.ndarray(ndim=2),
indices: ti.types.ndarray(ndim=1)):
for s in indices:
xyz = uvec3([xyzs[s, 0], xyzs[s, 1], xyzs[s, 2]])
indices[s] = ti.cast(__morton3D(xyz), ti.int32)
def morton3D(coords1):
indices = torch.zeros(coords1.size(0),
device=coords1.device,
dtype=torch.int32)
morton3D_kernel(coords1.contiguous(), indices)
ti.sync()
return indices
@ti.kernel
def packbits(density_grid: ti.types.ndarray(ndim=1),
density_threshold: float,
density_bitfield: ti.types.ndarray(ndim=1)):
for n in density_bitfield:
bits = ti.uint8(0)
for i in ti.static(range(8)):
bits |= (ti.uint8(1) << i) if (
density_grid[8 * n + i] > density_threshold) else ti.uint8(0)
density_bitfield[n] = bits
@ti.kernel
def torch2ti(field: ti.template(), data: ti.types.ndarray()):
for I in ti.grouped(data):
field[I] = data[I]
@ti.kernel
def ti2torch(field: ti.template(), data: ti.types.ndarray()):
for I in ti.grouped(data):
data[I] = field[I]
@ti.kernel
def ti2torch_grad(field: ti.template(), grad: ti.types.ndarray()):
for I in ti.grouped(grad):
grad[I] = field.grad[I]
@ti.kernel
def torch2ti_grad(field: ti.template(), grad: ti.types.ndarray()):
for I in ti.grouped(grad):
field.grad[I] = grad[I]
@ti.kernel
def torch2ti_vec(field: ti.template(), data: ti.types.ndarray()):
for I in range(data.shape[0] // 2):
field[I] = ti.Vector([data[I * 2], data[I * 2 + 1]])
@ti.kernel
def ti2torch_vec(field: ti.template(), data: ti.types.ndarray()):
for i, j in ti.ndrange(data.shape[0], data.shape[1] // 2):
data[i, j * 2] = field[i, j][0]
data[i, j * 2 + 1] = field[i, j][1]
@ti.kernel
def ti2torch_grad_vec(field: ti.template(), grad: ti.types.ndarray()):
for I in range(grad.shape[0] // 2):
grad[I * 2] = field.grad[I][0]
grad[I * 2 + 1] = field.grad[I][1]
@ti.kernel
def torch2ti_grad_vec(field: ti.template(), grad: ti.types.ndarray()):
for i, j in ti.ndrange(grad.shape[0], grad.shape[1] // 2):
field.grad[i, j][0] = grad[i, j * 2]
field.grad[i, j][1] = grad[i, j * 2 + 1]
def depth2img(depth):
depth = (depth - depth.min()) / (depth.max() - depth.min())
depth_img = cv2.applyColorMap((depth * 255).astype(np.uint8),
cv2.COLORMAP_TURBO)
return depth_img
def save_deployment_model(model, dataset, save_dir):
padding = torch.zeros(13, 16)
rgb_out = model.rgb_net.output_layer.weight.detach().cpu()
rgb_out = torch.cat([rgb_out, padding], dim=0)
new_dict = {
'poses': dataset.poses.cpu().numpy(),
'model.density_bitfield': model.density_bitfield.cpu().numpy(),
'model.hash_encoder.params': model.pos_encoder.hash_table.detach().cpu().numpy(),
'model.per_level_scale': model.pos_encoder.log_b,
'model.xyz_encoder.params':
torch.cat(
[model.xyz_encoder.hidden_layers[0].weight.detach().cpu().reshape(-1),
model.xyz_encoder.output_layer.weight.detach().cpu().reshape(-1)]
).numpy(),
'model.rgb_net.params':
torch.cat(
[model.rgb_net.hidden_layers[0].weight.detach().cpu().reshape(-1),
rgb_out.reshape(-1)]
).numpy(),
}
np.save(
os.path.join(f'{save_dir}', 'deployment.npy'),
new_dict
)
| UTF-8 | Python | false | false | 7,049 | py | 59 | utils.py | 41 | 0.567598 | 0.51369 | 0 | 253 | 26.86166 | 89 |
thibaudlemaire/autolights | 5,557,687,681,384 | 3471e5b1fe827c271918afcc336df139d0806330 | 7cf7592bca7b20627ec4c23d0d43c748367fa74f | /soft/sys_expert/energy_detection.py | 01ea956c4163abeba9d608dfccf11eb077f4d1b0 | [] | no_license | https://github.com/thibaudlemaire/autolights | 4b64d5a2ef1885c21b8a2d2e0e145dd7b067dd97 | 3b6d331efdec8b08c12c6503870704e8fe4addd7 | refs/heads/master | 2021-06-18T09:08:37.924753 | 2017-06-29T09:14:11 | 2017-06-29T09:14:11 | 94,530,591 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@author: thibaud
"""
import logging
import librosa
import math
import time
import numpy as np
from threading import Thread
from .bibliotheque import energie
# Constants
BUFFER_SIZE = 10 # Number of frames to store in the buffer (10 -> 0,25s)
SAMPLE_PER_FRAME = 1024 # See audio module
SAMPLE_RATE = 44100 # See audio module
ENERGY_SILENCE_THRESHOLD = 10  # Absolute RMS energy threshold under which sound is considered as silence
ENERGY_CHANGE_THRESHOLD = 5 # Delta
BASS_THRESHOLD = 2 # Relative to mean
SWEEP_THRESHOLD = 1.5 # Relative to mean
BREAK_THRESHOLD = 2.2 # Relative
INTER_STATES_TIME = 3  # Time between states in the state machine
MEAN_NUMBER = 30  # Number of values in the sliding means
# States
_STATE_WAITING = 0
_STATE_SWEEP = 1
_STATE_BREAK = 2
_STATE_DROP = 3
# This class provides a thread for the SE module
class EnergyDetector(Thread):
def __init__(self, audio_frames, manager):
Thread.__init__(self)
self.terminated = False # Stop flag
self.audio_frames = audio_frames # Contain 5ms frames
self.last_energy = 0 # Energy register
self.last_bass_energy = 0
self.last_high_energy = 0
self.bass_mean = 30 # Means
self.high_mean = 15
self.counter = 0 # State counter
self.frames = None # Frames buffer
self.manager = manager # Pointer to manager
self.state = 0 # State machine : 0 waiting for sweep, 1 waiting for silence, 2 waiting for bass
self.state_timestamp = 0 # Time since last state change
    # Thread processing RMS energy detection
def run(self):
logging.info("Starting RMS Energy detector")
        # This loop condition has to be checked frequently, so the code inside must not block
while not self.terminated:
if time.time() - self.state_timestamp > INTER_STATES_TIME: self.state = _STATE_WAITING
new_frame = self.audio_frames.get() # Get new frame (blocking)
if self.counter == 0:
self.frames = new_frame
self.counter += 1
elif self.counter >= BUFFER_SIZE:
self.frames = np.append(self.frames, new_frame)
# Global Energy
energy_raw = librosa.feature.rmse(y=self.frames) # RMS Energy calculation on full spectrum
new_energy = np.mean(energy_raw) # Mean energy
if math.isnan(new_energy):
logging.warning("Volume trop fort !")
else:
new_energy = int(new_energy) # Round energy
if np.abs(self.last_energy - new_energy) > ENERGY_CHANGE_THRESHOLD: # Detect a change
self.last_energy = new_energy
self.manager.new_energy(new_energy)
if new_energy < ENERGY_SILENCE_THRESHOLD: # Detect a silence
self.manager.silence()
# High frequency energy
new_high_energy = np.mean(energie.high_freq_energie(self.frames, SAMPLE_RATE)) # RMS Energy on high freq
if math.isnan(new_high_energy):
logging.warning("Volume trop fort !")
else:
new_high_energy = int(new_high_energy)
                    self.high_mean = (self.high_mean * MEAN_NUMBER + new_high_energy) / (1 + MEAN_NUMBER)  # Sliding mean calculation
if np.abs(self.last_high_energy - new_high_energy) > ENERGY_CHANGE_THRESHOLD: # Detect high energy change
self.last_high_energy = new_high_energy
self.manager.new_high_energy(new_high_energy)
if new_high_energy > self.high_mean * SWEEP_THRESHOLD: # Detect a sweep (high energy on high freq)
self.manager.sweep()
if self.state == _STATE_SWEEP: # Change machine state
self.state_timestamp = time.time()
if self.state == _STATE_WAITING:
self.state_timestamp = time.time()
self.state = _STATE_SWEEP
# Bass frequency energy
new_bass_energy = np.mean(energie.low_freq_energie(self.frames, SAMPLE_RATE)) # RMS Energy on low freq
if math.isnan(new_bass_energy):
logging.warning("Volume trop fort !")
else:
new_bass_energy = int(new_bass_energy)
                    self.bass_mean = (self.bass_mean * MEAN_NUMBER + new_bass_energy) / (1 + MEAN_NUMBER)  # Sliding mean calculation
if np.abs(self.last_bass_energy - new_bass_energy) > ENERGY_CHANGE_THRESHOLD: # Detect low energy change
self.last_bass_energy = new_bass_energy
self.manager.new_bass_energy(new_bass_energy)
if new_bass_energy > self.bass_mean * BASS_THRESHOLD: # Detect high bass
self.manager.bass()
if self.state == _STATE_BREAK: # Change machine state
self.state_timestamp = time.time()
self.state = _STATE_DROP
self.manager.drop()
if new_bass_energy < self.bass_mean / BREAK_THRESHOLD: # Detect break (low energy on low freq)
self.manager.bass_break()
if self.state == _STATE_BREAK: # Change machine state
self.state_timestamp = time.time()
if self.state == _STATE_SWEEP:
self.state_timestamp = time.time()
self.state = _STATE_BREAK
self.counter = 0
else:
self.frames = np.append(self.frames, new_frame)
self.counter += 1
# Method called to stop the thread
def stop(self):
self.terminated = True
self.audio_frames.put(np.empty(SAMPLE_PER_FRAME, dtype=np.int16)) # Release blocking getter
| UTF-8 | Python | false | false | 6,534 | py | 49 | energy_detection.py | 37 | 0.530762 | 0.522345 | 0 | 122 | 52.557377 | 135 |
whccx/ccxshop | 8,134,668,079,717 | 72a266367d2a2ea46a0de3952249b00bc9120f70 | 28ffaedb0d91e8c8316f958e002132843f41dcfd | /apps/shop/urls.py | ffec2281a9ad5dc45e2d926bddee110f6dfd1a5b | [] | no_license | https://github.com/whccx/ccxshop | 31d58cc5098f61318486a57cb927552be1074949 | 9d7ff9b49c975002dc46497df30471ba1585c962 | refs/heads/master | 2022-12-20T05:52:53.477857 | 2018-09-21T08:47:32 | 2018-09-21T08:47:32 | 147,655,448 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from shop import views  # import the views py file
app_name = 'shop'
urlpatterns = [
    url(r'^$', views.shop),  # default shop page
] | UTF-8 | Python | false | false | 216 | py | 23 | urls.py | 18 | 0.68 | 0.675 | 0 | 11 | 17.272727 | 39 |
bh2smith/advent | 9,715,216,029,971 | cd54221ece46ac6693dda0eb957f08d8f0ee9b4f | da43f29a091ee81e93c9a91fb94ebae76ddcfa73 | /2015/day08/day08.py | e4790143fd2d509b7af724a9e1aad938837b1dba | [] | no_license | https://github.com/bh2smith/advent | c243e77992af5ea88fd77df9313afd6a90e87843 | 6b94bdd97ad4d15d9f68048f65f6db8a3173e841 | refs/heads/master | 2020-04-11T19:03:36.972230 | 2019-01-01T16:32:50 | 2019-01-01T16:32:50 | 162,020,640 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import codecs
if __name__ == '__main__':
strings = list(map(lambda t: t.strip(), open('input').readlines()))
l, r, q = 0, 0, 0
for s in strings:
l += len(s)
t = codecs.getdecoder("unicode_escape")(s)[0]
r += len(t) - 2
m = s.encode("UTF-8")
q += len(str(m)) + s.count('"') - 1
print("part 1:", l - r)
print("part 2:", q - l)
| UTF-8 | Python | false | false | 387 | py | 36 | day08.py | 35 | 0.465116 | 0.44186 | 0 | 14 | 26.642857 | 71 |
mittgaurav/Pietone | 19,567,871,030,748 | 57c8b6f7955530afaf0bce4c7741893b4a8cd0fe | 41862e79ab6eb99ea2dee9b5f9258a2102d29b18 | /longest_palin_subsequence.py | f08c939f25f46c453ad80f8cbd2c146a3901ced1 | [] | no_license | https://github.com/mittgaurav/Pietone | 688427226a9ed3287b5ea4136042d4d475c83ebc | 859a2d4d80e11e2b4474580a423626a0e08ac245 | refs/heads/master | 2021-08-16T08:29:42.083499 | 2021-06-17T20:13:19 | 2021-06-17T20:13:19 | 156,988,154 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 00:54:52 2019
@author: gaurav
"""
def longest_palin_seq(arr) -> 'int':
"""longest palindromic
subsequence"""
if not arr:
return 0
if len(arr) == 1:
return 1
if arr[0] == arr[-1]:
# The two extremes match,
# try after removing them
return 2 + longest_palin_seq(arr[1:-1])
# see by removing either first or last
return max(longest_palin_seq(arr[1:]),
longest_palin_seq(arr[:-1]))
def longest_palin_dp(arr, i, j) -> 'int':
"""memoization: matrix of
start and end indices"""
if i < 0 or j >= len(arr) or i > j:
return 0
if matrix[i][j] != -1: # already set
return matrix[i][j]
if i == j:
matrix[i][j] = 1
elif arr[i] == arr[j]:
matrix[i][j] = 2 + longest_palin_dp(arr, i+1, j-1)
else:
matrix[i][j] = max(longest_palin_dp(arr, i+1, j),
longest_palin_dp(arr, i, j-1))
return matrix[i][j]
print(longest_palin_seq.__name__)
for A in ["abdbca", "cddpd"]:
matrix = [[-1 for _ in A] for _ in A]
print(A, longest_palin_seq(A), longest_palin_dp(A, 0, len(A)-1))
print("--------------------")
def l_p_string(arr):
"""contiguous string
that's a palindrome"""
### NOT CORRECT
### Need MANACHER
if not arr:
return (0, True)
if len(arr) == 1:
return (1, True)
if len(arr) == 2:
return (2, True) if arr[0] == arr[1] else (0, False)
# chars don't match,
# so check internal.
# Is not continuous
if arr[0] != arr[-1]:
return((max(l_p_string(arr[:-1])[0],
l_p_string(arr[1:])[0]), False))
val, club = l_p_string(arr[1:-1])
if club: # inside is continuous
# Club with the outer match
val += 2
# now, it may happen that sub-str
    # has longer match than continuous
without = max(l_p_string(arr[1:]), l_p_string(arr[:-1]))
if without[0] > val:
return without
return (val, club)
print(l_p_string.__name__)
print("abab", l_p_string("abab"))
print("babad", l_p_string("babad"))
print("abbccbba", l_p_string("abbccbba"))
print("abbccbballabbccbbal", l_p_string("abbccbballabbccbbal"))
print("abbccbballxabbccbbal", l_p_string("abbccbballxabbccbbal"))
print("rrar", l_p_string("rrar"))
print("--------------------")
def longest_paren(arr):
"""parenthesis match longest"""
# not working
if not arr:
return 0
if len(arr) == 1:
return 0
if len(arr) == 2:
return 2 if arr[0] == '(' and arr[1] == ')' else 0
if arr[0] == '(' and arr[1] == ')':
return 2 + longest_paren(arr[1:-1])
elif arr[0] != '(':
return longest_paren(arr[1:])
elif arr[-1] != ')':
return longest_paren(arr[:-1])
print(longest_paren.__name__)
print(longest_paren("()("))
print(longest_paren("("))
print(longest_paren(")()())"))
print(longest_paren(")())())"))
| UTF-8 | Python | false | false | 2,988 | py | 70 | longest_palin_subsequence.py | 70 | 0.533802 | 0.509705 | 0 | 120 | 23.9 | 68 |
EdmundOgban/music-experiments | 19,567,871,002,396 | f2216110ec35a229101f2dba16e6ae05dfb9d3e9 | 2ed536d11a712e701830652c1388d2f040739422 | /scores/ezio/ezio0.py | 88eb2a88c5fd9c83f8d724b3f95aacd5a6e46ccf | [
"MIT"
] | permissive | https://github.com/EdmundOgban/music-experiments | ba3b1601ef93694db072e76da58b7e046277cbb3 | c340deb335eb37e6e0ca8ee20a3fe4fac7409ed3 | refs/heads/master | 2023-08-20T06:13:39.316250 | 2020-05-09T05:42:41 | 2020-05-09T05:42:41 | 259,487,465 | 0 | 0 | MIT | true | 2020-04-28T00:15:47 | 2020-04-28T00:15:46 | 2020-04-27T19:24:34 | 2020-04-27T19:24:32 | 22 | 0 | 0 | 0 | null | false | false | import random
import itertools
from music import tone, play_sequence
def gen_rhythm(beats):
while True:
res = random.choices([1,2,4,8], (32,8,2,1),
k=random.choice([4,8,16,32]))
if sum(res) == beats:
return res
def gen_rhythm2(beats):
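    # Recursively halve the duration: ~90% of the time split it into two halves,
    # otherwise keep the whole value as a single note.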
if beats == 1:
return [1]
if random.random() < 0.9:
return [*gen_rhythm2(int(beats/2)), *gen_rhythm2(int(beats/2))]
else:
return [beats]
def make_music(synth):
MUL = 4
I = (0, 3, 5, 7, 10) # A C D E G
IV = (9, 12, 14, 16, 19) # F# A B C# E
V = (16, 19, 21, 23, 26) # C# E F# G# B
scale_I = [tone(x, 440) for x in I]
scale_IV = [tone(x, 440) for x in IV]
scale_V = [tone(x, 440) for x in V]
for scale in itertools.cycle([scale_I, scale_IV, scale_V, scale_V]):
TEMPO = random.choice([300, 400, 600])
BASE = 60 / TEMPO
bass_scale = [x / 4 for x in scale]
beat_duration = random.choice([4,8,16,32])
durations = [BASE*d for d in gen_rhythm2(beat_duration)]
hdlen = len(durations)
notes = [*random.choices(scale, (5,5,3,1,1), k=hdlen)*2,
*random.choices(scale, (1,1,3,5,5), k=hdlen)*2]
durations *= MUL
print(len(durations), len(notes))
sequence = list(zip(notes, durations))
#print(*[f'{round(freq,1):5}/{d:2}' for (freq, d) in sequence])
sequence2 = [(random.choice(scale) if x % 2 == 0 else 0, BASE*int(MUL/2))
for x in range(beat_duration*int(MUL/2))]
sequence3 = [(bass_scale[0], BASE*MUL) for x in range(beat_duration)]
synth.play_mix(
play_sequence(seq) for seq in [sequence, sequence2, sequence3]
)
| UTF-8 | Python | false | false | 1,746 | py | 10 | ezio0.py | 8 | 0.538373 | 0.479381 | 0 | 51 | 33.235294 | 81 |
bojangles-m/cli-clock | 9,405,978,401,642 | ca50b97f0bdd12226f4f697f9615cf1b9d1ef2f2 | cf5b403544ae2bc664c3a529a7e6fa18b7c9f691 | /lib/apps.py | 677c3e96a60fc8c333a4d7575f757b8e69c83527 | [] | no_license | https://github.com/bojangles-m/cli-clock | 11e8a319029db651a8c7aad911f5dac7d9439e63 | f2bfcbe5f19aa557cc20579d8aff2ba76b16c2fa | refs/heads/master | 2021-06-10T06:13:15.742592 | 2016-12-26T11:24:31 | 2016-12-26T11:24:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import subprocess
import time
from datetime import datetime
from lib.notification import notify
from lib.exceptions import PastTimeEnetredError
class timer:
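    """Countdown timer: run() blocks for `sec` seconds and then notifies;
    init() launches it detached via nohup."""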
@staticmethod
def run(sec, msg=None):
while (sec > 0):
sec -= 1
time.sleep(1)
notify("It's time", msg, sound=True)
@staticmethod
def init(sec, msg=None):
msg = msg if msg else "This message should appear instantly, with a sound"
subprocess.Popen(['nohup', './apps/timer.py', sec, msg])
class alarm:
@staticmethod
def run(at, msg=None):
at = datetime.strptime(at, "%Y-%m-%d %H:%M")
while(at > datetime.now()):
time.sleep(0.200)
notify("Wake up!", msg, sound=True)
@staticmethod
def init(at, msg=None):
msg = msg if msg else "This message should appear instantly, with a sound"
try:
al = datetime.strptime(at, "%Y-%m-%d %H:%M")
now = datetime.now()
if now >= al:
raise PastTimeEnetredError(at)
subprocess.Popen(['nohup', './apps/alarm.py', at, msg])
except ValueError as err:
print "Wrong input!"
print err
except PastTimeEnetredError as ex:
print "Time is up!"
print "Entered '%s' time has to be in the future." % (ex.value)
| UTF-8 | Python | false | false | 1,362 | py | 7 | apps.py | 6 | 0.571219 | 0.566079 | 0 | 51 | 25.705882 | 82 |
ec500-software-engineering/exercise-1-modularity-sunithapriya | 2,671,469,675,547 | 43357bc2d71998c68106c19c6a1bbc1bcc5ecfd8 | 06c6969c148e205200b912f73e16c878081892f9 | /main.py | 69334efef7d6a01981ba9378abd8b130b0fd6f8f | [] | no_license | https://github.com/ec500-software-engineering/exercise-1-modularity-sunithapriya | ace829f125cfff3d4e165115cd1214af50de8284 | 050e023693478c72cc5b54614c150cb08932a973 | refs/heads/master | 2020-04-22T01:24:45.301419 | 2019-02-19T22:09:05 | 2019-02-19T22:09:05 | 170,013,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from input import getPatientInfo, readSensorData
from storage import searchPerson, insert
from alert_system import alertCheck
from output import patient
if __name__ == "__main__":
#Input Module#
    patientInfo = getPatientInfo()
    # encode() returns a new value; keep it so non-ASCII characters are replaced
    patientInfo = patientInfo.encode("ascii", "replace").decode("ascii")
    sensorData = readSensorData()
    sensorData = sensorData.encode("ascii", "replace").decode("ascii")
#Storage Module#
#Insert paitent and sensor data into mongodb
insert(patientInfo, sensorData)
#Search for Patient Details using PatientId
patientDetails = searchPerson("1234")
#Alert Module#
#Check sensorData for alerts#
alert = alertCheck(sensorData)
#Output Module#
patient = patient()
#Recieve message from Alert system
patient.recieveFromAlert(alert)
#Display alert to UI
patient.send_alert_to_UI()
| UTF-8 | Python | false | false | 761 | py | 6 | main.py | 4 | 0.764783 | 0.759527 | 0 | 28 | 26.071429 | 48 |
CodeEnvironment/django-rest-framework-deploy-heroku | 10,196,252,379,339 | f1496cd1f56cfbf088a11b9cc4b17e3beabca0c5 | 7e205af8825a41c48d7f8fb3f9988a8746564f36 | /racing/admin.py | a8b11aecb7cc2ebaa82ffbc9f5cf0329748337a4 | [
"MIT"
] | permissive | https://github.com/CodeEnvironment/django-rest-framework-deploy-heroku | 6135a9f9837842933c958683b8f0f5c6bc555046 | c6ffb20961c193b0f4dc1289de904b5d6750f335 | refs/heads/main | 2023-04-24T16:03:18.606340 | 2021-04-29T20:47:42 | 2021-04-29T20:47:42 | 354,850,998 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Driver
admin.site.register(Driver)
| UTF-8 | Python | false | false | 89 | py | 53 | admin.py | 50 | 0.820225 | 0.820225 | 0 | 4 | 21.25 | 32 |
bfpimentel/dotfiles | 11,897,059,443,582 | a62027c62f0b29a9c7ee12c9ab09edd336811691 | 73e68928271ef728dd9c25979248b578a699a1bb | /.config/qtile/settings/keys.py | ee551c47d81e1d9f4a19a5def7e44b034ad29029 | [] | no_license | https://github.com/bfpimentel/dotfiles | 9461397ebf7947bb3bda69e9f12d26d53d28e871 | d786d584651acffcd6b5d4e270d8334da07fb7c3 | refs/heads/master | 2023-02-23T18:58:14.035005 | 2021-01-29T18:23:58 | 2021-01-29T18:23:58 | 323,170,869 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from libqtile.config import Key
from libqtile.lazy import lazy
from settings.groups import groups
mod = "mod4"
keys = [
# Qtile
Key([mod, "mod1"], "r", lazy.restart()),
Key([mod, "mod1"], "q", lazy.shutdown()),
# Switch between windows
Key([mod], "Down", lazy.layout.down()),
Key([mod], "Up", lazy.layout.up()),
Key([mod], "Left", lazy.layout.left()),
Key([mod], "Right", lazy.layout.right()),
# Toggle floating
Key([mod], "f", lazy.window.toggle_floating()),
# Move windows
Key([mod, "shift"], "Down", lazy.layout.shuffe_down()),
Key([mod, "shift"], "Up", lazy.layout.shuffe_up()),
Key([mod, "shift"], "Left", lazy.layout.shuffle_left()),
Key([mod, "shift"], "Right", lazy.layout.shuffle_right()),
# Toggle layouts
Key([mod], "Tab", lazy.next_layout()),
# Switch Screens
Key([mod, "mod1"], "Tab", lazy.next_screen()),
# Kill Window
Key([mod], "w", lazy.window.kill()),
    # Terminal emulator (kitty)
Key([mod], "Return", lazy.spawn("kitty")),
# Rofi
Key([mod], "space", lazy.spawn("rofi -show drun")),
Key([mod, "shift"], "space", lazy.spawn("rofi -show")),
# Flameshot
Key([mod], "p", lazy.spawn("flameshot gui")),
Key([mod, "shift"], "p", lazy.spawn("flameshot screen -r -c")),
Key([mod, "mod1"], "p", lazy.spawn("flameshot screen -r -p ~/Pictures")),
]
for key, group in enumerate(groups, 1):
keys.append(Key([mod], str(key), lazy.group[group.name].toscreen()))
keys.append(Key([mod, "shift"], str(key), lazy.window.togroup(group.name)))
| UTF-8 | Python | false | false | 1,566 | py | 32 | keys.py | 17 | 0.584291 | 0.58046 | 0 | 51 | 29.686275 | 79 |
l0kihardt/time-analyse-bot | 4,836,133,212,911 | c52f41ea4762c9ba7caa95abe2b7887343f53f3f | ffcdf925083be0b9ec7bab177aef4abd90936dc8 | /plugins/Test.py | ed3c837d603d3979c6f8640b60c8436463e887a1 | [] | no_license | https://github.com/l0kihardt/time-analyse-bot | 8f46dc63752c3e2a0b256357e66ec302c5a85453 | b33900043b952bce7bc302a28f89a26620522d60 | refs/heads/master | 2021-05-05T23:48:37.259783 | 2018-01-10T02:15:57 | 2018-01-10T02:15:57 | 116,893,041 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# This file defines a class that eases the connection to iCloud for CalDAV manipulation.
# It uses the credentials supplied in the constructor to discover the user's principal and calendar-home-set URLs, then uses
# these as inputs to the caldav library to add a calendar and create an event.
# If the example is re-run, an authorisation error will occur as the example will try to re-add the same event, which will be rejected due to the duplicate ID.
#
from __future__ import print_function
from datetime import datetime
import sys
from bs4 import BeautifulSoup
import caldav
from caldav.elements import dav, cdav
from caldav.lib.error import AuthorizationError
from lxml import etree
import requests
from requests.auth import HTTPBasicAuth
class iCloudConnector(object):
icloud_url = "http://cal.trumind.net:8008"
username = None
password = None
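    # PROPFIND request bodies used to discover the current-user-principal and the calendar-home-set URLs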
propfind_principal = (
u'''<?xml version="1.0" encoding="utf-8"?><propfind xmlns='DAV:'>'''
u'''<prop><current-user-principal/></prop></propfind>'''
)
propfind_calendar_home_set = (
u'''<?xml version="1.0" encoding="utf-8"?><propfind xmlns='DAV:' '''
u'''xmlns:cd='urn:ietf:params:xml:ns:caldav'><prop>'''
u'''<cd:calendar-home-set/></prop></propfind>'''
)
def __init__(self, username, password, **kwargs):
self.username = username
self.password = password
if 'icloud_url' in kwargs:
self.icloud_url = kwargs['icloud_url']
self.discover()
self.get_calendars()
# discover: connect to icloud using the provided credentials and discover
#
# 1. The principal URL
    # 2. The calendar home URL
    #
    # These URLs vary from user to user
    # once discovered, these can then be used to manage calendars
def discover(self):
        # Build and dispatch a request to discover the principal URL for the
        # given credentials
headers = {
'Depth': '1',
}
auth = HTTPBasicAuth(self.username, self.password)
principal_response = requests.request(
'PROPFIND',
self.icloud_url,
auth=auth,
headers=headers,
data=self.propfind_principal.encode('utf-8')
)
if principal_response.status_code != 207:
print('Failed to retrieve Principal: ',
principal_response.status_code)
exit(-1)
# Parse the resulting XML response
soup = BeautifulSoup(principal_response.content, 'lxml')
self.principal_path = soup.find(
'current-user-principal'
).find('href').get_text()
discovery_url = self.icloud_url + self.principal_path
# Next use the discovery URL to get more detailed properties - such as
# the calendar-home-set
home_set_response = requests.request(
'PROPFIND',
discovery_url,
auth=auth,
headers=headers,
data=self.propfind_calendar_home_set.encode('utf-8')
)
if home_set_response.status_code != 207:
print('Failed to retrieve calendar-home-set',
home_set_response.status_code)
exit(-1)
# And then extract the calendar-home-set URL
soup = BeautifulSoup(home_set_response.content, 'lxml')
        self.calendar_home_set_url = self.icloud_url + soup.find(
'href',
attrs={'xmlns':'DAV:'}
).get_text()
# get_calendars
# Having discovered the calendar-home-set url
# we can create a local object to control calendars (thin wrapper around
# CALDAV library)
def get_calendars(self):
self.caldav = caldav.DAVClient(self.calendar_home_set_url,
username=self.username,
password=self.password)
self.principal = self.caldav.principal()
self.calendars = self.principal.calendars()
def get_named_calendar(self, name):
if len(self.calendars) > 0:
for calendar in self.calendars:
properties = calendar.get_properties([dav.DisplayName(), ])
display_name = properties['{DAV:}displayname']
if display_name == name:
return calendar
return None
def create_calendar(self,name):
return self.principal.make_calendar(name=name)
def delete_all_events(self,calendar):
for event in calendar.events():
event.delete()
return True
def create_events_from_ical(self, ical):
# to do
pass
def create_simple_timed_event(self,start_datetime, end_datetime, summary,
description):
# to do
pass
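        # A possible sketch (not part of the original code): build a VEVENT
        # string like the `vcal` example below and hand it to a calendar's
        # add_event(); a target calendar would also need to be chosen, e.g.:
        #   vevent = ("BEGIN:VCALENDAR\nVERSION:2.0\nBEGIN:VEVENT\n"
        #             "UID:%s\nDTSTAMP:%s\nDTSTART:%s\nDTEND:%s\n"
        #             "SUMMARY:%s\nDESCRIPTION:%s\nEND:VEVENT\nEND:VCALENDAR"
        #             ) % (uid, stamp, start, end, summary, description)
        #   calendar.add_event(vevent)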
def create_simple_dated_event(self,start_datetime, end_datetime, summary,
description):
# to do
pass
# Simple example code
# Format the timestamp strings, e.g.:
# DTSTAMP = time.strftime('%Y%m%dT%H%M%SZ', time.localtime())
vcal = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Example Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:0000000008
DTSTAMP:20180104T111016Z
DTSTART:20180104T111016Z
DTEND:20180104T131016Z
SUMMARY:This is an event
END:VEVENT
END:VCALENDAR
"""
username = 'user01'
password = 'user01'
# The above is an 'application password' any app must now have its own
# password in iCloud. For info refer to
# https://www.imore.com/how-generate-app-specific-passwords-iphone-ipad-mac
icx = iCloudConnector(username, password)
# Get the calendar by name
cal = icx.get_named_calendar('MyCalendar')
# Create a new calendar
if not cal:
cal = icx.create_calendar('MyCalendar')
# Create a new event
try:
cal.add_event(vcal)
except AuthorizationError as ae:
print('Couldn\'t add event', ae.reason)
# Fetch all events between 2018/1/1 and 2018/6/1
results = cal.date_search(datetime(2018, 1, 1), datetime(2018, 6, 1))
for event in results:
print("Found", event)
    print(event.data)  # print the event's data
    # event.delete()  # delete the event from the server
print("----------")
| UTF-8 | Python | false | false | 6,118 | py | 6 | Test.py | 4 | 0.621599 | 0.603019 | 0 | 187 | 31.229947 | 158 |
thomastu/CaReCur | 2,482,491,111,974 | de4e21bf7fda86d0e6337df63f5133328d94b26a | d94a8f8e512093a49a8cb0a6c246c2431b94e7ff | /src/data/geography/ca_counties.py | cf0dad2b0cd465bf8e7b05da1438f9cd85a43a69 | [
"MIT"
] | permissive | https://github.com/thomastu/CaReCur | e66a4be7f009ed36eea5fd9adaa3b9afbe0d3987 | bd057ea8c20d403beedc5eec2af353fcffaa304a | refs/heads/master | 2022-07-16T12:43:37.395740 | 2020-05-15T18:58:05 | 2020-05-15T18:58:05 | 240,231,758 | 3 | 0 | MIT | false | 2020-03-26T05:49:10 | 2020-02-13T10:18:12 | 2020-03-26T05:48:32 | 2020-03-26T05:48:28 | 6,254 | 1 | 0 | 0 | Jupyter Notebook | false | false | """California county shape files.
https://data.ca.gov/dataset/ca-geographic-boundaries/resource/b0007416-a325-4777-9295-368ea6b710e6
"""
import zipfile
from invoke import run
from loguru import logger
from src.conf import settings
RAW_DIR = settings.DATA_DIR / "raw/geography/"
PROCESSED_DIR = settings.DATA_DIR / "processed/geography/"
url = "https://data.ca.gov/dataset/e212e397-1277-4df3-8c22-40721b095f33/resource/b0007416-a325-4777-9295-368ea6b710e6/download/ca-county-boundaries.zip"
fn = "ca-county-boundaries.zip"
if __name__ == "__main__":
# Get data
RAW_DIR.mkdir(exist_ok=True)
PROCESSED_DIR.mkdir(exist_ok=True)
fp = RAW_DIR/fn
# Download data
cmd = f"curl -L {url} -o {fp}"
run(cmd)
# Unzip it!
with zipfile.ZipFile(fp, "r") as fh:
fh.extractall(PROCESSED_DIR) | UTF-8 | Python | false | false | 827 | py | 54 | ca_counties.py | 18 | 0.70133 | 0.608222 | 0 | 32 | 24.875 | 152 |
rowenama/Ma | 3,109,556,363,992 | f7c38d94b6919cba3d3d274b11d3c1f295702cb4 | fca04e7621f77dd05ce2f7d7eb8e609342c12022 | /Question4/question4.py | dbaae7b88f2c59f2b913da9a398c06ad5b75c530 | [] | no_license | https://github.com/rowenama/Ma | 897ac6d8196a3d2cb6e81eb56b9e745691ed17e6 | 3b4073ece142750e73946e1470209910516d641a | refs/heads/main | 2023-03-21T15:07:23.454259 | 2021-03-09T12:46:07 | 2021-03-09T12:46:07 | 345,871,038 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Given Inputs
L1 = 20
L2 = 10
InputImage= [
[0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1],
[1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1]
]
OutputMatrix = [[0 for x in range(L1)] for y in range(L2)]
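# OutputMatrix holds a provisional component label for every pixel (0 = background)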
# 4-connectivity
CountNum = 1
# first pass (top to bottom): determine the connection of each pixel with its neighbours
for i, data_i in enumerate(InputImage):
for j, data_j in enumerate(data_i):
if data_j>0:
Con = [CountNum]
if j!=0:
Con.append(OutputMatrix[i][j-1])
if j<L1-1:
Con.append(OutputMatrix[i][j+1])
if data_i[j+1] >= 1 and i!=0:
Con.append(OutputMatrix[i-1][j+1])
if i!=0:
Con.append(OutputMatrix[i-1][j])
if i<L2-1:
Con.append(OutputMatrix[i+1][j])
flag = min(list(filter(lambda a: a != 0, Con)))
if len(list(filter(lambda a: a != 0, Con)))==1:
CountNum = CountNum + 1
OutputMatrix[i][j] = flag
Seq=[0]
# second pass (bottom to top): determine the connection of each pixel with its neighbours
for i, data_i in reversed(list(enumerate(InputImage))):
for j, data_j in reversed(list(enumerate(data_i))):
if data_j>0:
Con = [OutputMatrix[i][j]]
if j!=0:
Con.append(OutputMatrix[i][j-1])
if j<L1-1:
Con.append(OutputMatrix[i][j+1])
if i!=0:
Con.append(OutputMatrix[i-1][j])
if i<L2-1:
Con.append(OutputMatrix[i+1][j])
Con = list(filter(lambda a: a != 0, Con))
if len(Con)>=1:
flag = min(Con)
OutputMatrix[i][j] = flag
Seq.append(flag)
# sort the distinct labels so they can be renumbered consecutively
Seq = sorted(set(Seq))
#Output the result
for i, data in enumerate(OutputMatrix):
for j, data_j in enumerate(data):
print(Seq.index(data_j), end =" ")
print()
| UTF-8 | Python | false | false | 2,509 | py | 8 | question4.py | 7 | 0.474293 | 0.376644 | 0 | 69 | 35.362319 | 79 |
keiji/region_cropper | 15,187,004,360,964 | 5962289d308a019ff3288f0b72ebdfa388b64ea2 | c8bb15cabe090d1c49cff017420868b5bbf213d9 | /tools/src/entity/rect.py | 971be521dfd98b9a4e2f3bb84ec7114876cb8be2 | [
"Apache-2.0"
] | permissive | https://github.com/keiji/region_cropper | c47930ec60a175d5801e6d976f1f158e88e12a51 | b0a6838fb52569a08f48954785a36f31762f263c | refs/heads/master | 2020-12-25T16:24:57.843518 | 2020-11-04T16:32:40 | 2020-11-04T16:32:40 | 68,176,688 | 10 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/bin/python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Rect(object):
left = 0
top = 0
right = 0
bottom = 0
def width(self):
return (self.right - self.left)
def height(self):
return (self.bottom - self.top)
def center(self):
cX = round(self.left + (self.width() / 2))
cY = round(self.top + (self.height() / 2))
return (cX, cY)
def __init__(self, left, top, right, bottom):
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def __eq__(self, other):
if isinstance(other, Rect):
return (
(self.left == other.left)
and (self.top == other.top)
and (self.right == other.right)
and (self.bottom == other.bottom)
)
else:
return False
def __ne__(self, other):
return (not self.__eq__(other))
def __repr__(self):
return "Entry(%f, %f, %f, %f)" % (
self.left, self.top, self.right, self.bottom)
def __hash__(self):
return hash(self.__repr__())
def copy(self):
return Rect(self.left, self.top, self.right, self.bottom)
def tostring(self):
return '(%f, %f, %f, %f)' % (self.left, self.top, self.right, self.bottom)
| UTF-8 | Python | false | false | 1,284 | py | 30 | rect.py | 5 | 0.575545 | 0.569315 | 0 | 58 | 21.137931 | 78 |
epam/Indigo | 17,961,553,245,071 | e86a2857d82af12fdede4cce83c02468c9a6c5f2 | 182bbadb0ee7f59f1abd154d06484e555a30c6d8 | /api/tests/integration/tests/basic/basic_load.py | 4997a4a4c405056155d6748462e0cfd803bf4829 | [
"Apache-2.0"
] | permissive | https://github.com/epam/Indigo | 08559861adf474122366b6e2e499ed3aa56272d1 | 8e473e69f393c3a57ff75b7728999c5fb4cbf1a3 | refs/heads/master | 2023-09-02T10:14:46.843829 | 2023-08-25T08:39:24 | 2023-08-25T08:39:24 | 37,536,320 | 265 | 106 | Apache-2.0 | false | 2023-09-14T17:34:00 | 2015-06-16T14:45:56 | 2023-09-06T21:50:50 | 2023-09-14T17:33:59 | 225,517 | 246 | 88 | 336 | C++ | false | false | import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import * # noqa
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
mol = indigo.loadMolecule(
"OCC1C(O)C(O)C(O)C(OC2C(O)C(O)C(OCC3CCCCC3)OC2CO)O1 |ha:0,1,2,3,4,5,6,7,8,9,10,29,hb:0,1,2,3,4,5,6,7,8,9,30,31|"
)
print("****** Molfile 2000 ********")
indigo.setOption("molfile-saving-mode", "2000")
print(mol.molfile())
print("****** Molfile 3000 ********")
indigo.setOption("molfile-saving-mode", "3000")
print(mol.molfile())
print("****** CML ********")
print(mol.cml())
print("****** SMILES ********")
print(mol.smiles())
print("****** Canonical SMILES ********")
mol.unhighlight()
print(mol.canonicalSmiles())
print("****** Loading SDF with multiline properties ********")
for item in indigo.iterateSDFile(
joinPathPy("molecules/multiline_properties.sdf", __file__)
):
for prop in item.iterateProperties():
print(prop.name() + " : " + prop.rawData())
print("****** CurlySMILES ********")
m = indigo.loadMolecule("PC{-}{+n}N")
print(m.smiles())
m = indigo.loadMolecule("PC{-}O{+n}N")
print(m.smiles())
print("****** Finding invalid stereocenters ********")
for item in indigo.iterateSDFile(
joinPathPy("molecules/invalid_3d_stereocenters.sdf", __file__)
):
try:
print(item.molfile())
except IndigoException as e:
print(getIndigoExceptionText(e))
try:
print(item.smiles())
except IndigoException as e:
print(getIndigoExceptionText(e))
print("****** Extended aromatic SMILES ********")
m = indigo.loadMolecule("NC(Cc1c[nH]c2cc[te]c12)C(O)=O")
print(m.smiles())
m.dearomatize()
print(m.smiles())
print("****** Skip BOM flag ********")
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/mol-utf8-bom.mol", __file__)
)
print(m.name())
print("****** Incomplete stereo in SMILES/SMARTS ********")
print("[*@]")
m = indigo.loadQueryMolecule("[*@]")
print(m.smiles())
print("[*@H]")
m = indigo.loadQueryMolecule("[*@H]")
print(m.smiles())
print("[*@H](~*)~*")
m = indigo.loadQueryMolecule("[*@H](~*)~*")
print(m.smiles())
print("****** H2 molecule ********")
m = indigo.loadMoleculeFromFile(joinPathPy("molecules/H2.mol", __file__))
indigo.setOption("molfile-saving-mode", "2000")
print(m.smiles())
print(m.canonicalSmiles())
print(m.molfile())
print(m.grossFormula())
print("****** S-group's SCL (CLASS) support ********")
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/sa-class-v2000.mol", __file__)
)
indigo.setOption("molfile-saving-mode", "2000")
print(m.canonicalSmiles())
print(m.molfile())
indigo.setOption("molfile-saving-mode", "3000")
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/sa-class-v3000.mol", __file__)
)
print(m.canonicalSmiles())
print(m.molfile())
print("****** S-group's SPL (PARENT) support ********")
m = indigo.loadMoleculeFromFile(
dataPath("molecules/sgroups/sgroups-V2000.mol")
)
indigo.setOption("molfile-saving-mode", "2000")
print(m.canonicalSmiles())
print(m.molfile())
indigo.setOption("molfile-saving-mode", "3000")
m = indigo.loadMoleculeFromFile(
dataPath("molecules/sgroups/sgroups-V3000.mol")
)
print(m.canonicalSmiles())
print(m.molfile())
print("****** Load custom collection ********")
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/custom_collection.mol", __file__)
)
print(m.molfile())
print("****** Load TEMPLATE (SCSR) structure ********")
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/SCSR_test.mol", __file__)
)
print(m.molfile())
print("****** Alias handling (V2000) ********")
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/alias_marvin_v2000.mol", __file__)
)
indigo.setOption("molfile-saving-mode", "2000")
print(m.molfile())
print("****** Alias handling (V3000) ********")
indigo.setOption("molfile-saving-mode", "3000")
print(m.molfile())
print("****** Alias handling (CML) ********")
print(m.cml())
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/alias_marvin.cml", __file__)
)
indigo.setOption("molfile-saving-mode", "2000")
print(m.molfile())
print("****** Alias handling (SMILES) ********")
print(m.canonicalSmiles())
print("****** Test load from gzip buffer ********")
with open(joinPathPy("molecules/benzene.mol.gz", __file__), "rb") as gz_mol:
buf = gz_mol.read()
if isIronPython():
from System import Array, Byte
buf_arr = bytearray(buf)
buf = Array[Byte]([Byte(b) for b in buf_arr])
m = indigo.loadMoleculeFromBuffer(buf)
print(m.canonicalSmiles())
print("****** Load V3000 with DISP keyword ********")
m = indigo.loadMoleculeFromFile(
joinPathPy("molecules/V3000_disp.mol", __file__)
)
indigo.setOption("molfile-saving-mode", "3000")
print(m.molfile())
print("****** Load V3000 with unknown keyword ********")
try:
mol = indigo.loadMoleculeFromFile(
joinPathPy("molecules/V3000_unknown.mol", __file__)
)
except IndigoException as e:
print(getIndigoExceptionText(e))
try:
mol = indigo.loadMoleculeFromFile(
joinPathPy("molecules/V3000_unknown_atom_key.mol", __file__)
)
except IndigoException as e:
print(getIndigoExceptionText(e))
print("****** Name is skeletal prefix ********")
try:
m = indigo.loadMolecule("sil")
except IndigoException as e:
print(getIndigoExceptionText(e))
| UTF-8 | Python | false | false | 5,390 | py | 2,282 | basic_load.py | 1,260 | 0.642115 | 0.615584 | 0 | 189 | 27.518519 | 116 |
jasonchoww/tradingview-automation | 5,909,875,038,555 | 9986872ae1bfeae4e9eea9aef503742835b74019 | 837e81a18dbe597cf389b34ffe3bca3a28b6ee93 | /launch_tradingview.py | c12dda425c8e27ab95cedb85ca181e77224acb6a | [] | no_license | https://github.com/jasonchoww/tradingview-automation | 1c5de353edbcec94cae4bb9dce230839adcb2c01 | b4c7bd56f746c90dbe1c633f8bb4bbd75b3f5b52 | refs/heads/master | 2020-05-07T12:31:40.372230 | 2019-04-14T19:55:10 | 2019-04-14T19:55:10 | 180,507,855 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
def launch(driver):
# launches tradingview
driver.get("https://www.tradingview.com/")
# login
driver.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[4]/span[2]/a').click()
time.sleep(2)
# google+ login
driver.find_element_by_xpath(
'//*[@id="overlap-manager-root"]/div/div[2]/div/div/div/div/div/div[1]/div[2]/span[2]').click()
time.sleep(2)
# switches to second window
driver.switch_to.window(driver.window_handles[1])
# send_keys: email account
driver.find_element_by_xpath('//*[@id="identifierId"]').send_keys("example@email.com")
driver.find_element_by_xpath('//*[@id="identifierNext"]/content/span').click()
time.sleep(2)
# send_keys: password
driver.find_element_by_xpath('//*[@id="password"]/div[1]/div/div[1]/input').send_keys("password_goes_here")
driver.find_element_by_xpath('//*[@id="passwordNext"]/content/span').click()
time.sleep(2)
# switches back to original window after logging in
driver.switch_to.window(driver.window_handles[0])
time.sleep(3)
# opens chart
driver.find_element_by_xpath('/html/body/div[3]/div[2]/div[2]/ul/li[6]').click()
| UTF-8 | Python | false | false | 1,191 | py | 3 | launch_tradingview.py | 3 | 0.649034 | 0.630563 | 0 | 35 | 33.028571 | 111 |
muhlik20033/muhlik20033 | 12,773,232,758,766 | 2e2835bfe307c9fcaa7a5a20e27974b7369003c1 | a2419e48f7a8ea87f71a96775eff2faa087e1603 | /TSIS 5/17.py | a9fc92a4c2fa87e5693b646fc382bbefcd1a80f4 | [] | no_license | https://github.com/muhlik20033/muhlik20033 | 6bc7211074a97aed25229a22e8e729b29df494c6 | 60bcdda9eeb360de6c233ed45597e48a874c349f | refs/heads/main | 2023-06-24T01:43:44.032740 | 2021-07-29T08:45:04 | 2021-07-29T08:45:04 | 380,452,848 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | with open('text.txt') as f:
file = f.read().splitlines()
print([s.rstrip('\n ') for s in file]) | UTF-8 | Python | false | false | 105 | py | 48 | 17.py | 48 | 0.571429 | 0.571429 | 0 | 3 | 33.666667 | 42 |
AlexandruGodri/pygame-tooltkit | 8,512,625,184,477 | 40a7642f74671d5e7457025bd994d5d3b8c87711 | 1abd3c2dd22c04fb291907989c7b4475cf1709b1 | /game/game.py | 203464ad43dac6484c41f8353fe03046dec7e81a | [] | no_license | https://github.com/AlexandruGodri/pygame-tooltkit | 76b4d428541cb7ab5b34dd6328c11be816285117 | 611ef986e520ed95205d4926e512a2a8c2770057 | refs/heads/master | 2021-01-20T07:57:20.031582 | 2017-06-08T13:23:52 | 2017-06-08T13:23:52 | 90,072,762 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import pygame
class Game():
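    """Minimal pygame wrapper: owns the display surface, a sprite group, and a
    per-event-type list of callbacks dispatched from run()."""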
def __init__(self):
self._size = None
self._background = None
self.screen = None
self.sprites = pygame.sprite.Group()
self.events = {}
self._ready = False
def init(self, size, background):
self._size = size
self._background = background
pygame.init()
self.screen = pygame.display.set_mode(size)
self.screen.fill(background)
def ready(self):
self._ready = True
def create_image_sprite(self, img_path, position, angle, size):
try:
img = pygame.sprite.Sprite()
img.image = pygame.Surface(size)
img.image = pygame.image.load(img_path).convert_alpha()
img.image = pygame.transform.rotate(img.image, angle)
img.rect = img.image.get_rect()
img.rect.x = position[0]
img.rect.y = position[1]
self.sprites.add(img)
return img
except Exception as e:
print 'Error Adding Image', e, img_path, position, size, angle
return None
def create_rectangle(self, color, position, size):
try:
img = pygame.sprite.Sprite()
img.image = pygame.Surface(size)
img.image.fill(color)
img.rect = img.image.get_rect()
img.rect.x = position[0]
img.rect.y = position[1]
self.sprites.add(img)
return img
except Exception as e:
print 'Error Adding Rectangle', e, position, size
return None
def move_sprite(self, sprite, position=None, angle=None):
if sprite in self.sprites:
if angle is not None:
orig_rect = sprite.image.get_rect()
rot_image = pygame.transform.rotate(sprite.image, angle)
rot_rect = orig_rect.copy()
rot_rect.center = rot_image.get_rect().center
sprite.image = rot_image.subsurface(rot_rect).copy()
if position is not None:
sprite.rect.x = position[0]
sprite.rect.y = position[1]
def render(self):
self.screen.fill(self._background)
self.sprites.update()
self.sprites.draw(self.screen)
pygame.display.flip()
def on(self, event, cb):
if event not in self.events:
self.events[event] = []
self.events[event].append(cb)
def run(self):
while not self._ready:
time.sleep(0.01)
clock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if self._ready:
if event.type in self.events:
for cb in self.events[event.type]:
cb(event)
if self._ready:
try:
self.render()
except Exception as e:
pass
clock.tick(60)
| UTF-8 | Python | false | false | 3,003 | py | 11 | game.py | 10 | 0.525475 | 0.521812 | 0 | 97 | 29.958763 | 74 |
mir-am/Mir-Repo | 12,515,534,709,857 | 773c73bebcbd9c993227828abde46a0576c8d816 | f40f2c84b3063eee6404422fdc3ed33b413f9503 | /src/iknntsvm.py | 53f96ce04cbdda50d8d9b452a5c931968b121cf2 | [] | no_license | https://github.com/mir-am/Mir-Repo | ec3607b9fcf6de727f7548a6bacdb39a174a1694 | 8edf848592a0111d541c5d311303ad1b2a58fd03 | refs/heads/master | 2020-03-20T20:09:56.505968 | 2019-06-11T15:33:37 | 2019-06-11T15:33:37 | 137,673,294 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 12:09:33 2018
@author: Mir, A.
"""
# Implementation of Improved KNN-based twin support vector machine
# IKNN-TSVM
import clippSolverv3
from dataproc import read_data
from twinsvm import train_tsvm, predict_tsvm
from clipp import clipp_dcd
from weight import w_compute_mir
from playground import hyperp_eq  # needed by plot_IKNNTSVM below
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from ls_wtsvm import accuracy
from scipy import sparse
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import time
# Train IKNN-TSVM - Linear case
def train_IKTSVM(X_train, X_train_label, c, k, method='FSA'):
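    """Train the linear IKNN-TSVM: build KNN-based weight matrices, solve the
    two dual problems with the clipDCD solver, and return the two non-parallel
    hyperplanes as (w_1, b_1, w_2, b_2)."""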
# Matrix A or class 1 data
mat_A = X_train[X_train_label == 1]
# Matrix B or class -1 data
mat_B = X_train[X_train_label == -1]
# Compute weight matrices - KNN finding
weight = w_compute_mir(X_train, X_train_label, k, method)
# Define margin points of class +1 and -1
mat_M_1 = mat_A[weight[3] == 1] # Class + 1
mat_M_2 = mat_B[weight[1] == 1] # Class -1
# Prepare D_1, D_2 - Diag. matrices
mat_D_1 = np.diag(weight[0].reshape(weight[0].shape[0],))
mat_D_2 = np.diag(weight[2].reshape(weight[2].shape[0],))
# Scipy sparse matrix
#mat_D_1_s = sparse.diags(weight[0].reshape(weight[0].shape[0],))
#mat_D_2_s = sparse.diags(weight[2].reshape(weight[2].shape[0],))
# Define H=[A e] & G=[M2 e] matrix
mat_H = np.column_stack((mat_A, np.ones((mat_A.shape[0], 1), \
dtype=np.float64))) # Class +1
mat_G = np.column_stack((mat_M_2, np.ones((mat_M_2.shape[0], 1), \
dtype=np.float64))) # Margin points of class -1
# Define Q=[B e] & P=[M1 e]
mat_Q = np.column_stack((mat_B, np.ones((mat_B.shape[0], 1), \
dtype=np.float64))) # Class -1
mat_P = np.column_stack((mat_M_1, np.ones((mat_M_1.shape[0], 1), \
dtype=np.float64))) # Margin points of class +1
# Transpose of H, G, Q & P
mat_H_t = np.transpose(mat_H)
mat_G_t = np.transpose(mat_G)
mat_Q_t = np.transpose(mat_Q)
mat_P_t = np.transpose(mat_P)
    # regularization term - for overcoming ill-conditioning
reg = 2 ** float(-7)
# Sparse matrix multiplication
#mat_H_D_1 = sparse.csr_matrix.dot(mat_H_t, mat_D_1)
#mat_Q_D_2 = sparse.csr_matrix.dot(mat_Q_t, mat_D_2)
# Compute matrix inverses
mat_H_D_H = np.linalg.inv(np.dot(np.dot(mat_H_t, mat_D_1), mat_H) + \
(reg * np.identity(mat_H.shape[1])))
mat_Q_D_Q = np.linalg.inv(np.dot(np.dot(mat_Q_t, mat_D_2), mat_Q) + \
(reg * np.identity(mat_Q.shape[1])))
# Wolfe dual of class 1
mat_dual1 = np.dot(np.dot(mat_G, mat_H_D_H), mat_G_t)
# Wofle dual of class 2
mat_dual2 = np.dot(np.dot(mat_P, mat_Q_D_Q), mat_P_t)
# Solving dual problem 1 - obtaining hyperplane of class 1
#alpha_d1 = clipp_dcd(mat_dual1, c)
alpha_d1 = np.array(clippSolverv3.clippDCD_V3(mat_dual1, c)).reshape(mat_dual1.shape[0], 1)
# Solving dual problem 2 - obtaining hyperplane of class 2
#alpha_d2 = clipp_dcd(mat_dual2, c)
alpha_d2 = np.array(clippSolverv3.clippDCD_V3(mat_dual2, c)).reshape(mat_dual2.shape[0], 1)
# Obtain hyperplane 1 & 2
hyper_p_1 = -1 * np.dot(np.dot(mat_H_D_H, mat_G_t), alpha_d1)
w_1 = hyper_p_1[:hyper_p_1.shape[0] - 1, :]
b_1 = hyper_p_1[-1, :]
hyper_p_2 = np.dot(np.dot(mat_Q_D_Q, mat_P_t), alpha_d2)
w_2 = hyper_p_2[:hyper_p_2.shape[0] - 1, :]
b_2 = hyper_p_2[-1, :]
return w_1, b_1, w_2, b_2
# Predict IKNN-TSVM - Linear case
def pre_IKTSVM(X_test, w_1, b_1, w_2, b_2):
prepen_distance = np.zeros((X_test.shape[0], 2))
for i in range(X_test.shape[0]):
        # Perpendicular distance of data point i from hyperplane 1 (class +1)
prepen_distance[i, 1] = np.abs(np.dot(X_test[i, :], w_1) + b_1)
        # Perpendicular distance of data point i from hyperplane 2 (class -1)
prepen_distance[i, 0] = np.abs(np.dot(X_test[i, :], w_2) + b_2)
# Step 5: Assign data points to class +1 or -1 based on distance from hyperplanes
output = 2 * np.argmin(prepen_distance, axis=1) - 1
return output
# Linear case - IKNN-TSVM - Cross validation
def cv_lin_IKTSVM(data_train, data_labels, k_fold, k, c, method='FSA'):
# K-Fold Cross validation, divide data into K subsets
k_fold = KFold(k_fold)
# Store result after each run
mean_accuracy = []
    # Positive class
mean_recall_p, mean_precision_p, mean_f1_p = [], [], []
# Negative class
mean_recall_n, mean_precision_n, mean_f1_n = [], [], []
# Count elements of confusion matrix
tp, tn, fp, fn = 0, 0, 0, 0
k_time = 1
# Train and test IKNN-TSVM K times
for train_index, test_index in k_fold.split(data_train):
# Extract data based on index created by k_fold
X_train = np.take(data_train, train_index, axis=0)
X_test = np.take(data_train, test_index, axis=0)
X_train_label = np.take(data_labels, train_index, axis=0)
X_test_label = np.take(data_labels, test_index, axis=0)
# Train Classifier - obtain two non-parallel hyperplane
hyper_p = train_IKTSVM(X_train, X_train_label, c, k, method)
# Parameters of two hyperplanes
w_1 = hyper_p[0]
b_1 = hyper_p[1]
w_2 = hyper_p[2]
b_2 = hyper_p[3]
# Predict
output = pre_IKTSVM(X_test, w_1, b_1, w_2, b_2)
# Compute evaluation metrics
accuracy_test = accuracy(X_test_label, output)
mean_accuracy.append(accuracy_test[4])
        # Positive class
mean_recall_p.append(accuracy_test[5])
mean_precision_p.append(accuracy_test[6])
mean_f1_p.append(accuracy_test[7])
# Negative class
mean_recall_n.append(accuracy_test[8])
mean_precision_n.append(accuracy_test[9])
mean_f1_n.append(accuracy_test[10])
# Count
tp = tp + accuracy_test[0]
tn = tn + accuracy_test[1]
fp = fp + accuracy_test[2]
fn = fn + accuracy_test[3]
#print("K_fold %d finished..." % k_time)
k_time = k_time + 1
# m_a=0, m_r_p=1, m_p_p=2, m_f1_p=3, k_nn=4, c_1=5, k=6, w_1=7, b_1=8, w_2=9, b_2=10
# m_r_n=11, m_p_n=12, m_f1_n=13, tp=14, tn=15, fp=16, fn=17
return mean_accuracy, mean_recall_p, mean_precision_p, mean_f1_p, k_fold.get_n_splits(), \
c, k, w_1, b_1, w_2, b_2, mean_recall_n, mean_precision_n, mean_f1_n, \
tp, tn, fp, fn
# Grid search - IKNN-TSVM- Linear
def gs_lin_IKTSVM(data, k_fold, k_l, k_u, l_bound, u_bound, step, \
method ,file_name):
train_data = data[0]
labels = data[1]
# Store
result_list = []
# Max accuracy
max_acc, acc_std = 0, 0
# Create an excel file for
excel_write = pd.ExcelWriter(file_name, engine='xlsxwriter')
# Search space - C parameter
c_range = np.arange(l_bound, u_bound, step)
# Search space - neighborhood size - k
k_range = np.arange(k_l, k_u, 1)
search_space = list(product(*[c_range ] + [k_range]))
# Total number of search elements
search_total = len(search_space)
# Count
run = 1
for element in search_space:
c = 2 ** float(element[0])
k = element[1]
start = time.time()
result = cv_lin_IKTSVM(train_data, labels, k_fold, k, c, method)
end = time.time()
acc = np.mean(result[0])
# Add results to the list
result_list.append([acc, np.std(result[0]), np.mean(result[1]), np.std(result[1]), np.mean(result[2]), \
np.std(result[2]), np.mean(result[3]), np.std(result[3]), np.mean(result[11]), np.std(result[11]), \
np.mean(result[12]), np.std(result[12]), np.mean(result[13]), np.std(result[13]), result[14], result[15], \
result[16], result[17], result[5], result[6], result[4], run])
# Save best accuracy
if acc > max_acc:
max_acc = acc
acc_std = np.std(result[0])
print("IKNN-TSVM(%s)| Run: %d | %d |Data:%s | K: %d | C: 2^%d |B-Acc:%.2f+-%.2f |Acc: %.2f+-%.2f | Time: %.2f Sec." % (method, run, search_total, data[2], k, element[0], \
max_acc, acc_std, acc ,np.std(result[0]) , end - start))
run = run + 1
print("Best Accuracy: %.2f-+%.2f" % (max_acc, acc_std))
# Create a panda data frame
result_frame = pd.DataFrame(result_list, columns=['accuracy', 'acc_std', 'recall_p', 'r_p_std', 'precision_p', 'p_p_std', \
'f1_p', 'f1_p_std', 'recall_n', 'r_n_std', 'precision_n', 'p_n_std', 'f1_n',\
'f1_n_std', 'tp', 'tn', 'fp', 'fn', 'c', 'knn', 'k_fold', 'run'])
# Write result to excel
result_frame.to_excel(excel_write, sheet_name='Sheet1')
excel_write.save()
return result_frame, max_acc
# Plot hyperplanes obtained by IKNN-TSVM
def plot_IKNNTSVM(X_train, y_train, c, k):
# Split train data into separate class
X_t_c1 = X_train[y_train == 1]
X_t_c2 = X_train[y_train == -1]
# Train a classifier with toy data
model = train_IKTSVM(X_train, y_train, c, k, 'FSA')
# Class1 hyper plane
w_1 = model[0]
b_1 = model[1]
# Class 2 hyperplane
w_2 = model[2]
b_2 = model[3]
# Line Equation hyperplane 1
slope1, intercept1 = hyperp_eq(w_1, b_1)
# Line Equation hyperplane 2
slope2, intercept2 = hyperp_eq(w_2, b_2)
# Min and Max of feature X1 and creating X values for creating line
xx_1 = np.linspace(np.min(X_train[:, 0]), np.max(X_train[:, 0]))
# y values
yy_1 = slope1 * xx_1 + intercept1
yy_2 = slope2 * xx_1 + intercept2
fig = plt.figure(1)
# Plot Training data
plt.scatter(X_t_c1[:, 0], X_t_c1[:, 1], marker='o', cmap=plt.cm.Paired)
plt.scatter(X_t_c2[:, 0], X_t_c2[:, 1], marker='+', cmap=plt.cm.Paired)
# Plot two hyperplanes
plt.plot(xx_1, yy_1, 'k--', label='Hyperplane +1') # Hyperplane of class 1
plt.plot(xx_1, yy_2, 'k-', label='Hyperplane -1') # Hyperplane of class 2
plt.ylim(-0.7, 1.5)
plt.legend()
plt.show()
# Test
if __name__ == '__main__':
# Address of datasets
data_add = 'Dataset/Synthetic/'
# Read a dataset
data = read_data(data_add + '/ripley.csv')
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(data[0], data[1], \
test_size=0.1)
start = time.time()
# Parameters
c = 2 ** -2
k = 5
# k_fold = 10 # fold-CV
# k_l = 2
# k_u = 11
# c_l_b = -8
# c_u_b = 9
# rbf_l_b = -8
# rbf_u_b = 0
#
# gs_lin_IKTSVM(data, k_fold, k_l, k_u, c_l_b, c_u_b, 1, \
# 'ld', 'Result/IKNN-TSVM-Lin-titanic12.xlsx')
#test = train_IKTSVM(X_train, y_train, c, k)
#test = cv_lin_IKTSVM(data[0], data[1], 10, k, c)
plot_IKNNTSVM(X_train, y_train, c, k)
print('IKNN-TSVM-Finished: %.2f ms.' % ((time.time() - start) * 1000))
#pre_1 = pre_IKNN_TSVM(X_test, test[0], test[1], test[2], test[3])
#acc_1 = accuracy_score(y_test, pre_1) * 100
#acc_1 = np.mean(test[0])
#print("IKNN-TSVM-Acc: %.2f+-%.2f" % (acc_1, np.std(test[0])))
## Benchmark TSVM vs. IKNN-TSVM
#start = time.time()
#
#test_tsvm = train_tsvm(X_train, y_train, c, c, 'cpp')
#
#print('TSVM-Finished: %.2f ms' % ((time.time() - start) * 1000))
#
#pre_2 = predict_tsvm(X_test, test_tsvm[0], test_tsvm[1], test_tsvm[2], test_tsvm[3])
#
#acc_2 = accuracy_score(y_test, pre_2) * 100
#
#print("TSVM-Acc: %.2f" % acc_2)
| UTF-8 | Python | false | false | 12,594 | py | 108 | iknntsvm.py | 48 | 0.539146 | 0.506432 | 0 | 382 | 31.968586 | 179 |
azuline/cryptopals | 17,154,099,403,293 | 1c291746ae98fffc4f198173b5c9f845ad0c5dc9 | 1046257be7fede8e197cb90a5a855a748bde78ce | /set4/c32.py | 9502887de3f52298ea5e45f9668c99beffc8cf4d | [
"Apache-2.0"
] | permissive | https://github.com/azuline/cryptopals | d051f86f7c4d1b7090110f96eaf04e007707127e | 22280022fffad3bfb44bfc443abea0bad1125c8b | refs/heads/master | 2023-01-28T01:26:31.360723 | 2020-12-14T03:58:11 | 2020-12-14T03:58:11 | 209,361,493 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Break HMAC-SHA1 with a slightly less artificial timing leak
"""
import sys # isort:skip
from pathlib import Path # isort:skip
sys.path.append(str(Path(__file__).parent.resolve().parent))
import logging
from secrets import token_bytes
from threading import Thread
from time import sleep, time
import requests
from set4.c31 import hmac_sha1, start_webserver
# Shut Flask and Werkzeug up.
wzlogger = logging.getLogger("werkzeug")
wzlogger.disabled = True
def insecure_compare(sig1, sig2):
if len(sig1) != len(sig2):
return False
for c1, c2 in zip(sig1, sig2):
sleep(0.005)
if c1 != c2:
return False
return True
def crack_mac_for_any_file(file):
print("\nCracking MAC...")
mac = b""
for _ in range(20):
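        # an HMAC-SHA1 tag is 20 bytes; recover it one byte at a time by timing each guess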
times = []
for byte in [bytes([i]) for i in range(256)]:
padding = b"\x00" * (20 - (len(mac) + 1))
total_time = 0
for _ in range(10):
start_time = time()
r = requests.post(
"http://localhost:5000/test",
params={
"file": file.hex(),
"signature": (mac + byte + padding).hex(),
},
)
end_time = time()
total_time += end_time - start_time
times.append((byte, total_time))
byte, longest_time = sorted(times, key=lambda v: v[1], reverse=True)[0]
assert longest_time > (len(mac) + 1.5) * 0.05
print(f"Found a byte of the mac: {byte.hex()}")
mac += byte
    # Verify the full recovered MAC with one final request.
    r = requests.post(
        "http://localhost:5000/test",
        params={"file": file.hex(), "signature": mac.hex()},
    )
    assert r.status_code == 200  # the recovered MAC should be accepted
return mac
if __name__ == "__main__":
secret_key = token_bytes(64)
print("Starting webserver.")
Thread(target=start_webserver(insecure_compare, secret_key)).start()
sleep(1) # Give the webserver time to spin up...
file = token_bytes(24)
print("\nThe file is:")
print(file)
print("\nThe secret key is:")
print(secret_key.hex())
print("\nThe MAC is:")
print(hmac_sha1(secret_key, file).hex())
mac = crack_mac_for_any_file(file)
print("\nFound full MAC:")
print(mac.hex())
| UTF-8 | Python | false | false | 2,224 | py | 43 | c32.py | 39 | 0.556655 | 0.533273 | 0.00045 | 92 | 23.173913 | 79 |
bmyerz/iowa-computer-science-methods | 15,135,464,779,421 | 67c7bf6d87fc170ce4928667e6a7730c54b91262 | d6b1a72f2726397f5fc7f8493362b2f0a5c20b5d | /jes-code/horndup.py | 57be016a3eabd94684c21f58d670cd95182304cf | [] | no_license | https://github.com/bmyerz/iowa-computer-science-methods | a9d8e42cff9323d0d8419a000278498e46ba8f3a | bd76a2f65c1cf291ca0255e7c03e06feba6231b3 | refs/heads/master | 2021-06-20T03:22:57.555524 | 2021-02-22T17:29:18 | 2021-02-22T17:29:18 | 189,730,334 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s = makeSound("Ensoniq-SQ-1-French-Horn-C4.wav")
openSoundTool(s) | UTF-8 | Python | false | false | 65 | py | 19 | horndup.py | 19 | 0.753846 | 0.723077 | 0 | 2 | 32 | 48 |
WDB40/CIS189 | 9,878,424,794,517 | 339b4b34c6ab795327c3578f7298b46338943799 | 59f4388d9c3816a3af6b6d27b14e086509656cf9 | /Module8/src/get_test_scores.py | e6c47cd9e871106dcdd68abbfd38c56cf4a9259f | [] | no_license | https://github.com/WDB40/CIS189 | 93f6152f3b152d038182243d73830fb823dee339 | 3d51ba4d060c532c140ae317a825fd648ea1c7f7 | refs/heads/master | 2020-07-17T03:18:58.817312 | 2019-12-09T02:39:50 | 2019-12-09T02:39:50 | 205,929,980 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Program: get_test_scores.py
Author: Wes Brown
Last date modified: 10/15/19
Purpose: Prompt the user for a set of test scores and report the average score.
"""
def valid_number(value, min, max):
INVALID_INPUT = -1
if value < min or value > max or value == INVALID_INPUT:
return False
else:
return True
def get_test_score():
INVALID_INPUT = -1
MAX_SCORE = 100
MIN_SCORE = 0
user_input = INVALID_INPUT
while not valid_number(user_input, MIN_SCORE, MAX_SCORE):
try:
user_input = int(input("Enter a test score: "))
except ValueError:
user_input = INVALID_INPUT
return user_input
def average_scores(the_dict):
total = 0
for key in the_dict:
total = total + the_dict[key]
return total / len(the_dict)
def get_num_tests():
INVALID_INPUT = -1
MAX_SCORE = 10
MIN_SCORE = 1
user_input = INVALID_INPUT
while not valid_number(user_input, MIN_SCORE, MAX_SCORE):
try:
user_input = int(input("Enter the number of tests: "))
except ValueError:
user_input = INVALID_INPUT
return user_input
if __name__ == '__main__':
num_scores = get_num_tests()
scores_dict = dict()
for i in range(1, num_scores + 1):
score = get_test_score()
scores_dict.update({i: score})
print("Average Score: %.2f" % average_scores(scores_dict))
| UTF-8 | Python | false | false | 1,360 | py | 122 | get_test_scores.py | 118 | 0.590441 | 0.575735 | 0 | 66 | 19.606061 | 66 |
django-group/python-itvdn | 4,080,218,965,983 | 8ad07f5ecfeb5ec69a28f5ab39bed50a35b04632 | 8a1ab23b056886965fec2a3e4064c5ed55e22bfb | /домашка/essential/lesson 3/Dmytro Marianchenko/t_3.py | 827afb0f46e7a992dee4d19b588939d6e08a49f0 | [] | no_license | https://github.com/django-group/python-itvdn | 5d8a59f06618f993d20d2f60374c36aae4ae8ab0 | 62ef87dfac947ed4bf1f5b6b890461f56814d893 | refs/heads/master | 2021-01-07T11:16:37.996524 | 2020-02-14T13:53:47 | 2020-02-14T13:53:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def checker(name, year, company_bd):
    if year <= company_bd:
        print(f"{name} is not an employee of the company")
    else:
        print(f"{name} has worked in the company since {year}")
def name_form(x):
while x is None:
x = input(f"Please enter:\n>> ")
if x.isalpha():
y = x.capitalize()
return y
else:
print("should not contain a numbers")
pass
def year_form(x):
while x is None:
try:
x = int(input(f"Please enter:\n>> "))
return x
except ValueError:
print("year should not contain a letter or any symbols except numbers")
def validation(x, company_bd):
for i in x:
if company_bd >= i[3]:
print(f"{i[0]} {i[1]} is not а company member")
else:
print(f"{i[0]} {i[1]} work in {i[2]} department sins {i[3]} year")
def main():
name = None
surname = None
year = None
company_bd = 1991
print("Enter a name of worker")
name = name_form(name)
print("Enter a surname of worker")
surname = name_form(surname)
force = input("Enter a department of company:\n>> ")
print("Enter a year of start working in company")
year = year_form(year)
pers = [name, surname, force, int(year)]
personal.append(pers)
while True:
sw = input("Do you wont to add an another person? y/n\n>> ")
if sw == "y":
break
elif sw == "n":
validation(personal, company_bd)
input("Pres 'Enter' to exit...")
exit()
main()
if __name__ == '__main__':
personal = []
main()
| UTF-8 | Python | false | false | 1,658 | py | 706 | t_3.py | 679 | 0.532287 | 0.525649 | 0 | 63 | 25.301587 | 83 |
loek-tonnaer/UnsupervisedActionEstimation | 12,412,455,504,012 | f5ac5d406068dc7ba1b3691ca8d4d6be24feafd0 | c5bdee1e0209cf74ba70aad7e954028f0fcecae0 | /metrics/utils.py | 09624714a75eb354d85186c472499254a738017c | [] | no_license | https://github.com/loek-tonnaer/UnsupervisedActionEstimation | e856b32ccc5e01cdbcae50a9eead16d75548f115 | 1c951bf65abb8e5a8189cc67102bda6e1834168b | refs/heads/master | 2022-12-26T20:13:22.956433 | 2020-10-06T16:23:40 | 2020-10-06T16:23:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import sklearn
import torch
def _get_random_latent(ds):
f = []
for factor in ds.latents_sizes:
f.append(np.random.randint(0, factor))
return np.array(f)
def _sample_one_representation(rep_fn, ds, paired=False):
latent_1 = ds.sample_latent()
img1 = ds.get_img_by_latent(latent_1)[0]
if not torch.is_tensor(img1):
img1 = img1[0]
z = rep_fn(img1.to('cuda').unsqueeze(0))
return z.detach().cpu(), latent_1
def sample_batch(model, num_points, ds, paired=False):
reps, factors = None, None
for i in range(num_points):
rep, fac = _sample_one_representation(model, ds, paired=paired)
# fac = fac[1:]
if i == 0:
reps, factors = rep, fac
else:
factors = np.vstack((factors, fac))
reps = np.vstack((reps, rep))
return np.transpose(reps), np.transpose(factors)
def histogram_discretize(target, num_bins=20):
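    """Discretize each row of `target` into `num_bins` histogram bins
    (used before computing discrete mutual information)."""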
discretized = np.zeros_like(target)
for i in range(target.shape[0]):
discretized[i, :] = np.digitize(target[i, :], np.histogram(target[i, :], num_bins)[1][:-1])
return discretized
def discrete_mutual_info(mus, ys):
"""Compute discrete mutual information."""
num_codes = mus.shape[0]
num_factors = ys.shape[0]
m = np.zeros([num_codes, num_factors])
for i in range(num_codes):
for j in range(num_factors):
m[i, j] = sklearn.metrics.mutual_info_score(ys[j, :], mus[i, :])
return m
def discrete_entropy(ys):
"""Compute discrete mutual information."""
num_factors = ys.shape[0]
h = np.zeros(num_factors)
for j in range(num_factors):
h[j] = sklearn.metrics.mutual_info_score(ys[j, :], ys[j, :])
return h
def normalize_data(data, mean=None, stddev=None):
if mean is None:
mean = np.mean(data, axis=1)
if stddev is None:
stddev = np.std(data, axis=1)
return (data - mean[:, np.newaxis]) / stddev[:, np.newaxis], mean, stddev
| UTF-8 | Python | false | false | 1,997 | py | 34 | utils.py | 33 | 0.607912 | 0.595894 | 0 | 70 | 27.528571 | 99 |
jshrall/pm_tools | 4,217,657,896,767 | 2384c8a438dfcf1decd67b3e67686f65170a47c3 | c6e967dca129fabae2ca3f54e880805c7b0ee9df | /plugins/mermaid/mermaid.py | 99e861ac0f71f6309be805f171314877f3d8f353 | [
"MIT"
] | permissive | https://github.com/jshrall/pm_tools | d3354b5c868ddfd7dfb5a138209401129c964b23 | 2207f1f145172c3016b059b60b89c83ece184c72 | refs/heads/master | 2021-09-09T12:04:03.086767 | 2018-03-15T21:21:41 | 2018-03-15T21:21:41 | 113,763,387 | 0 | 0 | null | true | 2017-12-10T16:00:04 | 2017-12-10T16:00:02 | 2017-11-18T19:02:41 | 2017-11-18T19:42:15 | 61,739 | 0 | 0 | 0 | null | false | null | import os
class MermaidPlugin(object):
def __init__(self, preprocessor):
self.mermaid_js = preprocessor.toolpath("plugins/mermaid/mermaid.cli/index.bundle.js")
# Config and style are both currently unused
#self.mermaid_cfg = preprocessor.toolpath("plugins/mermaid/mermaid_config.json")
#self.mermaid_css = preprocessor.toolpath("plugins/mermaid/mermaid.css")
self.pp = preprocessor
self.token = "mermaid"
self.pp.register_plugin(self)
def process(self, code, filename_or_title, title=None, div_style=None):
"""
Process mermaid code and return the proper insertion string
"""
mmdfile, outfile, update, title = self.pp.get_source(code, filename_or_title, ".mmd", ".svg", title)
if update:
self.mermaid2img(mmdfile, outfile)
return self.pp.img2md(outfile, title, div_style)
def mermaid2img(self, infile, outfile):
"""Convert mermaid file to image output file.
Args:
infile (str): [description]
outfile (str, optional): Defaults to None. Image will be written to this file. The outfile extension describes the type,
either png or svg.
"""
try:
if outfile and os.path.exists(outfile):
os.unlink(outfile)
self.pp._call(r'"%s" -i "%s" -o "%s"' % (self.mermaid_js, infile, outfile))
except SystemExit:
# If mermaid failed, but generated output SVG, that SVG contains error description
# so should be good enough to continue
if outfile and os.path.exists(outfile):
print "Ignoring the error above. See mermaid output diagram for detailed error description"
else:
raise
new = MermaidPlugin
| UTF-8 | Python | false | false | 1,826 | py | 68 | mermaid.py | 33 | 0.617196 | 0.615553 | 0 | 47 | 37.851064 | 132 |
tomaszmartin/stocks | 10,823,317,619,212 | b3ef49faf7afc423762745c82020f65391ee7581 | fd2b3452d1cc5539282c5861ba188bc48f5d7912 | /app/scrapers/coinapi.py | c484a97651e684dbfb0902d2793286ab57bc1512 | [] | no_license | https://github.com/tomaszmartin/stocks | 9bc568769406abd207eb4447a7029707a0bd0af3 | 4e97dd1f69e59b9643c52340bd052fd173693a4d | refs/heads/main | 2023-08-19T20:52:17.946003 | 2021-12-30T18:48:02 | 2021-12-30T18:48:02 | 365,441,638 | 0 | 0 | null | false | 2021-10-09T18:19:03 | 2021-05-08T06:51:33 | 2021-07-24T20:22:02 | 2021-10-09T18:19:02 | 232 | 0 | 0 | 2 | HTML | false | false | """Extracts data from a coinbase API."""
import datetime as dt
import json
import pandas as pd
from airflow.providers.http.hooks.http import HttpHook
COINS = [
"ADA",
"BTC",
"BTT",
"BNB",
"DASH",
"DOGE",
"ETH",
"ETC",
"LTC",
"LUNA",
"XLM",
]
def download_realtime(for_date: dt.datetime, coin_symbol: str) -> bytes:
"""Downloads current prices for a specified coin.
Args:
coin_symbol: what coin should be downloaded, for example BTC
for_date: unused in this context
Returns:
bytes: result
"""
hook = HttpHook("GET", http_conn_id="coinapi")
endpoint = f"v1/quotes/BINANCE_SPOT_{coin_symbol.upper()}_USDT/current"
resp = hook.run(endpoint)
data = resp.content
return data
def download_data(for_date: dt.datetime, coin_symbol: str) -> bytes:
"""Downloads file with appropriate data from the CoinAPI.
Args:
coin_symbol: what coin should be downloaded, for example BTC
for_date: for what day
Returns:
bytes: result
"""
next_day = for_date + dt.timedelta(days=1)
hook = HttpHook("GET", http_conn_id="coinapi")
endpoint = "v1/exchangerate/{coin}/USD/history?period_id=1DAY&time_start={start}&time_end={end}"
endpoint = endpoint.format(
coin=coin_symbol.upper(), start=for_date.date(), end=next_day.date()
)
resp = hook.run(endpoint)
data = resp.content
return data
def parse_data(data: bytes, for_date: dt.datetime, coin_symbol: str = ""):
"""Extracts data from file into correct format.
Args:
data: data from file
for_date: for what day data was downloaded
coin_symbol: what coin this data holds. It's not present in the file data.
Raises:
ValueError: when no coin is passed
Returns:
final data
"""
if not coin_symbol:
raise ValueError("Need to specify coin!")
frame = pd.read_json(data)
frame = frame.rename(
columns={
"date": "time_close",
"rate_open": "open",
"rate_high": "high",
"rate_low": "low",
"rate_close": "close",
}
)
frame = frame.drop(
columns=["time_period_start", "time_period_end", "time_open", "time_close"]
)
frame["date"] = for_date.date()
frame["coin"] = coin_symbol.upper()
frame["base"] = "USD"
return frame.to_dict("records")
def parse_realtime(data: bytes, for_date: dt.datetime, coin_symbol: str = ""):
"""Extracts realtime data from file into correct format.
Args:
data: data from file
for_date: not used in this context
coin_symbol: what coin this data holds. It's not present in the file data.
Raises:
ValueError: when no coin is passed
Returns:
final data
"""
if not coin_symbol:
raise ValueError("Need to specify coin!")
json_data = json.loads(data)
frame = pd.json_normalize(json_data)
frame["coin"] = coin_symbol.upper()
frame["base"] = "USD"
frame = frame.drop(
columns=[
"symbol_id",
"last_trade.time_exchange",
"last_trade.time_coinapi",
"last_trade.uuid",
"last_trade.price",
"last_trade.size",
"last_trade.taker_side",
"time_exchange",
"time_coinapi",
]
)
return frame.to_dict("records")
| UTF-8 | Python | false | false | 3,440 | py | 62 | coinapi.py | 34 | 0.586919 | 0.585756 | 0 | 130 | 25.461538 | 100 |
peuic/pokemon | 7,481,833,038,659 | 0fcb5a8b8c306d92a1e347d52b21e7f543742025 | 00fd18e5bf1ea0b209d0e4033c7007b0a3ba6d30 | /poketest.py | bb8128532530e54413a2feaa53ce9fedf4ee0856 | [] | no_license | https://github.com/peuic/pokemon | aefcdd334cdf29528028c6673c1aaec855dc08df | c3741f7d700f1469ccf0549fb5b1128a04880962 | refs/heads/master | 2022-12-09T17:15:38.555538 | 2018-06-21T02:20:26 | 2018-06-21T02:20:26 | 138,101,337 | 0 | 0 | null | false | 2022-12-08T02:09:35 | 2018-06-21T00:50:32 | 2018-06-21T14:39:22 | 2022-12-08T02:09:33 | 4,831 | 0 | 0 | 4 | Python | false | false | from flask import Flask, render_template, request
import requests
app = Flask(__name__)
@app.route('/pokeresult', methods=['POST'])
def pokedata():
num = request.form['pokeq']
r = requests.get('https://pokeapi.co/api/v2/pokemon/'+num+'/')
json_object = r.json()
poke_id = json_object ['id']
poke_name = json_object ['name']
poke_pic = json_object ['sprites'] ['front_default']
poke_peso = json_object ['weight']
return render_template('pokeresult.html', pokeid=poke_id, pokename=poke_name, pokepic=poke_pic, pokepeso=poke_peso)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True) | UTF-8 | Python | false | false | 681 | py | 4 | poketest.py | 1 | 0.651982 | 0.650514 | 0 | 22 | 30 | 119 |
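# Illustrative way to exercise the endpoint once the dev server is running
# (added comment; the URL assumes Flask's default http://127.0.0.1:5000):
#   import requests
#   requests.post("http://127.0.0.1:5000/pokeresult", data={"pokeq": "25"})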
bitbybitsth/django_deploy | 14,817,637,200,521 | a6496ade3f09c77d59d3b2342c350113db5aced8 | ec60b96d8ed11b750ea91a64196ecc3a6d8b299a | /ecom/product/migrations/0002_auto_20211012_0924.py | a260695ce655eafb129b87dd0acbda5afd246d98 | [] | no_license | https://github.com/bitbybitsth/django_deploy | 1d6fe99989ea7a4ab8961dee70b3612e4bc2b589 | 670e716c4cf8a605c1ca2ffdfbed000a13f17bf7 | refs/heads/main | 2023-08-25T06:42:39.424848 | 2021-10-27T03:54:38 | 2021-10-27T03:54:38 | 421,664,267 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.8 on 2021-10-12 03:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("product", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(
name="product",
options={"verbose_name": "Product", "verbose_name_plural": "Products"},
),
migrations.AddField(
model_name="product",
name="delivery",
field=models.CharField(
default="India", max_length=40, verbose_name="Delivery Country"
),
),
migrations.AlterField(
model_name="product",
name="warranty",
field=models.IntegerField(default=1, verbose_name="Warranty in Year"),
),
]
| UTF-8 | Python | false | false | 805 | py | 3 | 0002_auto_20211012_0924.py | 3 | 0.556522 | 0.529193 | 0 | 29 | 26.758621 | 83 |
pyro-ppl/pyro | 627,065,241,847 | f952fcbce3e5a98e2b6c2d3711cd19445676f828 | edc1134436a79ca883a0d25f3c8dfffc4235c514 | /tests/infer/test_svgd.py | d90efb0c8c1c8d1c7051fa944d13b930238448db | [
"Apache-2.0"
] | permissive | https://github.com/pyro-ppl/pyro | 2283d8ca528fc090c724a3a6e0f344e505ebbf77 | 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81 | refs/heads/dev | 2023-08-18T00:35:28.014919 | 2023-08-06T21:01:36 | 2023-08-06T21:01:36 | 94,506,832 | 3,647 | 606 | Apache-2.0 | false | 2023-09-14T13:52:14 | 2017-06-16T05:03:47 | 2023-09-14T05:15:11 | 2023-09-14T12:55:38 | 95,825 | 8,118 | 973 | 243 | Python | false | false | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVGD, IMQSteinKernel, RBFSteinKernel
from pyro.infer.autoguide.utils import _product
from pyro.optim import Adam
from tests.common import assert_equal
@pytest.mark.parametrize(
"latent_dist",
[
dist.Normal(torch.zeros(2), torch.ones(2)).to_event(1),
dist.LogNormal(torch.tensor([-1.0]), torch.tensor([0.7])).to_event(1),
dist.LogNormal(torch.tensor(-1.0), torch.tensor(0.7)),
dist.Beta(torch.tensor([0.3]), torch.tensor([0.7])).to_event(1),
],
)
@pytest.mark.parametrize("mode", ["univariate", "multivariate"])
@pytest.mark.parametrize("stein_kernel", [RBFSteinKernel, IMQSteinKernel])
def test_mean_variance(latent_dist, mode, stein_kernel, verbose=True):
pyro.clear_param_store()
def model():
pyro.sample("z", latent_dist)
kernel = stein_kernel()
adam = Adam({"lr": 0.05})
svgd = SVGD(model, kernel, adam, 200, 0, mode=mode)
bandwidth_start = 1.0
bandwidth_end = 5.0
n_steps = 301
# scramble initial particles
svgd.step()
pyro.param("svgd_particles").unconstrained().data *= 1.3
pyro.param("svgd_particles").unconstrained().data += 0.7
for step in range(n_steps):
kernel.bandwidth_factor = bandwidth_start + (step / n_steps) * (
bandwidth_end - bandwidth_start
)
squared_gradients = svgd.step()
if step % 125 == 0:
print("[step %03d] " % step, squared_gradients)
final_particles = svgd.get_named_particles()["z"]
if verbose:
print(
"[mean]: actual, expected = ",
final_particles.mean(0).data.numpy(),
latent_dist.mean.data.numpy(),
)
print(
"[var]: actual, expected = ",
final_particles.var(0).data.numpy(),
latent_dist.variance.data.numpy(),
)
assert_equal(final_particles.mean(0), latent_dist.mean, prec=0.01)
prec = 0.05 if mode == "multivariate" else 0.02
assert_equal(final_particles.var(0), latent_dist.variance, prec=prec)
@pytest.mark.parametrize("shape", [(1, 1), (2, 1, 3), (4, 2), (1, 2, 1, 3)])
@pytest.mark.parametrize("stein_kernel", [RBFSteinKernel, IMQSteinKernel])
def test_shapes(shape, stein_kernel):
pyro.clear_param_store()
shape1, shape2 = (5,) + shape, shape + (6,)
mean_init1 = torch.arange(_product(shape1)).double().reshape(shape1) / 100.0
mean_init2 = torch.arange(_product(shape2)).double().reshape(shape2)
def model():
pyro.sample("z1", dist.LogNormal(mean_init1, 1.0e-8).to_event(len(shape1)))
pyro.sample("scalar", dist.Normal(0.0, 1.0))
pyro.sample("z2", dist.Normal(mean_init2, 1.0e-8).to_event(len(shape2)))
num_particles = 7
svgd = SVGD(model, stein_kernel(), Adam({"lr": 0.0}), num_particles, 0)
for step in range(2):
svgd.step()
particles = svgd.get_named_particles()
assert particles["z1"].shape == (num_particles,) + shape1
assert particles["z2"].shape == (num_particles,) + shape2
for particle in range(num_particles):
assert_equal(particles["z1"][particle, ...], mean_init1.exp(), prec=1.0e-6)
assert_equal(particles["z2"][particle, ...], mean_init2, prec=1.0e-6)
@pytest.mark.parametrize("mode", ["univariate", "multivariate"])
@pytest.mark.parametrize("stein_kernel", [RBFSteinKernel, IMQSteinKernel])
def test_conjugate(mode, stein_kernel, verbose=False):
data = torch.tensor([1.0, 2.0, 3.0, 3.0, 5.0]).unsqueeze(-1).expand(5, 3)
alpha0 = torch.tensor([1.0, 1.8, 2.3])
beta0 = torch.tensor([2.3, 1.5, 1.2])
alpha_n = alpha0 + data.sum(0) # posterior alpha
beta_n = beta0 + data.size(0) # posterior beta
def model():
with pyro.plate("rates", alpha0.size(0)):
latent = pyro.sample("latent", dist.Gamma(alpha0, beta0))
with pyro.plate("data", data.size(0)):
pyro.sample("obs", dist.Poisson(latent), obs=data)
kernel = stein_kernel()
adam = Adam({"lr": 0.05})
svgd = SVGD(model, kernel, adam, 200, 2, mode=mode)
bandwidth_start = 1.0
bandwidth_end = 5.0
n_steps = 451
for step in range(n_steps):
kernel.bandwidth_factor = bandwidth_start + (step / n_steps) * (
bandwidth_end - bandwidth_start
)
squared_gradients = svgd.step()
if step % 150 == 0:
print("[step %03d] " % step, squared_gradients)
final_particles = svgd.get_named_particles()["latent"]
posterior_dist = dist.Gamma(alpha_n, beta_n)
if verbose:
print(
"[mean]: actual, expected = ",
final_particles.mean(0).data.numpy(),
posterior_dist.mean.data.numpy(),
)
print(
"[var]: actual, expected = ",
final_particles.var(0).data.numpy(),
posterior_dist.variance.data.numpy(),
)
assert_equal(final_particles.mean(0)[0], posterior_dist.mean, prec=0.02)
prec = 0.05 if mode == "multivariate" else 0.02
assert_equal(final_particles.var(0)[0], posterior_dist.variance, prec=prec)
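# Added note (not in the original file): these tests can be run directly with
# pytest, e.g. `pytest tests/infer/test_svgd.py -k "mean_variance or conjugate"`;
# the selection flags shown here are only a suggestion.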
| UTF-8 | Python | false | false | 5,251 | py | 639 | test_svgd.py | 525 | 0.613217 | 0.576081 | 0 | 149 | 34.241611 | 83 |
okotaku/kaggle_statoil | 4,020,089,423,558 | 5a51f2cd307c627487617e94263b984e75b0ecbd | ed7ffb471f80f8aed29c50b1b5b1187cbd3b8a8d | /model/vgg16.py | 54b98e01004b9e5e586635ea0d8d30ab38ecdd25 | [] | no_license | https://github.com/okotaku/kaggle_statoil | f9ddc3396663eed26e533b4844cb7f57b3a39be5 | 3c3225bb4eeaf32ae9109614eda4af102394c667 | refs/heads/master | 2021-09-05T18:11:32.016595 | 2018-01-30T05:55:44 | 2018-01-30T05:55:44 | 115,371,866 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Flatten, Dense
from keras.layers.core import Activation
from keras.models import Model
def Vgg16(freeze_leyer):
input_tensor = Input(shape=(75, 75, 3))
vgg16 = VGG16(include_top=False, weights='imagenet',
input_tensor=input_tensor)
x = Flatten()(vgg16.output)
x = Dense(256)(x)
x = Activation('relu')(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(input=vgg16.input, output=x)
if freeze_leyer > 0:
for layer in model.layers[:freeze_leyer]:
layer.trainable = False
return model
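# Illustrative usage sketch (added; the compile/fit settings below are
# assumptions, not taken from this repository; inputs are expected as
# 75x75x3 arrays):
#   model = Vgg16(15)  # freeze the first 15 layers
#   model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#   model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)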
| UTF-8 | Python | false | false | 663 | py | 15 | vgg16.py | 14 | 0.650075 | 0.612368 | 0 | 23 | 27.826087 | 56 |
manishbisoi/Jinee | 10,273,561,793,635 | c14d928a2818615015cac7c4c9ceac859596960e | 05db88673dd09c36406faeb9b9d0afcb40b5fa26 | /tasks/fortune.py | ee62b4464c694dc43fda8317d964bba14b0f3c9d | [] | no_license | https://github.com/manishbisoi/Jinee | 67f9d40fbad4231ba605b890e36500140c6bf69d | 5676cd0fc4c4df4237fabb132dcfa9dec6578d5f | refs/heads/master | 2021-01-20T22:09:45.160364 | 2016-08-12T14:14:07 | 2016-08-12T14:14:07 | 65,555,800 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
os.system('fortune | xcowsay') | UTF-8 | Python | false | false | 40 | py | 19 | fortune.py | 18 | 0.75 | 0.75 | 0 | 2 | 19.5 | 30 |
gacanepa/CursoBashPython | 5,042,291,648,916 | 0ed53325fa1d72c4c2500bfca51be343adde7ad2 | 4a292a4d66451b323952d565c438d4a65d9408aa | /clase2/clase2.py | a9efa124a62c9546b0ee9c3d14fcdf67e8e376ca | [] | no_license | https://github.com/gacanepa/CursoBashPython | c5963d042aabbe64e9f182be2f14ba477401152f | 2cc83be2609aad9656594942b34a57020baf4b82 | refs/heads/master | 2019-01-25T00:28:03.621524 | 2017-12-13T20:11:05 | 2017-12-13T20:11:05 | 86,010,949 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # EJERCICIOS CLASE 2
# ------------------------------------------
# FUNCIONES AUXILIARES
# TipoDeDato: recibe una variable (nombreVariable) y su valor (dato) como entrada y devuelve el tipo de dato
def TipoDeDato(nombreVariable, dato):
print('La variable', nombreVariable, 'es igual a', dato, 'y es del tipo', type(dato))
# ------------------------------------------
# EXAMPLES SEEN IN CLASS
# ------------------------------------------
# DATA TYPES
# Integers:
numero1=5
numero2=-9
TipoDeDato('numero1', numero1)
TipoDeDato('numero2', numero2)
# Floats
numero3=1.32
numero4=1.2
TipoDeDato('numero3', numero3)
TipoDeDato('numero4', numero4)
# Strings. Includes examples of string concatenation and replication.
nombre='Gabriel'
apellido='Cánepa'
nombreCompleto=nombre + ' ' + apellido # Concatenation
subrayado='-' * len(nombre) # Replication
TipoDeDato('nombre', nombre)
TipoDeDato('apellido', apellido)
TipoDeDato('nombreCompleto', nombreCompleto)
TipoDeDato('subrayado', subrayado)
# Booleans (and the like)
# Remember that Python treats (besides False) any empty object as False, and everything else as True.
a1=True
a2=False
TipoDeDato('a1', a1)
TipoDeDato('a2', a2)
# Lists
paises=['Argentina', 'Uruguay', 'Paraguay', 'Chile']
calificaciones=[7.5, 9, 10, 6.75]
TipoDeDato('paises', paises)
TipoDeDato('calificaciones', calificaciones)
# ------------------------------------------
# DATA CONVERSION
# Integer to float
intFloat1=float(1)
intFloat2=float(-5)
TipoDeDato('intFloat1', intFloat1)
TipoDeDato('intFloat2', intFloat2)
# Float to integer. CAREFUL! The value will be truncated
floatInt1=int(1.32)
floatInt2=int(2.0)
TipoDeDato('floatInt1', floatInt1)
TipoDeDato('floatInt2', floatInt2)
# Integer or float to string. Needed to concatenate strings with the + operator
num1Str=str(14)
num2Str=str(2.32)
TipoDeDato('num1Str', num1Str)
TipoDeDato('num2Str', num2Str)
# The following assignment would raise an error:
# otraVariable='Hoy es ' + 29 + ' de marzo'
# But this one would work correctly:
# otraVariable='Hoy es ' + str(29) + ' de marzo'
# String to float or integer
strInt=int('84')
strFloat=float('2.345')
TipoDeDato('strInt', strInt)
TipoDeDato('strFloat', strFloat)
# Several types to boolean. When converting, Python treats any empty object or None as False.
# Everything else will be True. This behavior is useful when evaluating conditions in flow control.
intBool1=bool(0)
intBool2=bool(1)
intBool3=bool(120)
floatBool1=bool(0.0)
floatBool2=bool(2.32)
stringBool1=bool('')
stringBool2=bool('Hola a todos')
noneBool=bool(None)
listaBool1=bool([])
listaBool2=bool(paises)
TipoDeDato('intBool1', intBool1)
TipoDeDato('intBool2', intBool2)
TipoDeDato('intBool3', intBool3)
TipoDeDato('floatBool1', floatBool1)
TipoDeDato('floatBool2', floatBool2)
TipoDeDato('stringBool1', stringBool1)
TipoDeDato('stringBool2', stringBool2)
TipoDeDato('noneBool', noneBool)
TipoDeDato('listaBool1', listaBool1)
TipoDeDato('listaBool2', listaBool2)
# ------------------------------------------
# INTERACTION
# Ask for an input value and assign it to a variable. Uncomment to test.
# respuesta=input()
# print(respuesta)
# ------------------------------------------
# DIFFERENCE BETWEEN = AND ==
# = is used to assign a value to a variable
# == is used to check whether a variable holds a given value
miVar=6 # Assignment of the value 6 to the variable miVar
print(miVar==6) # Since miVar holds the value 6, this statement will return True.
print(miVar==7) # Since miVar holds the value 6, this statement will return False.
# ------------------------------------------
# LIST OPERATIONS
# Add the numbers 1 through 15 to a list called numerosConFor using a for loop
numerosConFor=[]
for i in range(1,16):
numerosConFor.append(i)
# Show the list and check its type:
TipoDeDato('numerosConFor', numerosConFor)
# Remove the elements found at positions 0, 3, and 9 from the list:
numerosConFor.pop(0)
numerosConFor.pop(3)
numerosConFor.pop(9)
# Insert the number -5 at position 4:
numerosConFor.insert(4, -5)
# Change the value of the element found in the first and in the last position of the list.
# The new values must be 11 and -30, respectively.
numerosConFor[0] = 11
maxIndex = numerosConFor.index(max(numerosConFor))
numerosConFor[maxIndex] = -30
# After making the changes above, our list contains the following elements:
TipoDeDato('numerosConFor', numerosConFor)
# ------------------------------------------
# HOMEWORK
# Exercise 1: Replace the while loop from the corresponding slide with a for loop. Use the range() function.
listaNumeros=[]
for i in range(0,5):
listaNumeros.append(i)
print(listaNumeros)
# Exercise 2: Write a module that includes functions to add, subtract, multiply, and divide two numbers. Save it
# as operaciones.py in the current directory and import it from the interactive Python console. Then,
# demonstrate the use of each of the functions.
def suma(a, b):
    '''Returns the result of adding the numbers a and b'''
    return a + b
def resta(a, b):
    '''Returns the result of subtracting b from a'''
    return a - b
def multiplicacion(a, b):
    '''Returns the product of the numbers a and b'''
    return a * b
def division(a, b):
    '''Returns the result of dividing a by b'''
    return a / b
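# Illustrative examples of each function (added for completeness; the exercise
# asks to demonstrate them, originally from the interactive console):
print('suma(2, 3) =', suma(2, 3))                      # 5
print('resta(10, 4) =', resta(10, 4))                  # 6
print('multiplicacion(6, 7) =', multiplicacion(6, 7))  # 42
print('division(9, 2) =', division(9, 2))              # 4.5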
# Exercise 3: Write a function that takes 3 numbers as input and computes the average, rounding the result to two
# decimal places. If the result is greater than or equal to 7, show the message 'Aprobado'. Otherwise, show 'No aprobado'.
def promedio(a, b, c):
    resultado = round((a + b + c) / 3, 2)
    if resultado >= 7:
        print('Aprobado')
    else:
        print('No aprobado')
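# Illustrative calls to promedio (added for completeness, not in the original file):
promedio(8, 7, 9)   # prints 'Aprobado' (average 8.0)
promedio(4, 5, 6)   # prints 'No aprobado' (average 5.0)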
| UTF-8 | Python | false | false | 6,149 | py | 13 | clase2.py | 11 | 0.677683 | 0.652978 | 0 | 196 | 29.183673 | 123 |
il-dionigi/180DB-GradCaps | 790,274,004,428 | 144888937cd0e60660cbf55279a6181ff5ed8e2c | ede143f1801ab6dcc1ad67626980032257a0e936 | /HWGroup/pixelPi/encoder.py | 574bc53d9eb7cae06cac4bb5c5aac15a70892e41 | [] | no_license | https://github.com/il-dionigi/180DB-GradCaps | 1349f0c829ee43cc7a89d01fb31ef631a8df6b9b | 7330ec6d0a4bb1d1737ce634686ca463a8de1829 | refs/heads/master | 2020-12-13T12:57:25.377614 | 2020-03-12T21:13:57 | 2020-03-12T21:13:57 | 234,423,121 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bitstring import BitArray
import math
import Node
xpos_len = 4 # 4 bits -> 0-15
xpos_start = 0 # x starts at bit 0 in x+y bits
ypos_len = 4 # 4 bits -> 0-15
ypos_start = xpos_len # y starts at bit len(xpos in bits) in x+y bits
xy_len = xpos_len + ypos_len # total lengh of x+y bits
message_length = 20 * 8 # in bits
group_id_len = 4 # bits used for group id
group_size = 2**4 # currently 19
def encodeMessage(map):
''' Function to encode seat positions for sending via message_length BLE broadcasts
Args:
@map (Array of Tuples or Arrays): Format of tuples/arrays is [(int)id, (int)x, (int)y]
Returns:
@ (string): String of encoded ids with their coords
'''
sorted_map = sorted(map, key=lambda x: x[0])
message_ba = BitArray('')
for i in range(len(sorted_map)):
message_ba.append(hex(sorted_map[i][0]>>4))
message_ba.append(hex(sorted_map[i][0]&0b1111))
message_ba.append(hex(sorted_map[i][1]))
message_ba.append(hex(sorted_map[i][2]))
return message_ba.tobytes().decode('cp437')
def decodeMessage(message):
''' Function to decode seat positions for sending via message_length BLE broadcasts
Args:
@messages (string): Should be the full output from encodeMessage
Returns:
@map (array of tuples): format of tuples is ((int)id, (int)x, (int)y)
'''
seat_map = []
message_bytes = BitArray(message.encode('cp437')).tobytes()
for i in range(0, len(message_bytes),2):
byte0 = message_bytes[i]
byte1 = message_bytes[i+1]
seat_map.append((int(byte0), int(byte1)&0b1111, int(byte1)>>4))
return seat_map | UTF-8 | Python | false | false | 1,542 | py | 51 | encoder.py | 36 | 0.690013 | 0.656939 | 0 | 47 | 31.829787 | 87 |
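# Wire-format note (descriptive comment added for clarity, not in the original):
# encodeMessage emits 2 bytes per seat -- the first byte is the cap id, the
# second packs the two 4-bit coordinates, with x appended first and therefore
# ending up in the high nibble. decodeMessage, however, reads x from the low
# nibble and y from the high nibble, so a round trip appears to swap x and y;
# callers should confirm which nibble convention the rest of the system expects.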
slightlynybbled/manufacturing | 18,614,388,295,929 | 8bbc2cb674f970ebc423e83189072f218c6447e8 | 18742a0ecbf392328d8df994abf937e0e72d89f3 | /examples/ppk_plot.py | 475d772870ff5c8e74adc0c024ef88cc66100a44 | [
"MIT"
] | permissive | https://github.com/slightlynybbled/manufacturing | 1ab9c3dd46b168d8be1b98472d818f5265483def | 4d172c7a12a649259ad3bc0e3f6993b1dcd79e0c | refs/heads/master | 2023-06-09T00:18:31.143098 | 2023-06-06T18:23:29 | 2023-06-06T18:23:29 | 224,505,774 | 45 | 16 | MIT | false | 2023-06-06T15:45:03 | 2019-11-27T19:47:39 | 2023-05-12T05:52:55 | 2023-06-06T15:44:15 | 3,068 | 40 | 13 | 3 | Python | false | false | import logging
import matplotlib.pyplot as plt
from manufacturing import import_excel, ppk_plot
logging.basicConfig(level=logging.INFO)
data = import_excel('data/example_data_with_faults.xlsx',
columnname='value')
fig, ax = plt.subplots() # creating a figure to provide to the ppk_plot as a parameter
ppk_plot(**data,
parameter_name='Current',
upper_specification_limit=10.1,
lower_specification_limit=5.5,
show_dppm=True,
figure=fig)
plt.show()
| UTF-8 | Python | false | false | 517 | py | 54 | ppk_plot.py | 33 | 0.673114 | 0.663443 | 0 | 19 | 26.210526 | 87 |
rlee287/secure-notes-client | 14,740,327,766,827 | d10cb831011f988208c9f56177100bad3bcb0095 | 347059357e2ca1db87e6e0ceb2722c243a93e6c7 | /secure_notes_client/gui_editor.py | 665b1572285911c2092b21c809715c0582ffb093 | [
"MIT"
] | permissive | https://github.com/rlee287/secure-notes-client | b669b6ee400262ff23cc5ece35e7091edb7135af | 56d5fcce1d2eeb46de22aac63131fe7214b6f185 | refs/heads/master | 2022-12-09T02:51:26.149435 | 2019-10-14T01:56:37 | 2019-10-14T01:56:37 | 193,761,929 | 0 | 0 | MIT | false | 2022-12-08T06:36:18 | 2019-06-25T18:24:58 | 2019-10-14T01:56:45 | 2022-12-08T06:36:17 | 39 | 0 | 0 | 5 | Python | false | false | from PySide2 import QtWidgets
from PySide2.QtCore import Signal, Qt
import filesystem
import networking
class EditorClass(QtWidgets.QMainWindow):
close_signal = Signal(QtWidgets.QMainWindow)
def __init__(self, ui_obj, config_obj, note_id):
# type: (EditorClass, Ui_NoteEditWindow, ConfigObj, str) -> None
super().__init__()
self.ui_obj = ui_obj
self.config_obj = config_obj
self.note_id = note_id
self.edited_since_last_save = False
self.allow_edits = False
self.note_obj=filesystem.read_noteobj(config_obj,note_id)
ui_obj.setupUi(self)
self.setAttribute(Qt.WA_DeleteOnClose,True)
self.setWindowTitle(self.note_obj["note"]["title"])
ui_obj.titleLineEdit.setText(self.note_obj["note"]["title"])
ui_obj.noteTextEdit.setPlainText(self.note_obj["note"]["text"])
ui_obj.titleLineEdit.editingFinished.connect(self.mark_edited)
ui_obj.noteTextEdit.textChanged.connect(self.mark_edited)
ui_obj.actionSave.triggered.connect(self.save_file)
self.update_editor_enabled_status()
def set_editing_enabled(self, enable_editing):
if self.allow_edits == enable_editing:
return
if self.allow_edits: # T -> F, throw warning if unsaved changes
if self.edited_since_last_save:
raise ValueError("Unsaved changes present")
else: # F -> T, just do enabling
self.allow_edits = True
self.edited_since_last_save = False
self.update_editor_enabled_status()
def update_editor_enabled_status(self):
self.ui_obj.titleLineEdit.setReadOnly(not self.allow_edits)
self.ui_obj.noteTextEdit.setReadOnly(not self.allow_edits)
def mark_edited(self):
self.edited_since_last_save = True
def save_file(self):
note_obj_copy = self.note_obj.copy()
note_obj_copy["note"]["title"] = ui_obj.titleLineEdit.getText()
note_obj_copy["note"]["text"] = ui_obj.noteTextEdit.getText()
self.edited_since_last_save = False
def closeEvent(self, event):
# TODO: confirmation dialog stuff
self.close_signal.emit(self)
event.accept() | UTF-8 | Python | false | false | 2,241 | py | 12 | gui_editor.py | 8 | 0.643909 | 0.643017 | 0 | 61 | 35.754098 | 72 |
ZackJorquera/ScaleLiquidRemainingIOT | 4,363,686,782,744 | 20a8b98e95aeef1bb5269b21b512d9720c49ef7d | e3210970da44105e392c6e3abbd7b1e12c0e755f | /RaspberryPiCode/ScaleAggregator/ScaleAggregator.py | 5e5058542f382aef660833981a133211a9caf99b | [
"MIT"
] | permissive | https://github.com/ZackJorquera/ScaleLiquidRemainingIOT | dedf6d1faa17dfe186cbf6db2b68761a84e9da06 | 57a349b95a3205c7dd5ad98337a9e5b01f384022 | refs/heads/master | 2018-10-22T06:41:20.700818 | 2018-07-25T03:40:57 | 2018-07-25T03:40:57 | 114,922,290 | 2 | 0 | null | false | 2018-02-07T13:59:36 | 2017-12-20T19:14:25 | 2018-02-07T13:32:49 | 2018-02-07T13:58:52 | 56 | 0 | 0 | 1 | Python | false | null | import time
import sys
import logging
import os
sys.path.append('../Tools/')
import ScaleInfoReaderWriter as ScaleIRW
import DatabaseReaderWriter as DBRW
import ConfigReaderWriter as CfgRW
def LoadDB():
if CfgRW.cfgVars["dbToUse"] == "Mongo":
db = DBRW.MongoDBProfile()
else:
db = DBRW.MongoDBProfile()
# db = DBRW.MySQLDBProfile()
db.Connect()
return db
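# Descriptive note (added): the config keys this script reads through
# CfgRW.cfgVars are dbToUse, aggregatorSecsPerPersist, aggregatorLoopsOfPersists,
# aggregatorPrintPushes and uselatestFromMongoAsCurrent.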
def printAndLog(msg, loglevel):
print msg
logger.log(loglevel, msg)
def createLogger():
logDir = "../Log"
if not os.path.exists(logDir):
os.makedirs(logDir)
logPath = "../Log/Log.txt"
file_handler = logging.FileHandler(logPath)
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s\t%(asctime)s \t%(message)s')
file_handler.setFormatter(formatter)
tmplogger = logging.getLogger()
tmplogger.setLevel(logging.DEBUG)
tmplogger.addHandler(file_handler)
return tmplogger
ScaleDataDB = LoadDB()
logger = createLogger()
scaleInfoList = None
loopOn = 0
secsPerPersist = int(CfgRW.cfgVars["aggregatorSecsPerPersist"],10) # try catch
loopsOfPersists = int(CfgRW.cfgVars["aggregatorLoopsOfPersists"],10)
timeOfLastUpdate = None
printAndLog("Starting Aggregation every " + str(secsPerPersist) + " Second.", logging.INFO)
if ScaleDataDB.Client != None:
printAndLog("Outputting to " + CfgRW.cfgVars["dbToUse"] + " database " + ScaleDataDB.DBName + " at: " + str(ScaleDataDB.Client.address), logging.INFO)
while True:
if CfgRW.cfgVars["uselatestFromMongoAsCurrent"].upper() == "TRUE":
break
timeOfLastUpdate = time.time()
if scaleInfoList is None or loopOn > loopsOfPersists or len(scaleInfoList) != ScaleIRW.GetNumOfScales():
try:
scaleInfoList = ScaleIRW.GetListOfScaleInfos()
except Exception as error:
printAndLog(str(error), logging.ERROR)
break
if ScaleDataDB.Connected:
successfulPushes = 0
failedPushes = 0
for si in scaleInfoList:
try:
if si.Failed:
raise Exception()
ScaleDataDB.Write(si, (si.GetValue() * 100)) # There is a Write Function for both the MySQLRW and MongoRW classes
successfulPushes += 1
except:
failedPushes += 1
if CfgRW.cfgVars["aggregatorPrintPushes"].upper() == "TRUE":
printAndLog(str(successfulPushes) + " documents successfully added to database" \
" with " + str(failedPushes) + " fails. " \
"Waiting " + str(secsPerPersist) + " seconds before next update.", logging.INFO)
else:
if failedPushes > 0:
printAndLog(str(failedPushes) + " documents failed to push to database.", logging.ERROR)
if successfulPushes == 0 and len(scaleInfoList) != 0:
printAndLog("DB failed to push, attempting Reconnect.", logging.ERROR)
if ScaleDataDB.Reconnect():
printAndLog("Successfully reconnected to " + CfgRW.cfgVars["dbToUse"] + " database " + ScaleDataDB.DBName + " at: " + str(ScaleDataDB.Client.address), logging.INFO)
else:
printAndLog("DB failed to connect, attempting Reconnect.", logging.ERROR)
ScaleDataDB.Reconnect()
if ScaleDataDB.Client != None:
printAndLog("Outputting to " + CfgRW.cfgVars["dbToUse"] + " database " + ScaleDataDB.DBName + " at: " + str(ScaleDataDB.Client.address), logging.INFO)
while time.time() - timeOfLastUpdate < secsPerPersist:
time.sleep(1)
loopOn += 1 | UTF-8 | Python | false | false | 3,607 | py | 18 | ScaleAggregator.py | 10 | 0.648461 | 0.643748 | 0 | 102 | 34.372549 | 184 |
eQTL-Catalogue/eQTL-SumStats | 5,076,651,386,447 | 09a87f5f83f060c8e65d6b6d1cf524a1392a9db9 | 91bc0c7c9ee8db759444d430c100ce88bf242f8a | /sumstats/main.py | f7ffdb1ed75181eb336d8c5b394d702e065cbbb9 | [] | no_license | https://github.com/eQTL-Catalogue/eQTL-SumStats | 7bc7e9e9e8df2865aa946eba21bf248a79f21d4c | 84751d782139cb0f84c41607c9e8ee4e98ff072e | refs/heads/master | 2023-06-23T03:33:17.794710 | 2023-06-13T10:46:29 | 2023-06-13T10:46:29 | 234,100,303 | 2 | 1 | null | false | 2022-12-08T01:50:52 | 2020-01-15T14:38:50 | 2021-06-04T15:46:03 | 2022-12-08T01:50:51 | 8,853 | 2 | 1 | 18 | Python | false | false | from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, ORJSONResponse
from fastapi.middleware.cors import CORSMiddleware
import logging
from sumstats.config import (API_BASE,
APP_VERSION,
API_DESCRIPTION,
TAGS_METADATA)
from sumstats.dependencies.error_classes import APIException
import sumstats.api_v1.routers.routes as routes_v1
import sumstats.api_v2.routers.eqtl as routes_v2
logging.config.fileConfig("sumstats/log_conf.ini",
disable_existing_loggers=False)
logger = logging.getLogger(__name__)
app = FastAPI(title="eQTL Catalogue Summary Statistics API Documentation",
openapi_tags=TAGS_METADATA,
description=API_DESCRIPTION,
docs_url=f"{API_BASE}/docs",
redoc_url=None,
openapi_url=f"{API_BASE}/openapi.json",
version=APP_VERSION
)
@app.exception_handler(ValueError)
async def value_error_exception_handler(request: Request, exc: ValueError):
return JSONResponse(
status_code=400,
content={"message": str(exc)},
)
@app.exception_handler(APIException)
async def handle_custom_api_exception(request: Request,
exc: APIException):
return JSONResponse(
status_code=exc.status_code,
content={"message": exc.message},
)
# configure CORS
app.add_middleware(CORSMiddleware,
allow_origins=['*'])
# v1 API (default)
app.include_router(routes_v1.router,
prefix=API_BASE,
include_in_schema=False,
default_response_class=ORJSONResponse)
app.include_router(routes_v1.router,
prefix=f"{API_BASE}/v1",
default_response_class=ORJSONResponse,
deprecated=True,
tags=["eQTL API v1"])
# v2 API
app.include_router(routes_v2.router,
prefix=f"{API_BASE}/v2",
default_response_class=ORJSONResponse,
tags=["eQTL API v2"])
| UTF-8 | Python | false | false | 2,148 | py | 163 | main.py | 135 | 0.601024 | 0.593575 | 0 | 65 | 32.046154 | 75 |
ForeverZyh/ASCC | 10,505,490,011,325 | 2a661ca6c9abc4667133de3169f4c55847aed8c2 | 4820859e8d6f68678dd212d6ad185b7b2914597e | /PWWS/get_NE_list.py | 3d8dd3a9dec1cc2da5c009a187eec93814bff31a | [
"MIT"
] | permissive | https://github.com/ForeverZyh/ASCC | c44d88e58b4a1e39bb61abcb7aaf1641c1a14164 | 2d76d679889953501c469221a37d486e7ee42ded | refs/heads/main | 2023-04-27T16:45:26.575805 | 2021-05-07T01:36:41 | 2021-05-07T01:36:41 | 364,674,852 | 0 | 0 | MIT | true | 2021-05-05T18:44:13 | 2021-05-05T18:44:12 | 2021-05-05T18:44:10 | 2021-05-03T05:27:15 | 41,614 | 0 | 0 | 0 | null | false | false | # coding: utf-8
import os
import numpy as np
from .config import config
import copy
import sys
from .read_files import split_imdb_files, split_yahoo_files, split_agnews_files
import spacy
import argparse
import re
from collections import Counter, defaultdict
nlp = spacy.load('en')
parser = argparse.ArgumentParser('named entity recognition')
parser.add_argument('-d', '--dataset',
help='Data set',
choices=['imdb', 'agnews', 'yahoo'],
default='yahoo')
NE_type_dict = {
'PERSON': defaultdict(int), # People, including fictional.
'NORP': defaultdict(int), # Nationalities or religious or political groups.
'FAC': defaultdict(int), # Buildings, airports, highways, bridges, etc.
'ORG': defaultdict(int), # Companies, agencies, institutions, etc.
'GPE': defaultdict(int), # Countries, cities, states.
'LOC': defaultdict(int), # Non-GPE locations, mountain ranges, bodies of water.
'PRODUCT': defaultdict(int), # Object, vehicles, foods, etc.(Not services)
'EVENT': defaultdict(int), # Named hurricanes, battles, wars, sports events, etc.
'WORK_OF_ART': defaultdict(int), # Titles of books, songs, etc.
'LAW': defaultdict(int), # Named documents made into laws.
'LANGUAGE': defaultdict(int), # Any named language.
'DATE': defaultdict(int), # Absolute or relative dates or periods.
'TIME': defaultdict(int), # Times smaller than a day.
'PERCENT': defaultdict(int), # Percentage, including "%".
'MONEY': defaultdict(int), # Monetary values, including unit.
'QUANTITY': defaultdict(int), # Measurements, as of weight or distance.
'ORDINAL': defaultdict(int), # "first", "second", etc.
'CARDINAL': defaultdict(int), # Numerals that do not fall under another type.
}
def recognize_named_entity(texts):
'''
Returns all NEs in the input texts and their corresponding types
'''
NE_freq_dict = copy.deepcopy(NE_type_dict)
for text in texts:
doc = nlp(text)
for word in doc.ents:
NE_freq_dict[word.label_][word.text] += 1
return NE_freq_dict
def find_adv_NE(D_true, D_other):
'''
find NE_adv in D-D_y_true which is defined in the end of section 3.1
'''
# adv_NE_list = []
for type in NE_type_dict.keys():
# find the most frequent true and other NEs of the same type
true_NE_list = [NE_tuple[0] for (i, NE_tuple) in enumerate(D_true[type]) if i < 15]
other_NE_list = [NE_tuple[0] for (i, NE_tuple) in enumerate(D_other[type]) if i < 30]
for other_NE in other_NE_list:
if other_NE not in true_NE_list and len(other_NE.split()) == 1:
# adv_NE_list.append((type, other_NE))
print("'" + type + "': '" + other_NE + "',")
with open('./{}.txt'.format(args.dataset), 'a', encoding='utf-8') as f:
f.write("'" + type + "': '" + other_NE + "',\n")
break
class NameEntityList(object):
# If the original input in IMDB belongs to class 0 (negative)
imdb_0 = {'PERSON': 'David',
'NORP': 'Australian',
'FAC': 'Hound',
'ORG': 'Ford',
'GPE': 'India',
'LOC': 'Atlantic',
'PRODUCT': 'Highly',
'EVENT': 'Depression',
'WORK_OF_ART': 'Casablanca',
'LAW': 'Constitution',
'LANGUAGE': 'Portuguese',
'DATE': '2001',
'TIME': 'hours',
'PERCENT': '98%',
'MONEY': '4',
'QUANTITY': '70mm',
'ORDINAL': '5th',
'CARDINAL': '7',
}
# If the original input in IMDB belongs to class 1 (positive)
imdb_1 = {'PERSON': 'Lee',
'NORP': 'Christian',
'FAC': 'Shannon',
'ORG': 'BAD',
'GPE': 'Seagal',
'LOC': 'Malta',
'PRODUCT': 'Cat',
'EVENT': 'Hugo',
'WORK_OF_ART': 'Jaws',
'LAW': 'RICO',
'LANGUAGE': 'Sebastian',
'DATE': 'Friday',
'TIME': 'minutes',
'PERCENT': '75%',
'MONEY': '$',
'QUANTITY': '9mm',
'ORDINAL': 'sixth',
'CARDINAL': 'zero',
}
imdb = [imdb_0, imdb_1]
agnews_0 = {'PERSON': 'Williams',
'NORP': 'European',
'FAC': 'Olympic',
'ORG': 'Microsoft',
'GPE': 'Australia',
'LOC': 'Earth',
'PRODUCT': '#',
'EVENT': 'Cup',
'WORK_OF_ART': 'PowerBook',
'LAW': 'Pacers-Pistons',
'LANGUAGE': 'Chinese',
'DATE': 'third-quarter',
'TIME': 'Tonight',
'MONEY': '#39;t',
'QUANTITY': '#39;t',
'ORDINAL': '11th',
'CARDINAL': '1',
}
agnews_1 = {'PERSON': 'Bush',
'NORP': 'Iraqi',
'FAC': 'Outlook',
'ORG': 'Microsoft',
'GPE': 'Iraq',
'LOC': 'Asia',
'PRODUCT': '#',
'EVENT': 'Series',
'WORK_OF_ART': 'Nobel',
'LAW': 'Constitution',
'LANGUAGE': 'French',
'DATE': 'third-quarter',
'TIME': 'hours',
'MONEY': '39;Keefe',
'ORDINAL': '2nd',
'CARDINAL': 'Two',
}
agnews_2 = {'PERSON': 'Arafat',
'NORP': 'Iraqi',
'FAC': 'Olympic',
'ORG': 'AFP',
'GPE': 'Baghdad',
'LOC': 'Earth',
'PRODUCT': 'Soyuz',
'EVENT': 'Cup',
'WORK_OF_ART': 'PowerBook',
'LAW': 'Constitution',
'LANGUAGE': 'Filipino',
'DATE': 'Sunday',
'TIME': 'evening',
'MONEY': '39;m',
'QUANTITY': '20km',
'ORDINAL': 'eighth',
'CARDINAL': '6',
}
agnews_3 = {'PERSON': 'Arafat',
'NORP': 'Iraqi',
'FAC': 'Olympic',
'ORG': 'AFP',
'GPE': 'Iraq',
'LOC': 'Kashmir',
'PRODUCT': 'Yukos',
'EVENT': 'Cup',
'WORK_OF_ART': 'Gazprom',
'LAW': 'Pacers-Pistons',
'LANGUAGE': 'Hebrew',
'DATE': 'Saturday',
'TIME': 'overnight',
'MONEY': '39;m',
'QUANTITY': '#39;t',
'ORDINAL': '11th',
'CARDINAL': '6',
}
agnews = [agnews_0, agnews_1, agnews_2, agnews_3]
yahoo_0 = {'PERSON': 'Fantasy',
'NORP': 'Russian',
'FAC': 'Taxation',
'ORG': 'Congress',
'GPE': 'U.S.',
'LOC': 'Sea',
'PRODUCT': 'Variable',
'EVENT': 'Series',
'WORK_OF_ART': 'Stopping',
'LAW': 'Constitution',
'LANGUAGE': 'Hebrew',
'DATE': '2004-05',
'TIME': 'morning',
'MONEY': '$ale',
'QUANTITY': 'Hiberno-English',
'ORDINAL': 'Tertiary',
'CARDINAL': 'three',
}
yahoo_1 = {'PERSON': 'Equine',
'NORP': 'Japanese',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'UK',
'LOC': 'Sea',
'PRODUCT': 'RuneScape',
'EVENT': 'Series',
'WORK_OF_ART': 'Stopping',
'LAW': 'Strap-',
'LANGUAGE': 'Spanish',
'DATE': '2004-05',
'TIME': 'night',
'PERCENT': '100%',
'MONEY': 'five-dollar',
'QUANTITY': 'Hiberno-English',
'ORDINAL': 'Sixth',
'CARDINAL': '5',
}
yahoo_2 = {'PERSON': 'Equine',
'NORP': 'Canadian',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'California',
'LOC': 'Atlantic',
'PRODUCT': 'Variable',
'EVENT': 'Series',
'WORK_OF_ART': 'Weight',
'LANGUAGE': 'Filipino',
'DATE': '2004-05',
'TIME': 'night',
'PERCENT': '100%',
'MONEY': 'ten-dollar',
'QUANTITY': '$ale',
'ORDINAL': 'Tertiary',
'CARDINAL': 'two',
}
yahoo_3 = {'PERSON': 'Equine',
'NORP': 'Irish',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'California',
'LOC': 'Sea',
'PRODUCT': 'RuneScape',
'EVENT': 'Series',
'WORK_OF_ART': 'Weight',
'LAW': 'Strap-',
'LANGUAGE': 'Spanish',
'DATE': '2004-05',
'TIME': 'tonight',
'PERCENT': '100%',
'MONEY': 'five-dollar',
'QUANTITY': 'Hiberno-English',
'ORDINAL': 'Sixth',
'CARDINAL': '5',
}
yahoo_4 = {'PERSON': 'Equine',
'NORP': 'Irish',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'Canada',
'LOC': 'Sea',
'PRODUCT': 'Variable',
'WORK_OF_ART': 'Stopping',
'LAW': 'Constitution',
'LANGUAGE': 'Spanish',
'DATE': '2004-05',
'TIME': 'seconds',
'PERCENT': '100%',
'MONEY': 'hundred-dollar',
'QUANTITY': 'Hiberno-English',
'ORDINAL': 'Tertiary',
'CARDINAL': '100',
}
yahoo_5 = {'PERSON': 'Equine',
'NORP': 'English',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'Australia',
'LOC': 'Sea',
'PRODUCT': 'Variable',
'EVENT': 'Series',
'WORK_OF_ART': 'Weight',
'LAW': 'Strap-',
'LANGUAGE': 'Filipino',
'DATE': '2004-05',
'TIME': 'seconds',
'MONEY': 'hundred-dollar',
'ORDINAL': 'Tertiary',
'CARDINAL': '2000',
}
yahoo_6 = {'PERSON': 'Fantasy',
'NORP': 'Islamic',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'California',
'LOC': 'Sea',
'PRODUCT': 'Variable',
'EVENT': 'Series',
'WORK_OF_ART': 'Stopping',
'LANGUAGE': 'Filipino',
'DATE': '2004-05',
'TIME': 'seconds',
'PERCENT': '100%',
'MONEY': '$ale',
'QUANTITY': '$ale',
'ORDINAL': 'Tertiary',
'CARDINAL': '100',
}
yahoo_7 = {'PERSON': 'Fantasy',
'NORP': 'Canadian',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'UK',
'LOC': 'West',
'PRODUCT': 'Variable',
'EVENT': 'Watergate',
'WORK_OF_ART': 'Stopping',
'LAW': 'Constitution',
'LANGUAGE': 'Filipino',
'DATE': '2004-05',
'TIME': 'tonight',
'PERCENT': '100%',
'MONEY': '$ale',
'QUANTITY': '$ale',
'ORDINAL': 'Tertiary',
'CARDINAL': '2000',
}
yahoo_8 = {'PERSON': 'Equine',
'NORP': 'Japanese',
'FAC': 'Music',
'ORG': 'Congress',
'GPE': 'Chicago',
'LOC': 'Sea',
'PRODUCT': 'Variable',
'EVENT': 'Series',
'WORK_OF_ART': 'Stopping',
'LAW': 'Strap-',
'LANGUAGE': 'Spanish',
'DATE': '2004-05',
'TIME': 'night',
'PERCENT': '100%',
'QUANTITY': '$ale',
'ORDINAL': 'Sixth',
'CARDINAL': '2',
}
yahoo_9 = {'PERSON': 'Equine',
'NORP': 'Chinese',
'FAC': 'Music',
'ORG': 'Digital',
'GPE': 'U.S.',
'LOC': 'Atlantic',
'PRODUCT': 'Variable',
'EVENT': 'Series',
'WORK_OF_ART': 'Weight',
'LAW': 'Constitution',
'LANGUAGE': 'Spanish',
'DATE': '1918-1945',
'TIME': 'night',
'PERCENT': '100%',
'MONEY': 'ten-dollar',
'QUANTITY': 'Hiberno-English',
'ORDINAL': 'Tertiary',
'CARDINAL': '5'
}
yahoo = [yahoo_0, yahoo_1, yahoo_2, yahoo_3, yahoo_4, yahoo_5, yahoo_6, yahoo_7, yahoo_8, yahoo_9]
L = {'imdb': imdb, 'agnews': agnews, 'yahoo': yahoo}
NE_list = NameEntityList()
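# Illustrative lookup (descriptive example added here, not in the original file):
# for an IMDB sample whose true class is 0 (negative), the adversarial
# replacement for a PERSON entity is NE_list.L['imdb'][0]['PERSON'], i.e. 'David'.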
if __name__ == '__main__':
args = parser.parse_args()
print('dataset:', args.dataset)
class_num = config.num_classes[args.dataset]
if args.dataset == 'imdb':
        train_texts, train_labels, dev_texts, dev_labels, test_texts, test_labels = split_imdb_files()
# get input texts in different classes
pos_texts = train_texts[:12500]
pos_texts.extend(test_texts[:12500])
neg_texts = train_texts[12500:]
neg_texts.extend(test_texts[12500:])
texts = [neg_texts, pos_texts]
elif args.dataset == 'agnews':
texts = [[] for i in range(class_num)]
train_texts, train_labels, test_texts, test_labels = split_agnews_files()
for i, label in enumerate(train_labels):
texts[np.argmax(label)].append(train_texts[i])
for i, label in enumerate(test_labels):
texts[np.argmax(label)].append(test_texts[i])
elif args.dataset == 'yahoo':
train_texts, train_labels, test_texts, test_labels = split_yahoo_files()
texts = [[] for i in range(class_num)]
for i, label in enumerate(train_labels):
texts[np.argmax(label)].append(train_texts[i])
for i, label in enumerate(test_labels):
texts[np.argmax(label)].append(test_texts[i])
D_true_list = []
for i in range(class_num):
D_true = recognize_named_entity(texts[i]) # D_true contains the NEs in input texts with the label y_true
D_true_list.append(D_true)
for i in range(class_num):
D_true = copy.deepcopy(D_true_list[i])
D_other = copy.deepcopy(NE_type_dict)
for j in range(class_num):
if i == j:
continue
for type in NE_type_dict.keys():
# combine D_other[type] and D_true_list[j][type]
for key in D_true_list[j][type].keys():
D_other[type][key] += D_true_list[j][type][key]
for type in NE_type_dict.keys():
D_other[type] = sorted(D_other[type].items(), key=lambda k_v: k_v[1], reverse=True)
D_true[type] = sorted(D_true[type].items(), key=lambda k_v: k_v[1], reverse=True)
print('\nfind adv_NE_list in class', i)
with open('./{}.txt'.format(args.dataset), 'a', encoding='utf-8') as f:
f.write('\nfind adv_NE_list in class' + str(i))
find_adv_NE(D_true, D_other)
| UTF-8 | Python | false | false | 15,737 | py | 23 | get_NE_list.py | 19 | 0.432166 | 0.418885 | 0 | 428 | 35.768692 | 113 |
igor-barsukov/hurst-calc | 4,844,723,132,812 | 66cf9ad6f0409854330b339393af7e0f3f76edbb | 8519cc340770d1d05842dd75a1ce0e2cb47ac705 | /basic_rs/parse/parse_tbp.py | 4557402a59a25d7ec826bd7c428230c3981ad612 | [] | no_license | https://github.com/igor-barsukov/hurst-calc | 25c6194d4a98cb191d233f0d99dc33e8ce0c90f9 | 03d3ec160af780029b7fed4a5a916c044388ac5d | refs/heads/master | 2023-03-23T14:22:22.513762 | 2019-11-20T20:34:32 | 2019-11-20T20:34:32 | 221,783,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- encoding:utf-8 -*-
"""
Для работы требует модуль dpkt, достуаный через pip.
В качестве первого аргумента коммандной строки требует имя
файла для обработки, результат записывается в текущий каталог в
файл с таким же именем, но расширением csv.
РАССЧЕТ ВРЕМЕНИ МЕЖДУ ПАКЕТАМИ
- tbp - TIME BETWEEN PACKETS
"""
import dpkt
import sys
from os import path
def run(pcapfile):
    # open the file with the input data (the pcapfile argument)
infile = open(pcapfile,'rb')
    # open the output file where the statistics will be saved
    # take the name of the input file without its extension and append _tbp.csv
outfileName = path.splitext(path.basename(pcapfile))[0]+'_tbp.csv'
outfile = open(outfileName,'w')
    # data column headers
# outfile.write('deltaTime\n')
    # initialize the counters
deltaTime = 0
previousTime = 0
for ts, buf in dpkt.pcap.Reader(infile):
# print "ts - ", ts
if previousTime > 0:
deltaTime = ts - previousTime
previousTime = ts
outfile.write(str(deltaTime)+'\n')
else:
outfile.write(str(0)+'\n')
previousTime = ts
infile.close()
outfile.close()
return outfileName
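# Illustrative usage (added; "capture.pcap" is a hypothetical input file):
#   run("capture.pcap")   # writes capture_tbp.csv in the current directory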
| UTF-8 | Python | false | false | 1,613 | py | 8 | parse_tbp.py | 7 | 0.663961 | 0.658279 | 0 | 46 | 25.782609 | 70 |
nagyist/ParaView | 11,338,713,702,683 | 63c423be57d97a90e908f3ea849e406830a9c0ec | 09d1138225f295ec2e5f3e700b44acedcf73f383 | /Web/Python/paraview/web/data_converter.py | ed094f364e1f01b5a4781c7a4e7692e467a8d74a | [
"BSD-3-Clause"
] | permissive | https://github.com/nagyist/ParaView | e86d1ed88a805aecb13f707684103e43d5f6b09f | 6810d701c44b2097baace5ad2c05f81c6d0fd310 | refs/heads/master | 2023-09-04T07:34:57.251637 | 2023-09-03T00:34:36 | 2023-09-03T00:34:57 | 85,244,343 | 0 | 0 | BSD-3-Clause | true | 2023-09-11T15:57:25 | 2017-03-16T21:44:59 | 2022-12-16T20:27:24 | 2023-09-11T15:57:22 | 238,433 | 0 | 0 | 0 | C++ | false | false | from vtkmodules.vtkIOImage import vtkPNGReader
from vtkmodules.vtkCommonCore import vtkFloatArray, vtkUnsignedCharArray
from vtkmodules.vtkCommonDataModel import vtkImageData
from vtkmodules.vtkIOLegacy import vtkDataSetWriter
from vtkmodules.web.camera import normalize, vectProduct, dotProduct
from vtkmodules.web import iteritems
import json, os, math, array
# -----------------------------------------------------------------------------
# Helper function
# -----------------------------------------------------------------------------
def getScalarFromRGB(rgb, scalarRange=[-1.0, 1.0]):
delta = (scalarRange[1] - scalarRange[0]) / 16777215.0 # 2^24 - 1 => 16,777,215
if rgb[0] != 0 or rgb[1] != 0 or rgb[2] != 0:
# Decode encoded value
return scalarRange[0] + delta * float(
rgb[0] * 65536 + rgb[1] * 256 + rgb[2] - 1
)
else:
# No value
return float("NaN")
def convertImageToFloat(srcPngImage, destFile, scalarRange=[0.0, 1.0]):
reader = vtkPNGReader()
reader.SetFileName(srcPngImage)
reader.Update()
rgbArray = reader.GetOutput().GetPointData().GetArray(0)
stackSize = rgbArray.GetNumberOfTuples()
size = reader.GetOutput().GetDimensions()
outputArray = vtkFloatArray()
outputArray.SetNumberOfComponents(1)
outputArray.SetNumberOfTuples(stackSize)
for idx in range(stackSize):
outputArray.SetTuple1(
idx, getScalarFromRGB(rgbArray.GetTuple(idx), scalarRange)
)
# Write float file
with open(destFile, "wb") as f:
f.write(memoryview(outputArray))
return size
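# Illustrative usage sketch (added; the file names are hypothetical): decode an
# encoded PNG into a raw float32 buffer for a scalar captured over [0, 1]:
#   dims = convertImageToFloat("scalar_rgb.png", "scalar.float32", [0.0, 1.0])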
def convertRGBArrayToFloatArray(rgbArray, scalarRange=[0.0, 1.0]):
linearSize = rgbArray.GetNumberOfTuples()
outputArray = vtkFloatArray()
outputArray.SetNumberOfComponents(1)
outputArray.SetNumberOfTuples(linearSize)
for idx in range(linearSize):
outputArray.SetTuple1(
idx, getScalarFromRGB(rgbArray.GetTuple(idx), scalarRange)
)
return outputArray
# -----------------------------------------------------------------------------
# Composite.json To order.array
# -----------------------------------------------------------------------------
class CompositeJSON(object):
def __init__(self, numberOfLayers):
self.nbLayers = numberOfLayers
self.encoding = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def load(self, file):
with open(file, "r") as f:
composite = json.load(f)
self.width = composite["dimensions"][0]
self.height = composite["dimensions"][1]
self.pixels = composite["pixel-order"].split("+")
self.imageSize = self.width * self.height
self.stackSize = self.imageSize * self.nbLayers
def getImageSize(self):
return self.imageSize
def getStackSize(self):
return self.stackSize
def writeOrderSprite(self, path):
ds = vtkImageData()
ds.SetDimensions(self.width, self.height, self.nbLayers)
ds.GetPointData().AddArray(self.getSortedOrderArray())
writer = vtkDataSetWriter()
writer.SetInputData(ds)
writer.SetFileName(path)
writer.Update()
def getSortedOrderArray(self):
sortedOrder = vtkUnsignedCharArray()
sortedOrder.SetName("layerIdx")
sortedOrder.SetNumberOfTuples(self.stackSize)
# Reset content
for idx in range(self.stackSize):
sortedOrder.SetValue(idx, 255)
idx = 0
for pixel in self.pixels:
x = idx % self.width
            y = idx // self.width
flipYIdx = self.width * (self.height - y - 1) + x
if "@" in pixel:
idx += int(pixel[1:])
else:
# Need to decode the order
layerIdx = 0
for layer in pixel:
sortedOrder.SetValue(
flipYIdx + self.imageSize * layerIdx, self.encoding.index(layer)
)
layerIdx += 1
# Move to next pixel
idx += 1
return sortedOrder
# -----------------------------------------------------------------------------
# Composite Sprite to Sorted Composite Dataset Builder
# -----------------------------------------------------------------------------
class ConvertCompositeSpriteToSortedStack(object):
def __init__(self, directory):
self.basePath = directory
self.layers = []
self.data = []
self.imageReader = vtkPNGReader()
# Load JSON metadata
with open(os.path.join(directory, "config.json"), "r") as f:
self.config = json.load(f)
self.nbLayers = len(self.config["scene"])
while len(self.layers) < self.nbLayers:
self.layers.append({})
with open(os.path.join(directory, "index.json"), "r") as f:
self.info = json.load(f)
with open(os.path.join(directory, "offset.json"), "r") as f:
offsets = json.load(f)
for key, value in iteritems(offsets):
meta = key.split("|")
if len(meta) == 2:
self.layers[int(meta[0])][meta[1]] = value
elif meta[1] in self.layers[int(meta[0])]:
self.layers[int(meta[0])][meta[1]][int(meta[2])] = value
else:
self.layers[int(meta[0])][meta[1]] = [value, value, value]
self.composite = CompositeJSON(len(self.layers))
def listData(self):
return self.data
def convert(self):
for root, dirs, files in os.walk(self.basePath):
if "rgb.png" in files:
print("Process", root)
self.processDirectory(root)
def processDirectory(self, directory):
self.imageReader.SetFileName(os.path.join(directory, "rgb.png"))
self.imageReader.Update()
rgbArray = self.imageReader.GetOutput().GetPointData().GetArray(0)
self.composite.load(os.path.join(directory, "composite.json"))
orderArray = self.composite.getSortedOrderArray()
imageSize = self.composite.getImageSize()
stackSize = self.composite.getStackSize()
# Write order (sorted order way)
with open(os.path.join(directory, "order.uint8"), "wb") as f:
f.write(memoryview(orderArray))
self.data.append(
{"name": "order", "type": "array", "fileName": "/order.uint8"}
)
# Encode Normals (sorted order way)
if "normal" in self.layers[0]:
sortedNormal = vtkUnsignedCharArray()
sortedNormal.SetNumberOfComponents(3) # x,y,z
sortedNormal.SetNumberOfTuples(stackSize)
# Get Camera orientation and rotation information
camDir = [0, 0, 0]
worldUp = [0, 0, 0]
with open(os.path.join(directory, "camera.json"), "r") as f:
camera = json.load(f)
camDir = normalize(
[camera["position"][i] - camera["focalPoint"][i] for i in range(3)]
)
worldUp = normalize(camera["viewUp"])
# [ camRight, camUp, camDir ] will be our new orthonormal basis for normals
camRight = vectProduct(camDir, worldUp)
camUp = vectProduct(camRight, camDir)
# Tmp structure to capture (x,y,z) normal
normalByLayer = vtkFloatArray()
normalByLayer.SetNumberOfComponents(3)
normalByLayer.SetNumberOfTuples(stackSize)
# Capture all layer normals
layerIdx = 0
zPosCount = 0
zNegCount = 0
for layer in self.layers:
normalOffset = layer["normal"]
for idx in range(imageSize):
normalByLayer.SetTuple3(
layerIdx * imageSize + idx,
getScalarFromRGB(
rgbArray.GetTuple(idx + normalOffset[0] * imageSize)
),
getScalarFromRGB(
rgbArray.GetTuple(idx + normalOffset[1] * imageSize)
),
getScalarFromRGB(
rgbArray.GetTuple(idx + normalOffset[2] * imageSize)
),
)
# Re-orient normal to be view based
vect = normalByLayer.GetTuple3(layerIdx * imageSize + idx)
if not math.isnan(vect[0]):
# Express normal in new basis we computed above
rVect = normalize(
[
-dotProduct(vect, camRight),
dotProduct(vect, camUp),
dotProduct(vect, camDir),
]
)
# Need to reverse vector ?
if rVect[2] < 0:
normalByLayer.SetTuple3(
layerIdx * imageSize + idx,
-rVect[0],
-rVect[1],
-rVect[2],
)
else:
normalByLayer.SetTuple3(
layerIdx * imageSize + idx, rVect[0], rVect[1], rVect[2]
)
layerIdx += 1
# Sort normals and encode them as 3 bytes ( -1 < xy < 1 | 0 < z < 1)
for idx in range(stackSize):
layerIdx = int(orderArray.GetValue(idx))
if layerIdx == 255:
# No normal => same as view direction
sortedNormal.SetTuple3(idx, 128, 128, 255)
else:
offset = layerIdx * imageSize
imageIdx = idx % imageSize
vect = normalByLayer.GetTuple3(imageIdx + offset)
if (
not math.isnan(vect[0])
and not math.isnan(vect[1])
and not math.isnan(vect[2])
):
sortedNormal.SetTuple3(
idx,
int(127.5 * (vect[0] + 1)),
int(127.5 * (vect[1] + 1)),
int(255 * vect[2]),
)
else:
print(
"WARNING: encountered NaN in normal of layer ",
layerIdx,
": [",
vect[0],
",",
vect[1],
",",
vect[2],
"]",
)
sortedNormal.SetTuple3(idx, 128, 128, 255)
# Write the sorted data
with open(os.path.join(directory, "normal.uint8"), "wb") as f:
f.write(memoryview(sortedNormal))
self.data.append(
{
"name": "normal",
"type": "array",
"fileName": "/normal.uint8",
"categories": ["normal"],
}
)
# Encode Intensity (sorted order way)
if "intensity" in self.layers[0]:
intensityOffsets = []
sortedIntensity = vtkUnsignedCharArray()
sortedIntensity.SetNumberOfTuples(stackSize)
for layer in self.layers:
intensityOffsets.append(layer["intensity"])
for idx in range(stackSize):
layerIdx = int(orderArray.GetValue(idx))
if layerIdx == 255:
sortedIntensity.SetValue(idx, 255)
else:
offset = 3 * intensityOffsets[layerIdx] * imageSize
imageIdx = idx % imageSize
sortedIntensity.SetValue(
idx, rgbArray.GetValue(imageIdx * 3 + offset)
)
with open(os.path.join(directory, "intensity.uint8"), "wb") as f:
f.write(memoryview(sortedIntensity))
self.data.append(
{
"name": "intensity",
"type": "array",
"fileName": "/intensity.uint8",
"categories": ["intensity"],
}
)
# Encode Each layer Scalar
layerIdx = 0
for layer in self.layers:
for scalar in layer:
if scalar not in ["intensity", "normal"]:
offset = imageSize * layer[scalar]
scalarRange = self.config["scene"][layerIdx]["colors"][scalar][
"range"
]
delta = (
scalarRange[1] - scalarRange[0]
) / 16777215.0 # 2^24 - 1 => 16,777,215
scalarArray = vtkFloatArray()
scalarArray.SetNumberOfTuples(imageSize)
for idx in range(imageSize):
rgb = rgbArray.GetTuple(idx + offset)
if rgb[0] != 0 or rgb[1] != 0 or rgb[2] != 0:
# Decode encoded value
value = scalarRange[0] + delta * float(
rgb[0] * 65536 + rgb[1] * 256 + rgb[2] - 1
)
scalarArray.SetValue(idx, value)
else:
# No value
scalarArray.SetValue(idx, float("NaN"))
with open(
os.path.join(directory, "%d_%s.float32" % (layerIdx, scalar)),
"wb",
) as f:
f.write(memoryview(scalarArray))
self.data.append(
{
"name": "%d_%s" % (layerIdx, scalar),
"type": "array",
"fileName": "/%d_%s.float32" % (layerIdx, scalar),
"categories": ["%d_%s" % (layerIdx, scalar)],
}
)
layerIdx += 1
# -----------------------------------------------------------------------------
# Composite Sprite to Sorted Composite Dataset Builder
# -----------------------------------------------------------------------------
class ConvertCompositeDataToSortedStack(object):
def __init__(self, directory):
self.basePath = directory
self.layers = []
self.data = []
self.imageReader = vtkPNGReader()
# Load JSON metadata
with open(os.path.join(directory, "config.json"), "r") as f:
self.config = json.load(f)
self.nbLayers = len(self.config["scene"])
while len(self.layers) < self.nbLayers:
self.layers.append({})
with open(os.path.join(directory, "index.json"), "r") as f:
self.info = json.load(f)
def listData(self):
return self.data
def convert(self):
for root, dirs, files in os.walk(self.basePath):
if "depth_0.float32" in files:
print("Process", root)
self.processDirectory(root)
def processDirectory(self, directory):
# Load depth
depthStack = []
imageSize = self.config["size"]
linearSize = imageSize[0] * imageSize[1]
nbLayers = len(self.layers)
stackSize = nbLayers * linearSize
layerList = range(nbLayers)
for layerIdx in layerList:
with open(
os.path.join(directory, "depth_%d.float32" % layerIdx), "rb"
) as f:
a = array.array("f")
a.fromfile(f, linearSize)
depthStack.append(a)
orderArray = vtkUnsignedCharArray()
orderArray.SetName("layerIdx")
orderArray.SetNumberOfComponents(1)
orderArray.SetNumberOfTuples(stackSize)
pixelSorter = [(i, i) for i in layerList]
for pixelId in range(linearSize):
# Fill pixelSorter
for layerIdx in layerList:
if depthStack[layerIdx][pixelId] < 1.0:
pixelSorter[layerIdx] = (layerIdx, depthStack[layerIdx][pixelId])
else:
pixelSorter[layerIdx] = (255, 1.0)
# Sort pixel layers
pixelSorter.sort(key=lambda tup: tup[1])
# Fill sortedOrder array
for layerIdx in layerList:
orderArray.SetValue(
layerIdx * linearSize + pixelId, pixelSorter[layerIdx][0]
)
# Write order (sorted order way)
with open(os.path.join(directory, "order.uint8"), "wb") as f:
f.write(memoryview(orderArray))
self.data.append(
{"name": "order", "type": "array", "fileName": "/order.uint8"}
)
# Remove depth files
for layerIdx in layerList:
os.remove(os.path.join(directory, "depth_%d.float32" % layerIdx))
# Encode Normals (sorted order way)
if "normal" in self.config["light"]:
sortedNormal = vtkUnsignedCharArray()
sortedNormal.SetNumberOfComponents(3) # x,y,z
sortedNormal.SetNumberOfTuples(stackSize)
# Get Camera orientation and rotation information
camDir = [0, 0, 0]
worldUp = [0, 0, 0]
with open(os.path.join(directory, "camera.json"), "r") as f:
camera = json.load(f)
camDir = normalize(
[camera["position"][i] - camera["focalPoint"][i] for i in range(3)]
)
worldUp = normalize(camera["viewUp"])
# [ camRight, camUp, camDir ] will be our new orthonormal basis for normals
camRight = vectProduct(camDir, worldUp)
camUp = vectProduct(camRight, camDir)
# Tmp structure to capture (x,y,z) normal
normalByLayer = vtkFloatArray()
normalByLayer.SetNumberOfComponents(3)
normalByLayer.SetNumberOfTuples(stackSize)
# Capture all layer normals
zPosCount = 0
zNegCount = 0
for layerIdx in layerList:
# Load normal(x,y,z) from current layer
normalLayer = []
for comp in [0, 1, 2]:
with open(
os.path.join(
directory, "normal_%d_%d.float32" % (layerIdx, comp)
),
"rb",
) as f:
a = array.array("f")
a.fromfile(f, linearSize)
normalLayer.append(a)
# Store normal inside vtkArray
offset = layerIdx * linearSize
for idx in range(linearSize):
normalByLayer.SetTuple3(
idx + offset,
normalLayer[0][idx],
normalLayer[1][idx],
normalLayer[2][idx],
)
# Re-orient normal to be view based
vect = normalByLayer.GetTuple3(layerIdx * linearSize + idx)
if not math.isnan(vect[0]):
# Express normal in new basis we computed above
rVect = normalize(
[
-dotProduct(vect, camRight),
dotProduct(vect, camUp),
dotProduct(vect, camDir),
]
)
# Need to reverse vector ?
if rVect[2] < 0:
normalByLayer.SetTuple3(
layerIdx * linearSize + idx,
-rVect[0],
-rVect[1],
-rVect[2],
)
else:
normalByLayer.SetTuple3(
layerIdx * linearSize + idx,
rVect[0],
rVect[1],
rVect[2],
)
# Sort normals and encode them as 3 bytes ( -1 < xy < 1 | 0 < z < 1)
for idx in range(stackSize):
layerIdx = int(orderArray.GetValue(idx))
if layerIdx == 255:
# No normal => same as view direction
sortedNormal.SetTuple3(idx, 128, 128, 255)
else:
offset = layerIdx * linearSize
imageIdx = idx % linearSize
vect = normalByLayer.GetTuple3(imageIdx + offset)
if (
not math.isnan(vect[0])
and not math.isnan(vect[1])
and not math.isnan(vect[2])
):
sortedNormal.SetTuple3(
idx,
int(127.5 * (vect[0] + 1)),
int(127.5 * (vect[1] + 1)),
int(255 * vect[2]),
)
else:
print(
"WARNING: encountered NaN in normal of layer ",
layerIdx,
": [",
vect[0],
",",
vect[1],
",",
vect[2],
"]",
)
sortedNormal.SetTuple3(idx, 128, 128, 255)
# Write the sorted data
with open(os.path.join(directory, "normal.uint8"), "wb") as f:
f.write(memoryview(sortedNormal))
self.data.append(
{
"name": "normal",
"type": "array",
"fileName": "/normal.uint8",
"categories": ["normal"],
}
)
# Remove depth files
for layerIdx in layerList:
os.remove(
os.path.join(directory, "normal_%d_%d.float32" % (layerIdx, 0))
)
os.remove(
os.path.join(directory, "normal_%d_%d.float32" % (layerIdx, 1))
)
os.remove(
os.path.join(directory, "normal_%d_%d.float32" % (layerIdx, 2))
)
# Encode Intensity (sorted order way)
if "intensity" in self.config["light"]:
sortedIntensity = vtkUnsignedCharArray()
sortedIntensity.SetNumberOfTuples(stackSize)
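            # Re-order the per-layer intensity values so that entry idx matches the layer picked in orderArray;
            # pixels with no layer (index 255) are written as full intensity (255).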
intensityLayers = []
for layerIdx in layerList:
with open(
os.path.join(directory, "intensity_%d.uint8" % layerIdx), "rb"
) as f:
a = array.array("B")
a.fromfile(f, linearSize)
intensityLayers.append(a)
for idx in range(stackSize):
layerIdx = int(orderArray.GetValue(idx))
if layerIdx == 255:
sortedIntensity.SetValue(idx, 255)
else:
imageIdx = idx % linearSize
sortedIntensity.SetValue(idx, intensityLayers[layerIdx][imageIdx])
with open(os.path.join(directory, "intensity.uint8"), "wb") as f:
f.write(memoryview(sortedIntensity))
self.data.append(
{
"name": "intensity",
"type": "array",
"fileName": "/intensity.uint8",
"categories": ["intensity"],
}
)
            # Remove the temporary per-layer intensity files
for layerIdx in layerList:
os.remove(os.path.join(directory, "intensity_%d.uint8" % layerIdx))
| UTF-8 | Python | false | false | 24,782 | py | 5,743 | data_converter.py | 3,836 | 0.453071 | 0.438867 | 0 | 644 | 37.481366 | 88 |
17tangs/CPP | 7,851,200,260,332 | bde8891980a4633e512b665696bdb027e86bd0a0 | 2bd90ca0875148b19bc273d423f2b4bdee451841 | /Main.py | b714328ee79c3788190987c2807d4650372878e9 | [] | no_license | https://github.com/17tangs/CPP | af54884fda274f77cdf326c17180d31e938587b1 | 0b0f996f5728c59043e30cf03a55c7fd320dc5e5 | refs/heads/master | 2021-01-13T00:36:47.867158 | 2016-04-13T05:08:34 | 2016-04-13T05:08:34 | 52,191,204 | 0 | 0 | null | false | 2016-03-07T04:35:55 | 2016-02-21T04:49:50 | 2016-02-21T05:03:44 | 2016-03-07T04:35:55 | 3,522 | 0 | 0 | 0 | Python | null | null | import time
import numpy as np
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from Population import *
from R import *
from random import *
start_time = time.time()
#the best solution found on the internet at this time uses annealing
#after 400,000 iterations, the creator of that approach found the optimal distance to be 10,618 miles = 17,088 km
#http://toddwschneider.com/posts/traveling-salesman-with-simulated-annealing-r-and-shiny/
class CPP:
#probabilities of weave methods
wp1 = 0.5
wp2 = 1-wp1
#size of two weave functions
ws1 = 24
ws2 = 8
#probabilities of mutations
rp = 0.3 #reverse
sp = 0.2 #shift
#different greedy alg
greedy = 10
#number of iterations
I = 5000
def main(self):
##create a population class using Population.py
#p = Population()
##generate the population based on the greedy algorithm
## p.greedy(0)
#for i in range(10):
#p.greedy(i)
##generate a certain number of random solutions
#p.add_random(950)
##call and print the statistics of the iteration
#averages = []
#bests = []
#worsts = []
#for i in range(CPP.I):
#averages.append(p.average)
#bests.append(p.best)
##worsts.append(p.worst)
#p.breed(1)#wp1, wp2, ws1, ws2, rp, sp)
size = [2,4,6,8,12,24]
A = []
B = []
for j in range(1):
p1 = Population()
for i in range(10):
p1.greedy(i)
p1.add_random(950)
a1 = []
b1 = []
for i in range(CPP.I):
a1.append(p1.average)
b1.append(p1.best)
p1.breed(size[j])
A.append(a1)
B.append(b1)
print p1.stat()
self.stat(A,B,size)
  self.draw(p1.pop)
##DISPLAY
#plots the cross-iteration trend of averages
def stat(self, A, B, size):
x = [i for i in range(CPP.I)]
c = ["b","g","r","c","m","y","k","w"]
l = []
for i in range(len(A)):
l1, = plt.plot(x, A[i], color = c[i], label = str(size[i]))
l.append(l1,)
plt.axis([0,CPP.I,0,100000])
plt.legend(handles=l)
plt.show()
#draws a map of the US and displays the solutions graphically
def draw(self, pop):
fig=plt.figure()
ax=fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(llcrnrlon=-125.,llcrnrlat=25.,urcrnrlon=-65.,urcrnrlat=52.,
rsphere=(6378137.00,6356752.3142),
resolution='l',projection='merc',
lat_0=40.,lon_0=-20.,lat_ts=20.)
l = pop[0]
for i in range(len(l.sol)):
lat1 = l.sol[i].lat
lon1 = l.sol[i].lon
m.drawgreatcircle(lon1,lat1,lon1,lat1, linewidth=4, color = 'r')
if i == len(l.sol) - 1:
lat2 = l.sol[0].lat
lon2 = l.sol[0].lon
else:
lat2 = l.sol[i+1].lat
lon2 = l.sol[i+1].lon
m.drawgreatcircle(lon1,lat1,lon2,lat2, color = 'b')
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.fillcontinents()
ax.set_title('GREEDY')
plt.show()
##RECYCLING BIN
#Methods that read data.txt and generate the lists C, CCOR, CDIS and CS.
#The data is exported to R.py where it can be referenced upon later.
#Once they run, there's no need to run it again.
"""
def shortest(self, c, l):
m = sys.maxint
ind = 0
for i in range(len(CDIS)):
if C[i] not in l:
if self.getDist(c, C[i]) != 0:
if self.getDist(c, C[i]) < m:
m = self.getDist(c, C[i])
ind = i
return C[ind]
def init(self):
f = open("data.txt", "r")
for i in range(48):
e = []
for k in range(8):
s = f.readline()
if k % 2 == 0:
if(s[-2] == "\r"):
s = s[:-2]
else:
s = s[:-1]
#excluding empty lines and name of state
if(s[:4] != "Name"):
#slicing string for only the name of capital and longtitude/latitude
e.append(s.split(":")[1][1:])
#append each small list city list for 2D array
CCOR.append(e)
for y in range(len(CCOR)):
dis = []
for x in range(len(CCOR)):
lat1 = radians(float(CCOR[x][1]))
lon1 = radians(float(CCOR[x][2]))
lat2 = radians(float(CCOR[y][1]))
lon2 = radians(float(CCOR[y][2]))
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2))**2
n = 2 * atan2( sqrt(a), sqrt(1-a) )
d = R * n
dis.append(int(d))
CDIS.append(dis)
for z in range(len(CCOR)):
C.append(CCOR[z][0])
f1 = open("R.py", "w")
f1.write("R = 6371\n")
f1.write("C = " + str(C) + "\n" )
f1.write("CCOR = " + str(CCOR) + "\n")
f1.write("CDIS = " + str(CDIS) + "\n")
f1.close()
def seed_greedy(self, l,i):
if i == len(C) - 1:
return
else:
lis = [n.name for n in l]
k = 0
while l[i].s[k] in lis:
k += 1
l.append(CPP.CO[C.index(l[i].s[k])])
self.seed_greedy(l,i + 1)
def ss(self):
CS = []
for c in C:
CS.append(self.s(c))
f = open("R.py", "a")
f.write("CS = " + str(CS))
f.close()
return
def s(self, c):
l = [c]
for x in range(len(C)-1):
m = sys.maxint
ind = 0
for i in range(len(CDIS)):
if C[i] not in l:
if self.getDist(c, C[i]) != 0:
if self.getDist(c, C[i]) < m:
m = self.getDist(c, C[i])
ind = i
l.append(C[ind])
return l
"""
##CALLING MAIN FUNCTION
x = CPP()
x.main()
#printing the elapsed time to complete I iterations
print("--- %s seconds ---" % (time.time() - start_time)) | UTF-8 | Python | false | false | 7,104 | py | 6 | Main.py | 5 | 0.427083 | 0.398367 | 0 | 212 | 31.528302 | 104 |
drc-ima/hospital_project | 7,791,070,701,113 | 9ddf463f84e7f7f0c592edbf585ef63b07124c27 | 29a7f665d20880db00a8b6cd567244fe6dab1872 | /apps/user/models.py | a3d53b5a33959b36da724b3dc77dad56924cb088 | [] | no_license | https://github.com/drc-ima/hospital_project | ab7b1a9178ab58517229dcbe8f8dc57046491015 | 8abbdf569578fb6d44dcb90fb6bc28ba97237ebc | refs/heads/main | 2023-01-15T12:36:16.185323 | 2020-11-24T16:18:07 | 2020-11-24T16:18:07 | 303,461,051 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth.base_user import BaseUserManager, AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.utils import timezone
class UserManager(BaseUserManager):
def create_user(self, staff_id, first_name, last_name, password=None, **extra_fields):
if not staff_id:
raise ValueError('Staff Id is required')
user = self.model(
staff_id=staff_id,
first_name=first_name,
username=staff_id,
last_name=last_name,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, staff_id, first_name, last_name, password):
user = self.create_user(staff_id=staff_id, first_name=first_name, last_name=last_name, password=password)
user.is_superuser = True
user.is_active = True
user.is_staff = True
user.username = user.staff_id
user.save(using=self._db)
return user
# Choice lists are plain tuples (not sets) so their ordering stays deterministic for forms and migrations.
USERTYPE = (
    ('MU', 'Manager'),
    ('SU', 'Support'),
    ('NU', 'Normal Staff'),
    ('AD', 'Administrator'),
)
ROLES = (
    ('HR', 'Human Resource'),
    ('ACC', 'Accountant'),
    ('NRS', 'Nurse'),
    ('DR', 'Doctor'),
    ('CH', 'Cashier'),
    ('PHM', 'Pharmacist'),
    ('IT', 'IT'),
    ('HM', 'Hospital Manager'),
)
STATUS = (
    ('On Leave', 'On Leave'),
    ('Active', 'Active'),
    ('Suspended', 'Suspended'),
    ('Dismissed', 'Dismissed'),
)
class User(AbstractBaseUser, PermissionsMixin):
username = models.CharField(
max_length=255,
blank=True,
null=True
)
staff_id = models.CharField(
max_length=255,
unique=True,
)
user_type = models.CharField(
choices=USERTYPE,
max_length=255,
blank=True,
null=True,
)
status = models.CharField(max_length=100, blank=True, null=True, choices=STATUS)
role = models.CharField(
choices=ROLES,
max_length=255,
blank=True,
null=True
)
first_name = models.CharField(
max_length=100,
blank=True,
null=True
)
last_name = models.CharField(
max_length=100,
blank=True,
null=True
)
is_active = models.BooleanField(default=True)
is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
date_joined = models.DateTimeField(default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'staff_id'
REQUIRED_FIELDS = ['first_name', 'last_name']
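    # Fields (besides staff_id and the password) that the createsuperuser command prompts for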
def full_name(self):
return self.first_name + ' ' + self.last_name
def user_kind(self):
return self.get_user_type_display() + ' - ' + self.get_role_display()
class Meta:
verbose_name_plural = 'Users'
verbose_name = 'user'
ordering = ('staff_id', 'date_joined')
db_table = 'user'
def __str__(self):
return str(self.staff_id) | UTF-8 | Python | false | false | 3,014 | py | 81 | models.py | 50 | 0.586928 | 0.57996 | 0 | 128 | 22.554688 | 113 |
summygupta/hackerrank_python | 9,199,819,949,767 | 03b4a81ca23233c9fdf43d5dd382f0186fc16502 | 7d416ec7872f632f66d1062ee5f606152dfb3ee8 | /String Split and Join.py | 59bd53173f332f9bad1e4bc493533f4fb3672cdf | [] | no_license | https://github.com/summygupta/hackerrank_python | 66e193bdd5e6526bbc0a095d94f70ae3218305a9 | 85638548e33baf87278e59dcfe2954491f70db3c | refs/heads/master | 2022-12-12T23:26:41.453896 | 2020-08-31T10:09:47 | 2020-08-31T10:09:47 | 286,259,481 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def split_and_join(line):
t = line.split(" ")
return ("-".join(t))
| UTF-8 | Python | false | false | 75 | py | 18 | String Split and Join.py | 18 | 0.533333 | 0.533333 | 0 | 3 | 24 | 25 |
salesforce/provis | 18,519,898,992,301 | b7548d02673f7d422dd996f2f3d5dc62753c480e | 5e206b314c058b9b89a15a9c4fd3cee49bf3bd8e | /protein_attention/attention_analysis/compute_edge_features.py | 262ce2ae7248ea941662858b088132a354864df5 | [
"BSD-3-Clause"
] | permissive | https://github.com/salesforce/provis | 80abd0b98173849c7dcc4c765a28d39eb255bb87 | 051fe89190d9ac74865a6f49a3c25fd7b0fcca57 | refs/heads/master | 2023-08-29T00:53:31.325955 | 2023-05-01T19:22:27 | 2023-05-01T19:22:27 | 272,459,149 | 292 | 49 | NOASSERTION | false | 2023-06-12T21:27:47 | 2020-06-15T14:22:26 | 2023-06-12T01:04:40 | 2023-06-12T21:27:47 | 7,535 | 273 | 46 | 3 | Python | false | false | """Compute aggregate statistics of attention edge features over a dataset
Copyright (c) 2020, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import re
from collections import defaultdict
import numpy as np
import torch
from tqdm import tqdm
def compute_mean_attention(model,
n_layers,
n_heads,
items,
features,
tokenizer,
model_name,
model_version,
cuda=True,
max_seq_len=None,
min_attn=0):
model.eval()
with torch.no_grad():
# Dictionary that maps feature_name to array of shape (n_layers, n_heads), containing
# weighted sum of feature values for each layer/head over all examples
feature_to_weighted_sum = defaultdict(lambda: torch.zeros((n_layers, n_heads), dtype=torch.double))
        # Sum of attention weights in each layer/head over all examples
weight_total = torch.zeros((n_layers, n_heads), dtype=torch.double)
for item in tqdm(items):
# Get attention weights, shape is (num_layers, num_heads, seq_len, seq_len)
attns = get_attention(model,
item,
tokenizer,
model_name,
model_version,
cuda,
max_seq_len)
if attns is None:
print('Skipping due to not returning attention')
continue
            # Update total attention weights per head. Sum over from_index (dim 2), to_index (dim 3)
mask = attns >= min_attn
weight_total += mask.long().sum((2, 3))
# Update weighted sum of feature values per head
seq_len = attns.size(2)
for to_index in range(seq_len):
for from_index in range(seq_len):
for feature in features:
# Compute feature values
feature_dict = feature.get_values(item, from_index, to_index)
for feature_name, value in feature_dict.items():
# Update weighted sum of feature values across layers and heads
mask = attns[:, :, from_index, to_index] >= min_attn
feature_to_weighted_sum[feature_name] += mask * value
return feature_to_weighted_sum, weight_total
def get_attention(model,
item,
tokenizer,
model_name,
model_version,
cuda,
max_seq_len):
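    # Returns a (num_layers, num_heads, seq_len, seq_len) tensor of attention weights with the
    # special tokens stripped, or None if tokenization changed the sequence length unexpectedly.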
tokens = item['primary']
if model_name == 'bert':
if max_seq_len:
tokens = tokens[:max_seq_len - 2] # Account for SEP, CLS tokens (added in next step)
if model_version in ('prot_bert', 'prot_bert_bfd', 'prot_albert'):
formatted_tokens = ' '.join(list(tokens))
formatted_tokens = re.sub(r"[UZOB]", "X", formatted_tokens)
token_idxs = tokenizer.encode(formatted_tokens)
else:
token_idxs = tokenizer.encode(tokens)
if isinstance(token_idxs, np.ndarray):
token_idxs = token_idxs.tolist()
if max_seq_len:
assert len(token_idxs) == min(len(tokens) + 2, max_seq_len), (tokens, token_idxs, max_seq_len)
else:
assert len(token_idxs) == len(tokens) + 2
elif model_name == 'xlnet':
if max_seq_len:
tokens = tokens[:max_seq_len - 2] # Account for SEP, CLS tokens (added in next step)
formatted_tokens = ' '.join(list(tokens))
formatted_tokens = re.sub(r"[UZOB]", "X", formatted_tokens)
token_idxs = tokenizer.encode(formatted_tokens)
if isinstance(token_idxs, np.ndarray):
token_idxs = token_idxs.tolist()
if max_seq_len:
# Skip rare sequence with this issue
if len(token_idxs) != min(len(tokens) + 2, max_seq_len):
print('Warning: the length of the sequence changed through tokenization, skipping')
return None
else:
assert len(token_idxs) == len(tokens) + 2
else:
raise ValueError
inputs = torch.tensor(token_idxs).unsqueeze(0)
if cuda:
inputs = inputs.cuda()
attns = model(inputs)[-1]
if model_name == 'bert':
# Remove attention from <CLS> (first) and <SEP> (last) token
attns = [attn[:, :, 1:-1, 1:-1] for attn in attns]
elif model_name == 'xlnet':
# Remove attention from <CLS> (last) and <SEP> (second to last) token
attns = [attn[:, :, :-2, :-2] for attn in attns]
else:
raise NotImplementedError
if 'contact_map' in item:
assert (item['contact_map'].shape == attns[0][0, 0].shape) or (attns[0][0, 0].shape[0] == max_seq_len - 2), \
(item['id'], item['contact_map'].shape, attns[0][0, 0].shape)
if 'site_indic' in item:
assert (item['site_indic'].shape == attns[0][0, 0, 0].shape) or (attns[0][0, 0].shape[0] == max_seq_len - 2), \
item['id']
if 'modification_indic' in item:
assert (item['modification_indic'].shape == attns[0][0, 0, 0].shape) or (
attns[0][0, 0].shape[0] == max_seq_len - 2), \
item['id']
attns = torch.stack([attn.squeeze(0) for attn in attns])
return attns.cpu()
def convert_item(dataset_name, x, data, model_name, features):
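    # Build a plain dict for one example (id, primary sequence and the labels required by `features`),
    # trimming label arrays so they line up with the attention matrices once special tokens are removed.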
item = {}
try:
item['id'] = data['id']
except ValueError:
item['id'] = data['id'].decode('utf8')
item['primary'] = data['primary']
if dataset_name == 'proteinnet':
if 'contact_map' in features:
token_ids, input_mask, contact_map, protein_length = x
item['contact_map'] = contact_map
elif dataset_name == 'secondary':
if 'ss4' in features:
ss8_blank_index = 7
ss4_blank_index = 3
item['secondary'] = [ss4_blank_index if ss8 == ss8_blank_index else ss3 for ss3, ss8 in \
zip(data['ss3'], data['ss8'])]
elif dataset_name == 'binding_sites':
if 'binding_sites' in features:
token_ids, input_mask, site_indic = x
item['site_indic'] = site_indic
elif dataset_name == 'protein_modifications':
if 'protein_modifications' in features:
token_ids, input_mask, modification_indic = x
item['modification_indic'] = modification_indic
else:
raise ValueError
if model_name == 'bert':
# Remove label values from <CLS> (first) and <SEP> (last) token
if 'site_indic' in item:
item['site_indic'] = item['site_indic'][1:-1]
if 'modification_indic' in item:
item['modification_indic'] = item['modification_indic'][1:-1]
elif model_name == 'xlnet':
# Remove label values from <CLS> (last) and <SEP> (second to last) token
if 'site_indic' in item:
item['site_indic'] = item['site_indic'][:-2]
if 'modification_indic' in item:
item['modification_indic'] = item['modification_indic'][:-2]
else:
raise NotImplementedError
return item
if __name__ == "__main__":
import pickle
import pathlib
from transformers import BertModel, AutoTokenizer, XLNetModel, XLNetTokenizer, AlbertModel, AlbertTokenizer
from tape import TAPETokenizer, ProteinBertModel
from tape.datasets import ProteinnetDataset, SecondaryStructureDataset
from protein_attention.datasets import BindingSiteDataset, ProteinModificationDataset
from protein_attention.utils import get_cache_path, get_data_path
from protein_attention.attention_analysis.features import AminoAcidFeature, SecStructFeature, BindingSiteFeature, \
ContactMapFeature, ProteinModificationFeature
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', required=True, help='Name of experiment. Used to create unique filename.')
parser.add_argument('--features', nargs='+', required=True, help='list of features')
parser.add_argument('--dataset', required=True, help='Dataset id')
parser.add_argument('--num-sequences', type=int, required=True, help='Number of sequences to analyze')
parser.add_argument('--model', default='bert', help='Name of model.')
parser.add_argument('--model-version', help='Name of model version.')
parser.add_argument('--model_dir', help='Optional directory where pretrained model is located')
parser.add_argument('--shuffle', action='store_true', help='Whether to randomly shuffle data')
parser.add_argument('--max-seq-len', type=int, required=True, help='Max sequence length')
parser.add_argument('--seed', type=int, default=123, help='PyTorch seed')
parser.add_argument('--min-attn', type=float, help='min attention value for inclusion in analysis')
parser.add_argument('--no_cuda', action='store_true', help='CPU only')
args = parser.parse_args()
print(args)
if args.model_version and args.model_dir:
raise ValueError('Cannot specify both model version and directory')
if args.num_sequences is not None and not args.shuffle:
print('WARNING: You are using a subset of sequences and you are not shuffling the data. This may result '
'in a skewed sample.')
cuda = not args.no_cuda
torch.manual_seed(args.seed)
if args.dataset == 'proteinnet':
dataset = ProteinnetDataset(get_data_path(), 'train')
elif args.dataset == 'secondary':
dataset = SecondaryStructureDataset(get_data_path(), 'train')
elif args.dataset == 'binding_sites':
dataset = BindingSiteDataset(get_data_path(), 'train')
elif args.dataset == 'protein_modifications':
dataset = ProteinModificationDataset(get_data_path(), 'train')
else:
raise ValueError(f"Invalid dataset id: {args.dataset}")
if not args.num_sequences:
raise NotImplementedError
if args.model == 'bert':
if args.model_dir:
model_version = args.model_dir
else:
model_version = args.model_version or 'bert-base'
if model_version == 'prot_bert_bfd':
model = BertModel.from_pretrained("Rostlab/prot_bert_bfd", output_attentions=True)
tokenizer = AutoTokenizer.from_pretrained("Rostlab/prot_bert_bfd", do_lower_case=False)
elif model_version == 'prot_bert':
model = BertModel.from_pretrained("Rostlab/prot_bert", output_attentions=True)
tokenizer = AutoTokenizer.from_pretrained("Rostlab/prot_bert", do_lower_case=False)
elif model_version == 'prot_albert':
model = AlbertModel.from_pretrained("Rostlab/prot_albert", output_attentions=True)
tokenizer = AlbertTokenizer.from_pretrained("Rostlab/prot_albert", do_lower_case=False)
else:
model = ProteinBertModel.from_pretrained(model_version, output_attentions=True)
tokenizer = TAPETokenizer()
num_layers = model.config.num_hidden_layers
num_heads = model.config.num_attention_heads
elif args.model == 'xlnet':
model_version = args.model_version
if model_version == 'prot_xlnet':
model = XLNetModel.from_pretrained("Rostlab/prot_xlnet", output_attentions=True)
tokenizer = XLNetTokenizer.from_pretrained("Rostlab/prot_xlnet", do_lower_case=False)
else:
raise ValueError('Invalid model version')
num_layers = model.config.n_layer
num_heads = model.config.n_head
else:
raise ValueError(f"Invalid model: {args.model}")
print('Layers:', num_layers)
print('Heads:', num_heads)
if cuda:
model.to('cuda')
if args.shuffle:
random_indices = torch.randperm(len(dataset))[:args.num_sequences].tolist()
items = []
print('Loading dataset')
for i in tqdm(random_indices):
item = convert_item(args.dataset, dataset[i], dataset.data[i], args.model, args.features)
items.append(item)
else:
raise NotImplementedError
features = []
for feature_name in args.features:
if feature_name == 'aa':
features.append(AminoAcidFeature())
elif feature_name == 'ss4':
features.append(SecStructFeature())
elif feature_name == 'binding_sites':
features.append(BindingSiteFeature())
elif feature_name == 'protein_modifications':
features.append(ProteinModificationFeature())
elif feature_name == 'contact_map':
features.append(ContactMapFeature())
else:
raise ValueError(f"Invalid feature name: {feature_name}")
feature_to_weighted_sum, weight_total = compute_mean_attention(
model,
num_layers,
num_heads,
items,
features,
tokenizer,
args.model,
model_version,
cuda,
max_seq_len=args.max_seq_len,
min_attn=args.min_attn)
cache_dir = get_cache_path()
pathlib.Path(cache_dir).mkdir(parents=True, exist_ok=True)
path = cache_dir / f'{args.exp_name}.pickle'
pickle.dump((args, dict(feature_to_weighted_sum), weight_total), open(path, 'wb'))
print('Wrote to', path)
| UTF-8 | Python | false | false | 13,708 | py | 32 | compute_edge_features.py | 26 | 0.59447 | 0.588634 | 0 | 320 | 41.8375 | 119 |
A8IK/Python-2 | 5,360,119,212,848 | 35a4b734bb85edbae4737173c825f7414974d208 | 54d3a1558a4bd38888d4d51f1ae2d2699965087c | /array.py | df7a56f511a10410349ec8c940b4aebe768e9bbe | [] | no_license | https://github.com/A8IK/Python-2 | a86843c6ccfe23d42faebb020307351a108075bd | 538aee64bac73110cd0a8ac74747c9d2fa485149 | refs/heads/main | 2023-01-21T12:42:51.226144 | 2020-12-04T18:14:32 | 2020-12-04T18:14:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from array import*
vals=array('i',[1,2,3,4,-5,9,10])
print (vals) | UTF-8 | Python | false | false | 67 | py | 82 | array.py | 82 | 0.626866 | 0.507463 | 0 | 3 | 21 | 33 |
brjagger/mmvt_seekr | 566,935,708,805 | cb29c68e4832357cdad37a0db083468a033190f7 | c885c0fdf8ff049d71f12fa7e06e56b892243826 | /mmvt_seekr/apbs.py | 7697fc2b6aafd37d2d8d50ecf6b94329b6124339 | [
"MIT"
] | permissive | https://github.com/brjagger/mmvt_seekr | dc0d4a82a44c004b802afbe79336678d7f1be943 | 9ee0f43716e64e16239bbf14e3eca6df6534f758 | refs/heads/master | 2023-07-03T10:22:05.976144 | 2021-08-11T16:06:24 | 2021-08-11T16:06:24 | 207,873,958 | 1 | 1 | MIT | false | 2021-08-11T16:06:25 | 2019-09-11T17:56:36 | 2020-09-02T22:46:55 | 2021-08-11T16:06:25 | 13,107 | 1 | 1 | 1 | Jupyter Notebook | false | false | #!/usr/bin/python
'''
apbs.py
creates the necessary files to run an electrostatic simulation using APBS
'''
import os, sys, shutil #, math, subprocess #, make_fxd
#import numpy as np
import pdb2 as pdb
#from copy import deepcopy # needed to keep track of separate structure objects
import unittest
import re
#import datetime
from adv_template import *
verbose = True
parser = pdb.Big_PDBParser()
self_path = os.path.dirname(os.path.realpath(__file__)) # get the path to this script
apbs_input_template_location = os.path.join(self_path, 'apbs_input.template')
test_inputgen_location = "./inputgen.py"
test_apbs_location = "apbs"
test_pqr_filename = "../test/1cbj.pqr"
default_apbs_params = {
'pqr':'',
'dimx':'65',
'dimy':'65',
'dimz':'65',
'cglenx':'100.0000',
'cgleny':'100.0000',
'cglenz':'100.0000',
'fglenx':'65.0000',
'fgleny':'65.0000',
'fglenz':'65.0000',
'boundary_condition':'sdh',
'lpbe_npbe':'lpbe',
'solute_dielec':'2.0',
'solvent_dielec':'78.5400',
#'ion1crg':'-1.00',
#'ion1conc':'0.150',
#'ion1rad':'1.8150',
#'ion2crg':'1.00',
#'ion2conc':'0.150',
#'ion2rad':'1.8750',
'temp':'310.0',
'stem':'pot',
}
default_inputgen_settings = {
'fadd':'60',
'gmemceil':'64000',
'resolution':'0.5',
'ionic_str':'0.15',
'cfac':'4.0',
}
def make_apbs_input_using_inputgen(inputgen_filename, pqr_filename, fadd=60, cfac=4.0, gmemceil=64000, resolution=0.5, ionic_str=0.15):
"""makes an apbs input file given a pqr file & other parameters. See Inputgen.py in PDB2PQR documentation for descriptions of other arguments"""
pqr_basename= os.path.basename(pqr_filename)
#pre_ext = '.'.join(pqr_filename.split('.')[0:-1]) # the part of the filename before the extension
pqr_abspath=os.path.dirname(os.path.abspath(pqr_filename))
#print 'pqr abspath', pqr_abspath
pre_ext = (pqr_basename.split('.'))[0]
#print "pre_ext", pre_ext
assert os.path.exists(inputgen_filename)
runstring = "python %s --potdx --fadd=%s --cfac=%s --space=%s --gmemceil=%s --istrng=%s %s" % (inputgen_filename, fadd, cfac, resolution, gmemceil, ionic_str, pqr_basename, )
olddir = os.path.abspath(os.curdir)
print("oldir", olddir)
#print 'curdir', os.curdir
os.chdir(os.path.dirname(pqr_filename))
print('curdir', os.path.abspath(os.curdir))
print("Now creating APBS input file using command:", runstring)
#print 'PWD', os.getcwd()
os.system(runstring)
os.chdir(olddir)
print("return to inputgen olddir", os.path.abspath(os.curdir))
#print 'pqr filename', pqr_filename
#pqr_basename= os.path.basename(pqr_filename)
#pre_ext = '.'.join(pqr_filename.split('.')[0:-1]) # the part of the filename before the extension
#pqr_abspath=os.path.dirname(os.path.abspath(pqr_filename))
#print 'pqr abspath', pqr_abspath
#pre_ext = (pqr_basename.split('.'))[0]
#print "pre_ext", pre_ext
input_filename = os.path.join(pqr_abspath+'/'+ pre_ext+'.in')
print("APBS input_filename", input_filename)
return input_filename
#def make_apbs_input_using_inputgen(inputgen_filename, pqr_filename, fadd=60, cfac=4.0, gmemceil=64000, resolution=0.5, ionic_str=0.15):
# """makes an apbs input file given a pqr file & other parameters. See Inputgen.py in PDB2PQR documentation for descriptions of other arguments"""
# runstring = "python %s --potdx --fadd=%s --cfac=%s --space=%s --gmemceil=%s --istrng=%s %s" % (inputgen_filename, fadd, cfac, resolution, gmemceil, ionic_str, pqr_filename, )
# print "Now creating APBS input file using command:", runstring
# os.system(runstring)
# #pre_ext = '.'.join(pqr_filename.split('.')[0:-1]) # the part of the filename before the extension
# pre_ext = pqr_filename
# input_filename = os.path.join(pre_ext+'.in')
# print "APBS input_filename", input_filename
# return input_filename
def scrape_inputfile(input_filename):
'''NOTE: only takes out the dime, pdime, cglen, and fglen parameters from an APBS input file.'''
dimestring = pdimestring = cglenstring = fglenstring = None
infile = open(input_filename,'r')
for line in infile:
if re.search(' dime', line) and not dimestring:
dimestring = line
if re.search('pdime', line) and not pdimestring:
pdimestring = line
if re.search('cglen', line) and not cglenstring:
cglenstring = line
if re.search('fglen', line) and not fglenstring:
fglenstring = line
infile.close()
if pdimestring: raise Exception("Parallel-run dx files not yet implemented...")
apbs_params = {} # a dictionary containing what we scraped outta here
dime_list = dimestring.strip().split()
cglen_list = cglenstring.strip().split()
fglen_list = fglenstring.strip().split()
apbs_params['dimx'],apbs_params['dimy'],apbs_params['dimz'] = dime_list[1:]
apbs_params['cglenx'],apbs_params['cgleny'],apbs_params['cglenz'] = cglen_list[1:]
apbs_params['fglenx'],apbs_params['fgleny'],apbs_params['fglenz'] = fglen_list[1:]
return apbs_params
def make_apbs_input_using_template (new_apbs_params, apbs_file_location="apbs.in"):
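  # Fill the APBS input template with the default parameters, override them with new_apbs_params,
  # and write the rendered input file to apbs_file_location.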
  apbs_params = {} # create empty dictionary
apbs_params.update(default_apbs_params) # populate with default values
apbs_params.update(new_apbs_params) # populate with new parameters
apbs_input = File_template(apbs_input_template_location, apbs_params) # fill parameters into the template to make apbs file
print('APBS file loc', apbs_file_location)
apbs_input.save(apbs_file_location) # save the apbs input file
return
def run_apbs (apbs_filename, input_filename, pqr_filename, std_out="apbs.out"):
"""runs apbs using a given input file "input_filename" and writes all standard output to 'std_out'."""
rundir = os.path.dirname(input_filename)
print("copying file: %s to directory: %s" % (pqr_filename, os.path.join(rundir,os.path.basename(pqr_filename))))
if os.path.abspath(os.path.dirname(pqr_filename)) != os.path.abspath(rundir):
shutil.copyfile(pqr_filename, os.path.join(rundir,os.path.basename(pqr_filename)))
pqr_filename = os.path.basename(pqr_filename)
#print "pqr filename:", pqr_filename
#print "input_filename1", input_filename
input_filename = os.path.basename(input_filename)
#print "input_fielname2", input_filename
std_out = os.path.basename(std_out)
runstring = "%s %s > %s" % (apbs_filename, input_filename, std_out) # string to run apbs
print("running command:", runstring)
curdir = os.getcwd()
print("curdir", curdir)
os.chdir(rundir) # we want to run APBS in the directory
print("rundir", os.getcwd())
result = os.system(runstring) # execute the string
if result != 0: raise Exception("There was a problem running APBS") # then an error occured
dx_filename = pqr_filename + '.dx' # inputgen will automatically make this .dx file
os.chdir(curdir)
print("back to dir", os.getcwd())
return dx_filename # return the name of the dx file
def get_debye_length(apbs_std_outfilename):
"""Will parse an apbs stdout file to look for the Debye length."""
debye_string = re.compile("Debye length")
debye_list = [] # a list of numbers that will be returned
for line in open(apbs_std_outfilename, 'r'):
m = re.search(debye_string, line)
if m: # then we've found a line
number_obj = re.search("[0-9.]+", line).group()
debye_list.append(number_obj)
if number_obj == "0": print("ALERT: Debye length of zero found. This may mean that your PQR file has a net charge that is NOT zero, or that your ion concentration was zero...")
  assert len(debye_list) > 0, "Debye length not found in APBS output: %s. Please ensure that APBS calculation was completed properly and that the correct output file was specified." % apbs_std_outfilename
return debye_list[0] # take the first member of it by default. There may be a better way for this but all outputs seem to be the same
def flatten_ion_list(apbs_settings):
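  # Expand the optional 'ions' list into the flat ion1*/ion2* keys (concentration, charge, radius)
  # that the APBS input template expects.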
if 'ions' not in apbs_settings.keys(): return apbs_settings
ion_list = apbs_settings.pop('ions')
for ion in ion_list:
key = ion['key']
apbs_settings['%sconc' % key] = ion['concentration']
apbs_settings['%scrg' % key] = ion['charge']
apbs_settings['%srad' % key] = ion['radius']
return apbs_settings
def main(pqr_filename,inputgen_settings={},apbs_settings={},):
user_settings = {}
user_settings.update(default_inputgen_settings)
user_settings.update(inputgen_settings)
apbs_settings = flatten_ion_list(apbs_settings)
if apbs_settings['ion1conc']: user_settings['ionic_str'] = apbs_settings['ion1conc']
# make APBS input file using inputgen (enabled)
inputgen_location = inputgen_settings['inputgen_executable']
apbs_location = apbs_settings['apbs_executable']
input_filename = make_apbs_input_using_inputgen(inputgen_location, pqr_filename, fadd=user_settings['fadd'], cfac=user_settings['cfac'], gmemceil=user_settings['gmemceil'], resolution=user_settings['resolution'], ionic_str=user_settings['ionic_str'])
# make APBS input file using template (disabled)
#input_filename
# make DX grids
pqr_filename = os.path.abspath((pqr_filename))
print('INPUT FILENAME', input_filename)
print('PQR filename', pqr_filename)
apbs_out=pqr_filename+'.out' # make a default apbs output file
# use the inputgen-generated file to make our own, more customized file
apbs_params = scrape_inputfile(input_filename)
#if fhpd_mode:
apbs_params['pqr'] = apbs_params['stem'] = os.path.abspath(pqr_filename)
#else:
#apbs_params['pqr'] = apbs_params['stem'] = pqr_filename
apbs_params.update(apbs_settings)
new_input_filename = os.path.abspath(pqr_filename) + '.in'
dx = pqr_filename + '.dx'
print(os.getcwd())
print('new_inp', new_input_filename)
make_apbs_input_using_template(apbs_params, new_input_filename)
#if not fhpd_mode:
run_apbs(apbs_location, new_input_filename, pqr_filename, std_out=apbs_out) # save the electrostatic grid
# find the Debye length
debye = get_debye_length(apbs_out)
return dx, debye
def is_number(s):
'''returns True if the string 's' can be converted to a float/int, False otherwise'''
try:
float(s)
return True
except ValueError:
return False
class Test_apbs_functions(unittest.TestCase):
# several test cases to ensure the functions in this module are working properly
def test_make_apbs_input_using_inputgen(self): # test whether the apbs input file has been created properly
#print 'test pqr filename', test_pqr_filename
self.APBS_inp = make_apbs_input_using_inputgen(test_inputgen_location, test_pqr_filename) # get file location
#print "self.APBS_inp", self.APBS_inp
fileexists = os.path.exists(self.APBS_inp)
self.assertTrue(fileexists)
def test_make_apbs_input_using_template(self):# test whether the apbs input file has been created properly
make_apbs_input_using_template({},'/tmp/test_apbs.in')
fileexists = os.path.exists('/tmp/test_apbs.in')
self.assertTrue(fileexists) # if it exists, then that's good
def test_run_apbs(self): # test whether apbs is running properly
self.APBS_inp = make_apbs_input_using_inputgen(test_inputgen_location, os.path.abspath(test_pqr_filename)) # get file location
self.APBS_inp2 = '/tmp/input2.in'
self.inp_dict = scrape_inputfile(self.APBS_inp)
self.inp_dict['pqr'] = self.inp_dict['stem'] = os.path.abspath(test_pqr_filename)
make_apbs_input_using_template(self.inp_dict, self.APBS_inp2)
run_apbs(test_apbs_location, self.APBS_inp2, test_pqr_filename)
self.APBS_dx = test_pqr_filename + '.dx'
fileexists = os.path.exists(self.APBS_dx)
self.assertTrue(fileexists)
def test_is_number(self): # test the is_number function
self.assertTrue(is_number('0'))
self.assertTrue(is_number('3.14'))
self.assertTrue(is_number('2.0e-8'))
self.assertFalse(is_number('foobar'))
def test_get_debye_length(self):
testfile1 = open('/tmp/debye_test1','w') # file with numbers
testfile1.writelines(['CALCULATION #1: MULTIGRID\n',
'Setting up problem...\n',
'Vpbe_ctor: Using max ion radius (0 A) for exclusion function\n',
'Debye length: 99.23 A\n',
'Current memory usage: 731.506 MB total, 731.506 MB high water\n',
'Using cubic spline charge discretization.\n',])
testfile1.close()
testfile2 = open('/tmp/debye_test2','w') # file with nothing
testfile2.close()
result1 = get_debye_length('/tmp/debye_test1') # should return the numeric value 99.32
self.assertEqual(result1, '99.23')
self.assertRaises(AssertionError, get_debye_length, '/tmp/debye_test2') # this is an empty file, so the function should throw an error
def test_scrape_inputfile(self):
testfile1 = open('/tmp/scrape_test1','w') # file with numbers
testfile1.writelines([''' dime 129 129 193
cglen 80.2842 77.5999 116.9345
fglen 67.2260 65.6470 88.7850
''',])
testfile1.close()
test_params = {'dimx':'129', 'dimy':'129', 'dimz':'193','cglenx':'80.2842', 'cgleny':'77.5999', 'cglenz':'116.9345', 'fglenx':'67.2260', 'fgleny':'65.6470', 'fglenz':'88.7850'}
self.assertEqual(test_params, scrape_inputfile('/tmp/scrape_test1'))
if __name__=='__main__':
print("Running unit tests for apbs.py")
unittest.main() # run tests of all functions
| UTF-8 | Python | false | false | 13,105 | py | 29 | apbs.py | 15 | 0.700038 | 0.677222 | 0 | 291 | 44.034364 | 252 |
abhishekjha2468/HackerRank-Python | 970,662,637,369 | fcb2e58d58e572ab5a9a95b873bc9a43f43b35cd | e8854329efa49df5a9038b2decc1f037540b2ef0 | /Validating Credit Card Number.py | 8fc56f27613bcbcca267b679f93d202d240f1e58 | [] | no_license | https://github.com/abhishekjha2468/HackerRank-Python | 2a6f217ccfa9805629d5af698bf8838b95a77eac | 3d5ff2e51b228e633a8250b97a6dbb4e7e115b42 | refs/heads/main | 2023-03-28T16:26:06.878931 | 2021-03-30T06:07:51 | 2021-03-30T06:07:51 | 352,885,018 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def count(string):
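    # returns the length of the longest run of identical consecutive characters in `string`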
    Count=1
    list=[]
for i in range(len(string)):
try:
if i<(len(string)-1):
if string[i]==string[i+1]:
Count=Count+1
else:
list.append(Count)
Count=1
else:
list.append(Count)
except:
list.append(Count)
return max(list)
#----------------------------------------
if __name__=='__main__':
n=int(input())
for m in range(n):
num=list(input())
#print(num)
N=[]
flag='Invalid'
c=True
while(c):
try:
if int(num[0]) not in [4,5,6]:
#print('Goes Wrong in line no.. 10')
c=False
break
#print("First Condition in passed That first number is either 4,5 or 6")
if len(num)==16:
new=[ int(i) for i in num ]
#print("While Checking integer ")
#print("new num list is: ",new)
#set_list=list(set(new))
#print("All Good Till line finding unique number in num list i.e : ",set_list)
if count(''.join(num))>3:
c=False
break
#for i in set_list:
#print("Checking the count of ",i)
#if new.count(i)>=4:
#N.append(new.count(i))
#print("Error on counting")
# c=False
#break
#print("The Highest Count is ",N)
if len(new)==16 and N==[]:
flag='Valid'
#print("Flag is set to Valid")
#print("Breaking the while loop now ")
c=False
break
#-------------------------------------------------
elif len(num)==19 and num.count('-')==3:
#print("we are on secound case of hyphen '-' ")
h=[]
h.append(num.pop(4))
h.append(num.pop(8))
h.append(num.pop(12))
#print("hyphen list is: ",h)
if h!=['-','-','-']:
c=False
break
else: continue
#print("All hyphen are on right position ")
new=[ int(i) for i in num ]
#print("While Checking integer ")
#print("new num list is: ",new)
#set_list=list(set(new))
#print("All Good Till line finding unique number in num list i.e : ",set_list)
if count(''.join(num))>3:
c=False
break
#for i in set_list:
#print("Checking the count of ",i)
#if new.count(i)>=4:
#N.append(new.count(i))
#print("Error on counting")
#c=False
#break
#print("The Highest Count is ",N)
if len(new)==16 and N==[]:
flag='Valid'
#print("Flag is set to Valid")
#print("Breaking the while loop now ")
break
except:
flag='Invalid'
break
#print("Now we are out of the While Loop")
print(flag) | UTF-8 | Python | false | false | 2,518 | py | 108 | Validating Credit Card Number.py | 107 | 0.527403 | 0.515091 | 0 | 96 | 25.239583 | 83 |
Jisup-lim/academy-study | 2,774,548,916,142 | 7546105c1f4625270eabb34d8c8416a1c9f3cd50 | 52c4a91abba094cefe93a1a6cc837eac564bd332 | /ml/m09_SelectModel_2_cancer.py | c77fc708face697326a7db6303b93551652c5b80 | [] | no_license | https://github.com/Jisup-lim/academy-study | 9805a7d1809b51781f341b3b0eceb46f2da956df | 74e07370ac697b01a026aaaf95437300dc9913fd | refs/heads/master | 2023-04-07T19:49:35.605546 | 2021-05-31T15:15:47 | 2021-05-31T15:15:47 | 325,250,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import accuracy_score
from sklearn.utils.testing import all_estimators
# from sklearn.utils import all_estimators
import warnings
warnings.filterwarnings('ignore')
dataset = load_breast_cancer()
x = dataset.data
y = dataset.target
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=104)
allAlgorithms = all_estimators(type_filter='classifier')
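# all_estimators() yields (name, class) pairs for every scikit-learn classifier; each one is fit and
# scored below, and estimators that cannot be instantiated or fit are reported as unavailable.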
for (name,algorithm) in allAlgorithms:
try:
model = algorithm()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
        print(name,'accuracy : ', accuracy_score(y_test,y_pred))
except:
# continue
        print(name,'model unavailable')
import sklearn
print(sklearn.__version__) # 0.23.2
# AdaBoostClassifier accuracy : 0.9736842105263158
# BaggingClassifier accuracy : 0.9473684210526315
# BernoulliNB accuracy : 0.6052631578947368
# CalibratedClassifierCV accuracy : 0.9298245614035088
# CategoricalNB model unavailable
# CheckingClassifier accuracy : 0.39473684210526316
# ClassifierChain model unavailable
# ComplementNB accuracy : 0.8947368421052632
# DecisionTreeClassifier accuracy : 0.9298245614035088
# DummyClassifier accuracy : 0.5263157894736842
# ExtraTreeClassifier accuracy : 0.9035087719298246
# ExtraTreesClassifier accuracy : 0.956140350877193
# GaussianNB accuracy : 0.9210526315789473
# GaussianProcessClassifier accuracy : 0.9298245614035088
# GradientBoostingClassifier accuracy : 0.9385964912280702
# HistGradientBoostingClassifier accuracy : 0.9736842105263158
# KNeighborsClassifier accuracy : 0.9473684210526315
# LabelPropagation accuracy : 0.42105263157894735
# LabelSpreading accuracy : 0.42105263157894735
# LinearDiscriminantAnalysis accuracy : 0.9736842105263158
# LinearSVC accuracy : 0.8859649122807017
# LogisticRegression accuracy : 0.9298245614035088
# LogisticRegressionCV accuracy : 0.956140350877193
# MLPClassifier accuracy : 0.9210526315789473
# MultiOutputClassifier model unavailable
# MultinomialNB accuracy : 0.8947368421052632
# NearestCentroid accuracy : 0.868421052631579
# NuSVC accuracy : 0.8508771929824561
# OneVsOneClassifier model unavailable
# OneVsRestClassifier model unavailable
# OutputCodeClassifier model unavailable
# PassiveAggressiveClassifier accuracy : 0.9298245614035088
# Perceptron accuracy : 0.9385964912280702
# QuadraticDiscriminantAnalysis accuracy : 0.9736842105263158
# RadiusNeighborsClassifier model unavailable
# RandomForestClassifier accuracy : 0.956140350877193
# RidgeClassifier accuracy : 0.9736842105263158
# RidgeClassifierCV accuracy : 0.9649122807017544
# SGDClassifier accuracy : 0.8333333333333334
# SVC accuracy : 0.9210526315789473
# StackingClassifier model unavailable
# VotingClassifier model unavailable
satyam-seth-learnings/ds_algo_learning | 9,491,877,749,891 | 430c51fc8a240cbfbcc1cb24ef612138b92e031e | 48b7c5c6d4576c06f157abb317ced99dae64f5c5 | /Applied Course/4.Problem Solving/5.Problems on Trees/30.All Elements in Two Binary Search Trees.py | 9f7d772881a6ac6e58fc114466eb2ec5edfe0a72 | [] | no_license | https://github.com/satyam-seth-learnings/ds_algo_learning | 38cc5e6545ec8a5fbabefc797aee486c98cfb314 | 761cba2cd2dd7d7cdaf5a3c41503fdfc6dfe73ad | refs/heads/master | 2023-08-31T00:48:30.707533 | 2021-10-16T09:37:45 | 2021-10-16T09:37:45 | 415,274,888 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://leetcode.com/problems/all-elements-in-two-binary-search-trees/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Logic-1
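# Recursive inorder traversal of both trees, followed by a two-pointer merge of the two sorted lists.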
class Solution:
def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
def inorder(root):
if root:
return inorder(root.left)+[root.val]+inorder(root.right)
else:
return []
tree1=inorder(root1)
tree2=inorder(root2)
result=[]
i,j=0,0
while i<len(tree1) and j<len(tree2):
if tree1[i]<tree2[j]:
result.append(tree1[i])
i+=1
else:
result.append(tree2[j])
j+=1
if i<len(tree1):
result+=tree1[i:]
if j<len(tree2):
result+=tree2[j:]
return result
# Logic-2
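# Iterative inorder traversal with one stack per tree, always emitting the smaller current value, so the merge happens on the fly.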
class Solution:
def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
stack1,stack2,result=[],[],[]
while root1 or root2 or stack1 or stack2:
while root1:
stack1.append(root1)
root1=root1.left
while root2:
stack2.append(root2)
root2=root2.left
if not stack2 or stack1 and stack1[-1].val<=stack2[-1].val:
root1=stack1.pop()
result.append(root1.val)
root1=root1.right
else:
root2=stack2.pop()
result.append(root2.val)
root2=root2.right
return result | UTF-8 | Python | false | false | 1,688 | py | 429 | 30.All Elements in Two Binary Search Trees.py | 404 | 0.510071 | 0.476303 | 0 | 51 | 32.117647 | 76 |
thraddash/python_tut | 4,071,629,026,525 | a11e43baef8f5b4aa1c61f139255e0d36979c052 | 799a90344c4e2e367bd79fff063ede765f816549 | /16_exception_handling/pass.py | 4ba63f53cc74b1e3bc65938668b988ac5e12fbef | [] | no_license | https://github.com/thraddash/python_tut | a49765801882003b6d3b1d6958bee476f5768470 | 5bd359f2021bb26953bcd955018dfbf6b2b6395f | refs/heads/master | 2023-03-16T10:28:56.581004 | 2021-03-05T00:08:06 | 2021-03-05T00:08:06 | 325,146,371 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# Ignore exception by pass
def div(x, y):
return x / y
try:
div_result = div(2, 1)
#div_result = div(2, 0)
except:
pass # do nothing
else:
print("Div result is: " + str(div_result))
def div2(x, y):
try:
result = x / y
except:
pass # do nothing
else:
return "Div result is: " + str(result)
print()
print("v2 passing arguments")
print(div2(2, 1))
print(div2(2, 0))
| UTF-8 | Python | false | false | 448 | py | 178 | pass.py | 171 | 0.564732 | 0.537946 | 0 | 25 | 16.92 | 46 |
karthikpappu/pyc_source | 18,408,229,860,346 | 86038a4900b15b777972893507c5333fc984ab84 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pypi_install_script/yadage-service-cli-0.1.11.tar/setup.py | 810ad4f9a67120c49934ddeecb6ff8e50666a027 | [] | no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup, find_packages
setup(
name = 'yadage-service-cli',
version = '0.1.11',
description = 'yadage service command line tools',
url = 'http://github.com/yadage/yadage-service-cli',
author = 'Kyle Cranmer, Lukas Heinrich',
author_email = 'cranmer@cern.ch, lukas.heinrich@cern.ch',
packages = find_packages(),
entry_points = {
'console_scripts': [
'yad = yadagesvccli.cli:yad',
]
},
install_requires = [
'click',
'requests',
'pyyaml',
'requests_toolbelt',
'clint'
],
extras_require = {
'local' : [
'yadage-schemas'
]
}
)
| UTF-8 | Python | false | false | 628 | py | 114,545 | setup.py | 111,506 | 0.598726 | 0.592357 | 0 | 28 | 21.428571 | 59 |
zgreat/transient | 15,324,443,336,750 | 51270bef51b2ff9dc35131945962a06ce9e963d8 | 77da6217bf83d41b2fe479d6e414a1df4f997b3c | /runserver.py | 14dfed33dbd933b5ae057aa7b4b1e8bb9ddb0f19 | [
"MIT"
] | permissive | https://github.com/zgreat/transient | e4deb14951dc05692bc1ccb624c66cf394bc9664 | 1cfc1fe65079ef3c75754eaa0cd97f7ebb55664a | refs/heads/master | 2021-05-30T10:49:40.529829 | 2015-12-20T03:46:39 | 2015-12-20T03:46:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
from transient import api
if __name__ == "__main__":
api.run()
| UTF-8 | Python | false | false | 86 | py | 30 | runserver.py | 26 | 0.581395 | 0.581395 | 0 | 5 | 16.2 | 26 |
jgubert/Ensemble | 11,132,555,279,782 | 9ac2e1464c7dde54db2cdfa8ebbeeb74856f69a9 | c890014818f638d9c6f512689c515b545c21f84e | /main_teste.py | cf814623a77eae4c0095548619de0d4ade40459a | [] | no_license | https://github.com/jgubert/Ensemble | 23689853fbaf8ba5a0f7deb0a1c7dab6d5c194a1 | d1e9d33864357f4adaf9bc8c52209f3d0e6d433b | refs/heads/master | 2020-04-06T16:57:20.383220 | 2018-11-15T02:33:04 | 2018-11-15T02:33:04 | 146,955,467 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
main() function
"""
import fileinput
import csv
import math
import sys
import bootstrap
import decisionTree
import errorMeasures
import header
import kFoldStratified
import preProcessing
import tree
import voting
import random
import sampling
"""
How to run:
> python3 main_teste.py <datafile.format> <num_trees>
"""
def main(filename, n_trees):
 # collecting the command-line arguments
#filename = str(sys.argv[1])
#n_trees = int(sys.argv[2])
n_folds = 10
list_forest = []
 # setting the random seed
random.seed(1)
 # opening the data file
#datafile = preProcessing.openDataFile(filename)
# TO DO: FIX THIS PIECE OF CODE
with open(filename) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
datafile = list(csv_reader)
#print("\n============= DATA FILE =============")
#print (*datafile,sep="\n")
m = math.ceil(math.sqrt(len(datafile[1])))
 # Sampling the m columns with the highest gain
if m > len(datafile[0]):
print("valor m é maior que quantidade de atributos")
return -1
datafile = sampling.sampleAttributes(datafile, m)
 # Setting up the header
dataheader = header.Header()
dataheader.setHeader(datafile)
 # List that will store the folds
fold_list = []
fold_list = kFoldStratified.kFoldStratified(datafile, n_folds)
 # The number of tested entries is the size of one fold
 # times the number of test rounds that will be run
tam_testfold = len(fold_list[0]) * n_folds
'''
print("\n============= FOLD lIST =============")
for i in range(n_folds):
print("\nFold N " + str(i))
print(*fold_list[i], sep="\n")
'''
 # initialize the confusion matrix
value_classes = kFoldStratified.countPossibleAttributes(datafile)
errorMeasures.initConfusionMatrix(len(value_classes))
 # calling the bootstrap (K-Fold afterwards)
for i in range(n_folds):
aux_fold_list = []
test_fold = []
training_folds = []
  # copy the fold list into an auxiliary list
aux_fold_list = list(map(list, fold_list))
  # take the test fold
test_fold = aux_fold_list[i]
# DEBUG
#print(*test_fold,sep="\n")
#print("\n")
#
#print (*aux_fold_list,sep="\n")
  # take the training folds
aux_fold_list.remove(test_fold)
  # flatten the list of lists into a single list to simplify the implementation
for j in aux_fold_list:
training_folds += j
list_forest.append(decisionTree.makeForest(training_folds, n_trees, dataheader))
final_votes = decisionTree.startClassification(test_fold, list_forest[i], dataheader, value_classes)
  # DEBUG: printing the error measures
errorMeasures.compactConfusionMatrix(value_classes)
print("\n\n ===========================================")
print("Num Folds: " + str(n_folds))
print("Num Trees: " + str(n_trees))
print("RESULT MATRIX:")
errorMeasures.printResultMatrix()
print("CONFUSION MATRIX:")
errorMeasures.printConfusionMatrix()
print("Accuracy: ")
print(errorMeasures.accuracy(tam_testfold,value_classes))
print("Error: ")
print(errorMeasures.error(tam_testfold,value_classes))
print("Recall: ")
print(errorMeasures.recall(value_classes))
print("Precision: ")
print(errorMeasures.precision(value_classes))
print("FMeasure: ")
print(errorMeasures.FMeasure(errorMeasures.precision(value_classes), errorMeasures.recall(value_classes), 1))
print("===========================================")
  # Clearing the confusion matrix
errorMeasures.resetConfusionMatrix(len(value_classes))
'''
#(*)
 # DEBUG: printing the forests
for i in range(len(list_forest)):
for j in range(len(list_forest[i])):
#for k in range(len(list_forest[i][j])):
#tree.printTree(list_forest[i][j][k])
tree.printTree(list_forest[i][j])
'''
'''
Running main()
'''
#main()
| UTF-8 | Python | false | false | 4,125 | py | 14 | main_teste.py | 12 | 0.617311 | 0.614393 | 0 | 155 | 25.535484 | 113 |
sandeepgoyal194/CustomerChoice | 7,370,163,881,444 | 713ac129cd60bf41c7187f29065bb2cafbeafbe3 | 0a52c30680948e74349105edcd3b424c44df4b20 | /services/TopSiteGratis.py | edf39f80a92323da2618c748032fe8a96cbafdb0 | [] | no_license | https://github.com/sandeepgoyal194/CustomerChoice | e47cedf4e9b0214e949cd29532ce4b77f305abd5 | 755e182900d94a82de0b3a5d803630db6b21fedf | refs/heads/master | 2018-09-03T04:46:21.870455 | 2018-06-29T05:00:38 | 2018-06-29T05:00:38 | 128,005,895 | 0 | 1 | null | false | 2018-07-18T06:30:40 | 2018-04-04T04:14:48 | 2018-06-29T05:01:02 | 2018-07-17T06:05:31 | 21,686 | 0 | 1 | 1 | Python | false | null | from model.Servicemodel import ServiceRecord
from scrapy import Spider, Request
class TopSiteGratis(Spider):
def __init__(self):
pass
def parsing(self, response):
return self.crawl(response,self.category,self.servicename)
def crawl(self, response, category, servicename):
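        # Scrape review text, ratings, dates, authors, product image URLs and the site name from the page,
        # then save one ServiceRecord per review.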
reviews = []
reviews1 = []
self.category = category
self.servicename = servicename
for node in response.xpath(
"//div[@class='reviews product-reviews']/div[@class='item']/p[@class='excerpt']"):
reviews.append(node.xpath('string()').extract());
ratings = response.xpath("//div[@class='reviews product-reviews']/div[@class='item']/div[@class='right-block']/div[@class='ratings']/span[@class='rate_False']/span").extract()
dates = response.xpath("//div[@class='reviews product-reviews']/div[@class='item']/meta[@itemprop='datePublished']/@content").extract()
authors = response.xpath("//div[@class='reviews product-reviews']/div[@class='item']/div[@class='author-info']/a/text()").extract()
img_src = response.xpath(
"//div[@class='reviews product-reviews']/div[@class='item']/div[@class='left-block']/div[@class='product-info']/div[@class='img pull-left']/img/@src").extract()
# headings = response.xpath("//div[@class='pr-review-wrap']/div[@class='pr-review-rating-wrapper']/div[@class='pr-review-rating']/p[@class='pr-review-rating-headline']/text()").extract()
website_name1 = response.xpath("//div[@class='footer']/div[@class='row']/div[@class='col-md-7 text-right']/text()").extract()
website_name = []
i = 0
while(i< len(website_name1)):
c = website_name1[1].split(" ")
website_name.append(c[12])
break
i = i+1
print("Reviews ", len(reviews), reviews)
print("Authors ", len(authors), authors)
print("Rating ", len(ratings), ratings)
print("Dates ", len(dates), dates)
print("img_src ", len(img_src), img_src)
print("websites ", len(website_name), website_name)
for item in range(0, len(reviews)):
servicename1 = ServiceRecord(response.url, ratings[item], None, dates[item], authors[item], category,
servicename, reviews[item], img_src, website_name)
servicename1.save()
| UTF-8 | Python | false | false | 2,407 | py | 91 | TopSiteGratis.py | 84 | 0.606149 | 0.600748 | 0 | 46 | 51.217391 | 194 |
bellyfat/etherscan-python | 9,019,431,338,699 | 26e8d4fa3b7a294e4100dce618b751849a0cc8d1 | 6b902b5fe1dbdbde22047fe503ce46a33040ce7e | /build/lib/etherscan/enums/actions_enum.py | 2ca5d53e64874ac3b93ceaed1ae31d90d1cd246d | [
"Python-2.0",
"MIT"
] | permissive | https://github.com/bellyfat/etherscan-python | fc6cfa9ce9639febd9b069c466827a3c1ea915c0 | 0254144fc2db38c897ff843069ba3c945e19b866 | refs/heads/master | 2022-12-28T06:59:05.873455 | 2020-10-16T11:01:52 | 2020-10-16T11:01:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from dataclasses import dataclass
@dataclass(frozen=True)
class ActionsEnum:
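    # String constants for the `action` query parameter used by the Etherscan API endpoints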
BALANCE_HISTORY: str = "balancehistory"
BALANCE_MULTI: str = "balancemulti"
BALANCE: str = "balance"
CHAIN_SIZE: str = "chainsize"
ETH_BLOCK_NUMBER: str = "eth_blockNumber"
ETH_CALL: str = "eth_call"
ETH_ESTIMATE_GAS: str = "eth_estimateGas"
ETH_GAS_PRICE: str = "eth_gasPrice"
ETH_GET_BLOCK_BY_NUMBER: str = "eth_getBlockByNumber"
ETH_GET_BLOCK_TRANSACTION_COUNT_BY_NUMBER: str = "eth_getBlockTransactionCountByNumber"
ETH_GET_TRANSACTION_BY_BLOCK_NUMBER_AND_INDEX: str = "eth_getTransactionByBlockNumberAndIndex"
ETH_GET_CODE: str = "eth_getCode"
ETH_GET_STORAGE_AT: str = "eth_getStorageAt"
ETH_GET_TRANSACTION_BY_HASH: str = "eth_getTransactionByHash"
ETH_GET_TRANSACTION_COUNT: str = "eth_getTransactionCount"
ETH_GET_TRANSACTION_RECEIPT: str = "eth_getTransactionReceipt"
ETH_GET_UNCLE_BY_BLOCK_NUMBER_AND_INDEX: str = "eth_getUncleByBlockNumberAndIndex"
ETH_PRICE: str = "ethprice"
ETH_SUPPLY: str = "ethsupply"
GAS_ESTIMATE: str = "gasestimate"
GAS_ORACLE: str = "gasoracle"
GET_ABI: str = "getabi"
GET_BLOCK_COUNTDOWN: str = "getblockcountdown"
GET_BLOCK_NUMBER_BY_TIME: str = "getblocknobytime"
GET_BLOCK_REWARD: str = "getblockreward"
GET_MINED_BLOCKS: str = "getminedblocks"
GET_SOURCE_CODE: str = "getsourcecode"
GET_STATUS: str = "getstatus"
GET_TX_RECEIPT_STATUS: str = "gettxreceiptstatus"
TOKEN_BALANCE: str = "tokenbalance"
TOKEN_SUPPLY: str = "tokensupply"
TOKENNFTTX: str = "tokennfttx"
TOKENTX: str = "tokentx"
TXLIST_INTERNAL: str = "txlistinternal"
TXLIST: str = "txlist"
| UTF-8 | Python | false | false | 1,715 | py | 20 | actions_enum.py | 1 | 0.69621 | 0.69621 | 0 | 40 | 41.85 | 98 |
aloscc/Ambulance-Dispatching | 13,065,290,545,068 | 0be62f9214bd80b49085c425e3b8a4977468e65a | 1cd37beb04515d22a185624e5a304a9de0923801 | /src/gt/plot/DispatcherPlot.py | d07b56860af6950089cb2820758aca0827ee0f57 | [] | no_license | https://github.com/aloscc/Ambulance-Dispatching | fac3585096b1c3f6c5d2c9ab23cc6be5e57e44dd | 429538a08f8580043f2c72aeb289527477ec4949 | refs/heads/master | 2020-05-22T00:31:39.958716 | 2019-05-11T17:45:20 | 2019-05-11T17:45:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gt.core.HospitalsAndDispatcherModel import DispatcherAndHospitalsModel
from neng.game import Game
from common import *
def find_nash(la, mu, n, print_matrix=False):
model = DispatcherAndHospitalsModel(la, mu, n, 3, 10)
matrix = model.game_matrix()
if print_matrix:
print(matrix)
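    # Build the game description in NFG (normal-form game) text format for the neng Game parser:
    # one dispatcher with three strategies (N2, N1, BE) and two hospitals with two strategies
    # each (A, R), followed by the payoff entries in the order the format expects.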
text = "NFG 1 R \"Ambulance Dispatching Game\" { \"Dispatcher\" \"Hospital1\" \"Hospital2\" }\n\n"
text += "{ { \"N2\" \"N1\" \"BE\" }\n"
text += "{ \"A\" \"R\" }\n"
text += "{ \"A\" \"R\" }\n"
text += "}\n\"\"\n\n"
text += "{\n"
for k in range(2):
for j in range(2):
for i in range(3):
text += "{ \"\" %f, %f, %f }\n" % (matrix[i][j][k][0], matrix[i][j][k][2], matrix[i][j][k][2])
text += "}\n"
text += "1 2 3 4 5 6 7 8 9 10 11 12"
game = Game(text)
sol = game.findEquilibria('pne')
return extract_strategies_from_solutions(sol)
def extract_strategies_from_solutions(solutions):
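    # Convert each equilibrium strategy profile into a short text label: the dispatcher's
    # choice (N2/N1/BE), plus the hospitals' accept/reject choices when the dispatcher
    # does not play BE.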
strategies = set()
if not solutions:
return strategies
for sol in solutions:
cur_stra = ''
if sol[0][0] == 1:
cur_stra += 'N2'
elif sol[0][1] == 1:
cur_stra += 'N1'
else:
cur_stra += 'BE'
strategies.add(cur_stra)
continue
cur_stra += ';'
if sol[1][0] == 1:
cur_stra += 'A'
else:
cur_stra += 'R'
if sol[2][0] == 1:
cur_stra += 'A'
else:
cur_stra += 'R'
strategies.add(cur_stra)
return strategies
def solution_plot(n, ax=None, legend=False):
print('Computing for n = {}'.format(n))
data = {
'Lambda': [],
'Mu': [],
'Nash Equilibrium': []
}
for mu in np.linspace(0.5, 3, 30):
# print('Computing for mu={}'.format(mu))
for l in np.linspace(0.5, 3, 30):
data['Lambda'].append(l)
data['Mu'].append(mu)
data['Nash Equilibrium'].append(','.join(find_nash(l, mu, n)))
data = pd.DataFrame(data)
if ax is not None:
sns.scatterplot(x='Lambda', y='Mu', hue='Nash Equilibrium', data=data, ax=ax, legend=legend, marker='s', s=1000)
ax.set_title('N = ' + str(n))
else:
sns.scatterplot(x='Lambda', y='Mu', hue='Nash Equilibrium', data=data, legend=legend, marker='s', s=1000)
if __name__ == '__main__':
_, axs = plt.subplots(nrows=3, ncols=3, figsize=(15, 15))
solution_plot([1, 1], ax=axs[0][0], legend='brief')
solution_plot([1, 2], ax=axs[0][1], legend='brief')
solution_plot([1, 3], ax=axs[0][2], legend='brief')
solution_plot([2, 1], ax=axs[1][0], legend='brief')
solution_plot([2, 2], ax=axs[1][1], legend='brief')
solution_plot([2, 3], ax=axs[1][2], legend='brief')
solution_plot([3, 1], ax=axs[2][0], legend='brief')
solution_plot([3, 2], ax=axs[2][1], legend='brief')
solution_plot([3, 3], ax=axs[2][2], legend='brief')
plt.savefig('../../images/Dispatcher/Dispatcher Nash Equilibrium')
plt.show()
| UTF-8 | Python | false | false | 3,060 | py | 8 | DispatcherPlot.py | 7 | 0.524837 | 0.491176 | 0 | 93 | 31.903226 | 120 |
DenisCarriere/gpsimage | 2,576,980,414,240 | b92a3038f63f8230030312f1a3f8a29d56c78fb4 | 1eb44f45c7972def3b78d783582d1fe51a01c2ed | /gpsimage/base.py | 654ef0a7804c32723f757c40cd0a7b8313784ecf | [
"Apache-2.0"
] | permissive | https://github.com/DenisCarriere/gpsimage | 3796adc92d9b8d3e7c718801b3b904f520d6238b | 4f2f6c0acb7c6bb173299f86932d894c553e6c5c | refs/heads/master | 2022-11-30T23:19:06.771136 | 2014-12-01T01:22:02 | 2014-12-01T01:22:02 | 22,666,022 | 7 | 6 | Apache-2.0 | false | 2022-11-22T00:30:49 | 2014-08-06T01:53:16 | 2022-07-05T12:13:46 | 2022-11-22T00:30:46 | 6,177 | 6 | 7 | 5 | Python | false | false | import os
import time
import datetime
import dateutil.parser
import exifread
class GPSImage(object):
"""
"""
_exclude = ['lat', 'lng','debug','json','ok', 'help', 'x', 'y', 'path','exif', 'image']
exif = {}
def __init__(self, image):
if isinstance(image, str):
self.path = os.path.abspath(image)
self.filename = os.path.basename(self.path)
            self.image = open(self.path, 'rb')  # exifread expects a binary-mode file
else:
self.image = image
# Initial Functions
self._read_exif()
def __repr__(self):
if self.ok:
return '<GPSImage - {0} [{1}, {2} ({3})]>'.format(self.filename, self.lat, self.lng, self.datum)
else:
            return '<GPSImage [{0}]>'.format(self.status)
def _read_exif(self):
self.exif = exifread.process_file(self.image)
def _dms_to_dd(self, dms, ref):
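        # Convert an exifread degrees/minutes/seconds triple into decimal degrees.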
if len(dms) == 3:
degrees = dms[0].num
minutes = dms[1].num / 60.0
seconds = float(dms[2].num) / float(dms[2].den) / 60.0 / 60.0
dd = degrees + minutes + seconds
# South & West returns Negative values
if ref in ['S', 'W']:
dd *= -1
return dd
def _pretty(self, key, value, special=''):
if special:
key = special.get(key)
if key:
extra_spaces = ' ' * (20 - len(key))
return '{0}{1}: {2}'.format(key, extra_spaces, value)
def debug(self):
# JSON Results
print('## JSON Results')
for key, value in self.json.items():
print(self._pretty(key, value))
print('')
        # Camera Raw (non-GPS exifread tags; tag names are already descriptive)
        if self.exif:
            print('## Camera Raw')
            for key, value in self.exif.items():
                if not key.startswith('GPS'):
                    print(self._pretty(key, value))
            print('')
        # GPS Raw
        if self.exif:
            print('## GPS Raw')
            for key, value in self.exif.items():
                if key.startswith('GPS'):
                    print(self._pretty(key, value))
@property
def status(self):
if not self.exif:
return 'ERROR - Exif not found'
elif not self.ok:
return 'ERROR - No Geometry'
else:
return 'OK'
"""
@property
def dpi(self):
value = self._image.info.get('dpi')
if value:
if len(value) == 2:
if bool(value[0] and value[1]):
return value
# If both values are (0, 0) then change it to the standard 72DPI
else:
return (72, 72)
else:
# Retrieves X & Y resolution from Exif instead of PIL Image
x = self._divide(self.XResolution)
y = self._divide(self.YResolution)
if bool(x and y):
return (int(x), int(y))
"""
@property
def ok(self):
if bool(self.lat and self.lng):
return True
else:
return False
"""
@property
def model(self):
return self.Model
@property
def make(self):
return self.Make
"""
@property
def datum(self):
datum = self.exif.get('GPS GPSMapDatum')
if datum:
return datum.values
else:
return 'WGS-84'
@property
def lng(self):
lng_dms = self.exif.get('GPS GPSLongitude')
lng_ref = self.exif.get('GPS GPSLongitudeRef')
if bool(lng_dms and lng_ref):
return self._dms_to_dd(lng_dms.values, lng_ref.values)
@property
def x(self):
return self.lng
@property
def lat(self):
lat_dms = self.exif.get('GPS GPSLatitude')
lat_ref = self.exif.get('GPS GPSLatitudeRef')
if bool(lat_dms and lat_ref):
return self._dms_to_dd(lat_dms.values, lat_ref.values)
@property
def y(self):
return self.lat
@property
def altitude(self):
altitude = self.exif.get('GPS GPSAltitude')
if altitude:
return altitude.values
@property
def direction(self):
direction = self.exif.get('GPS GPSImgDirection')
if direction:
return direction.values
@property
def timestamp(self):
# For GoPro
timestamp = self.exif.get('Image DateTime')
if timestamp:
timestamp = timestamp.values.replace(':','-',2)
return dateutil.parser.parse(timestamp)
"""
@property
def width(self):
return self._image.size[0]
@property
def height(self):
return self._image.size[1]
@property
def size(self):
if bool(self.height and self.width):
return (self.width, self.height)
"""
@property
def geometry(self):
if self.ok:
return {'type':'POINT', 'coordinates':[self.lng, self.lat]}
@property
def satellites(self):
        satellites = self.exif.get('GPS GPSSatellites')
        if satellites:
            return int(satellites.values)
@property
def json(self):
container = {}
for key in dir(self):
if bool(not key.startswith('_') and key not in self._exclude):
value = getattr(self, key)
if value:
container[key] = value
return container
if __name__ == '__main__':
img = GPSImage('/home/denis/Github/gpsimage/gpsimage/images/nikon_coolpix_aw100.jpg')
print img.json | UTF-8 | Python | false | false | 5,484 | py | 6 | base.py | 4 | 0.519329 | 0.511306 | 0 | 207 | 25.497585 | 108 |
jingshisun/Sentiment-Analysis | 1,348,619,732,610 | fb1a0bfad2910a66feabcae47c7cb00aa426681d | f8857b6a70c38d55056457047beeed79d9381504 | /Project Part 3/projectpart3.py | b308dc2780057f5d561354ea74d5fed8c5708034 | [] | no_license | https://github.com/jingshisun/Sentiment-Analysis | 122e90cba465bffdceba176f52befdab9bb30b7a | 8d062138e9c3379eb942a18b0940c9ec126979ed | refs/heads/master | 2021-09-01T08:29:41.101618 | 2017-12-26T02:02:18 | 2017-12-26T02:02:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 22 11:31:20 2016
@author: tomec
"""
import urllib
import pandas as pd
from datetime import timedelta
import datetime
import csv
import re
import unicodedata
import nltk
from nltk.sentiment.util import mark_negation
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from selenium import webdriver
from bs4 import BeautifulSoup
import time
from selenium.webdriver.common.keys import Keys
from statsmodels.tsa.stattools import grangercausalitytests
from statsmodels.tsa.stattools import adfuller
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
import sys
from bokeh.io import output_file, show, vplot
from bokeh.plotting import figure
from bokeh.models import Span
### Create function to break contractions apart into their component words.
### A text file of contraction mappings ('contractions.txt') should be located in the
### working directory along with this script.
def break_contractions(text):
#### Import dictionary of contractions: contractions.txt
with open('contractions.txt','r') as inf:
contractions = eval(inf.read())
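    # Build a single regex that matches any contraction key as a whole word, then
    # substitute each match with its expansion from the dictionary.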
pattern = re.compile(r'\b(' + '|'.join(contractions.keys()) + r')\b')
result = pattern.sub(lambda x: contractions[x.group()], text)
return(result)
### Create function to lemmatize (stem) words to their root
### This requires the NLTK wordnet dataset.
def lemmatize_words(text):
# Create a lemmatizer object
wordnet_lemmatizer = nltk.stem.WordNetLemmatizer()
out = []
for word in text:
word = ''.join(w.lower() for w in word if w.isalpha())
out.append(wordnet_lemmatizer.lemmatize(word))
return(out)
#### Create function to remove stopwords (e.g., and, if, to)
#### Removes stopwords from a list of words (i.e., to be used on lyrics after splitting).
#### This requires the NLTK stopwords dataset.
def remove_stopwords(text):
# Create set of all stopwords
stopword_set = set(w.lower() for w in nltk.corpus.stopwords.words())
out = []
for word in text:
# Convert words to lower case alphabetical letters only
# word = ''.join(w.lower() for w in word if w.isalpha())
if word not in stopword_set:
out.append(word)
# Return only words that are not stopwords
return(out)
#### Create a class that stores the NRC Word-Emotion Associations dataset as a
#### dictionary (once the word_assocations object is constructed), then
#### provides the 'count_emotions' method to count the number of occurrences of
#### each emotion.
class word_assocations:
def __init__(self):
# Import NRC Word-Emotion Association data
with open("NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt", "r",
newline = '', encoding = 'utf-8') as f:
file = f.readlines()
file = file[46:] # First 45 lines are comments
# Create dictionary with words and their associated emotions
associations = {}
for line in file:
elements = line.split()
if elements[2] == '1':
if elements[0] in associations:
associations[elements[0]].append(elements[1])
else:
associations[elements[0]] = [elements[1]]
# Initializes associations dictionary (so not to repeat it)
self.associations = associations
def count_emotions(self, text):
# Clean up the string of characters
temp0 = break_contractions(text) # Break up contractions
temp1 = lemmatize_words(temp0.split()) # Split string to words, then lemmatize
temp2 = mark_negation(temp1, double_neg_flip = True) # Account for negations
temp3 = remove_stopwords(temp2) # Remove any stopwords
# check_spelling(temp2) # Function is no longer useful
# Count number of emotional associations for each valid word
bank = []
wordcount = 0
for word in temp3:
if word in self.associations:
bank.extend(self.associations[word])
wordcount += 1
# Returns a tuple of integers for negative, positive, anger, fear, anticipation,
# surprise, trust, sadness, joy, disgust, and total word count, respectively.
return((bank.count('negative'),
bank.count('positive'),
bank.count('anger'),
bank.count('fear'),
bank.count('anticipation'),
bank.count('surprise'),
bank.count('trust'),
bank.count('sadness'),
bank.count('joy'),
bank.count('disgust'),
wordcount))
# This function removes parentheses and also the contents of the parentheses
# for the purposes of improving search matches when finding lyrics.
def remove_parenth(text):
patt = re.compile('\s*\(.*?\)\s*')
out = re.findall(patt, text)
if len(out) > 0:
text = text.replace(out[0], "")
return(text)
# This function converts characters (byte string) that are otherwise
# not caught by the replace_accents normalization function.
def replace_special(text):
temp1 = text.encode('utf-8')
temp2 = temp1.replace(b"\xc3\x98", b"O")
temp3 = temp2.replace(b"|", b"L")
temp4 = temp3.decode()
return(temp4)
# This function uses unicodedata to attempt to convert exotic characters, such
# as accents, to a byte-friendly alternative that can be used in a url.
def replace_accents(text):
temp1 = unicodedata.normalize('NFKD', text)
temp2 = temp1.encode('ASCII', 'ignore')
temp3 = temp2.decode()
return(temp3)
# This function removes html comment text embedded inside the lyric text.
def remove_comments(text):
patt = re.compile('(<!--.+?-->)')
out = re.findall(patt, text)
if len(out) > 0:
temp = text.replace(out[0], "")
else:
temp = text
return(temp)
# This function converts decimal character codes back into text. This is
# needed to decode the lyrics during webscraping (which are encoded as decimal character codes).
def decode_decimal(letters):
iletters = []
for i in letters:
if len(i) < 4:
iletters.append(int(i))
lyrics = ""
for i in iletters:
lyrics = lyrics + chr(i)
return(lyrics)
def getlyrics(track, artist):
# Main regex search pattern
Pattern = re.compile('lyricbox..>(.+?)<div class=..lyricsbreak')
# Attempt initial search using the raw song and artist name
url = "http://lyrics.wikia.com/wiki/" + artist + ":" + track
url = remove_parenth(url) # url: remove parentheses and its contents
url = url.strip().replace(" ", "_") # url: replace spaces with underscores
url = replace_special(url) # url: replace non-convertible special characters
url = replace_accents(url) # url: remove accents on characters
req = urllib.request.Request(url) # create Request object
print(req.get_full_url()) # print full url passed to urlopen
try:
data = urllib.request.urlopen(req) # open site and pull html
getdata = str(data.read()) # convert html to byte string
        output = re.findall(Pattern, getdata)             # search using main regex pattern
# If the search fails, but there is a recommended url:
if len(output) == 0:
patt = re.compile('Did you mean <a href=.(.+?)..title=')
output = re.findall(patt, getdata)
# If search still fails, but a redirect exists:
if len(output) == 0:
patt = re.compile('redirectText.><li><a href=.(.+?)..title=')
output = re.findall(patt, getdata)
url = "http://lyrics.wikia.com"
url = url + str(output[0]) # url: create new url
url = url.strip().replace(" ", "_") # url: replace spaces with underscores
url = replace_special(url) # url: replace non-convertible special characters
url = replace_accents(url) # url: remove accents on characters
req = urllib.request.Request(url) # url: create Request object
print(req.get_full_url()) # print full url passed to urlopen
data = urllib.request.urlopen(req) # open site and pull html
getdata = str(data.read()) # convert html to byte string
output = re.findall(Pattern, getdata) # search using main regex pattern
text = remove_comments(output[0]) # data: remove html comments
text = text.replace("<br />", " ") # data: replace breaks with spaces
text = text.replace("<i>", "") # data: remove italic formatting
text = text.replace("</i>", "") # data: remove italic formatting
text = text.replace("&#", "") # data: remove throwaway characters
letters = text.split(sep = ";") # data: split data based on semicolon
letters.pop() # data: remove last element (always blank)
lyrics = decode_decimal(letters) # data: convert integers to decimal characters
# Write to output file
return(lyrics)
# This is the last-resort case where there are no reasonable matches
except Exception:
return('Not found')
pass
# This function creates a string list of all days between the start and end dates
# including the start date, but excluding the end date
def days_between(start, end):
# Start and end must be date objects
delta = end - start
out = []
for i in range(0, delta.days):
out.append(str(start + timedelta(i)))
return(out)
# This function combines streaming data from spotifycharts.com based on the
# requested start and end dates (output is written to "spotifycharts.csv")
def spotify_charts(start, end):
headers = ['Position', 'Track Name', 'Artist', 'Streams', 'URL', 'Date']
# Write headers into output CSV file
with open("spotifycharts.csv", "w", newline = '', encoding='utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writeheader()
# Create string list of days between requested start and end dates
datelist = days_between(start, end)
# Collect CSV file for each date, and write to output file
for i in datelist:
# Open connection to URL
url = 'https://spotifycharts.com/regional/us/daily/' + i + '/download'
f = urllib.request.urlopen(url)
output = pd.read_csv(f)
for line in output.iterrows():
with open("spotifycharts.csv", "a", newline = '', encoding = 'utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writerow({'Position': line[1][0],
'Track Name': line[1][1],
'Artist': line[1][2],
'Streams': line[1][3],
'URL': line[1][4],
'Date': i})
f.close() # Close connection
def spotify_charts_emotions():
# Read the data
df = pd.read_csv('spotifycharts.csv')
# Create track name and artist concatenation (to determine unique tracks)
df['name'] = df[['Track Name', 'Artist']].apply(lambda x: '--'.join(x), axis = 1)
# Create flag variable for unique tracks
bank = [] # Create a bank of unique uid's
duplicates = [] # This will become a Boolean list: 0=First instance, 1=Duplicate
for i in df['name']:
if i not in bank:
duplicates.append(0)
bank.append(i)
else:
duplicates.append(1)
df['Duplicates'] = duplicates
# Create data frame of only unique tracks
uniquetracks = df[['Track Name', 'Artist', 'name']].loc[df['Duplicates'] == 0]
associator = word_assocations()
headers = ['Track Name', 'Artist', 'name', 'negative', 'positive', 'anger',
'fear', 'anticipation', 'surprise', 'trust', 'sadness', 'joy',
'disgust', 'wordcount', 'lyrics', 'negative_percent', 'positive_percent',
'anger_percent', 'fear_percent', 'anticipation_percent',
'surprise_percent', 'trust_percent', 'sadness_percent', 'joy_percent',
'disgust_percent']
# Write headers into output CSV file
with open("spotifychartsemotions.csv", "w", newline = '', encoding='utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writeheader()
for line in uniquetracks.iterrows():
temp_track = line[1][0]
temp_artist = line[1][1]
temp_name = line[1][2]
temp_lyrics = getlyrics(temp_track, temp_artist)
temp_emotions = associator.count_emotions(temp_lyrics)
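        # Only tracks with at least one negative word association are written out;
        # this also avoids dividing by a zero word count when no lyrics were found.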
if temp_emotions[0] > 0:
negative_percent = temp_emotions[0] / temp_emotions[10]
positive_percent = temp_emotions[1] / temp_emotions[10]
anger_percent = temp_emotions[2] / temp_emotions[10]
fear_percent = temp_emotions[3] / temp_emotions[10]
anticipation_percent= temp_emotions[4] / temp_emotions[10]
surprise_percent = temp_emotions[5] / temp_emotions[10]
trust_percent = temp_emotions[6] / temp_emotions[10]
sadness_percent = temp_emotions[7] / temp_emotions[10]
joy_percent = temp_emotions[8] / temp_emotions[10]
disgust_percent = temp_emotions[9] / temp_emotions[10]
with open("spotifychartsemotions.csv", "a", newline = '', encoding = 'utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writerow({'Track Name': temp_track,
'Artist': temp_artist,
'name': temp_name,
'negative': temp_emotions[0],
'positive': temp_emotions[1],
'anger': temp_emotions[2],
'fear': temp_emotions[3],
'anticipation': temp_emotions[4],
'surprise': temp_emotions[5],
'trust': temp_emotions[6],
'sadness': temp_emotions[7],
'joy': temp_emotions[8],
'disgust': temp_emotions[9],
'wordcount': temp_emotions[10],
'lyrics': temp_lyrics,
'negative_percent': negative_percent,
'positive_percent': positive_percent,
'anger_percent': anger_percent,
'fear_percent': fear_percent,
'anticipation_percent': anticipation_percent,
'surprise_percent': surprise_percent,
'trust_percent': trust_percent,
'sadness_percent': sadness_percent,
'joy_percent': joy_percent,
'disgust_percent': disgust_percent})
def articles_emotions():
# Read the data
df = pd.read_csv('getdayarticles.csv')
associator = word_assocations()
headers = ['id', 'header', 'date', 'location', 'categories', 'description',
'socialmediascore','negative', 'positive', 'anger',
'fear', 'anticipation', 'surprise', 'trust', 'sadness', 'joy',
'disgust', 'wordcount', 'negative_percent', 'positive_percent',
'anger_percent', 'fear_percent', 'anticipation_percent',
'surprise_percent', 'trust_percent', 'sadness_percent', 'joy_percent',
'disgust_percent']
# Write headers into output CSV file
with open("getdayarticlesemotions.csv", "w", newline = '', encoding='utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writeheader()
for line in df.iterrows():
temp_emotions = associator.count_emotions(line[1][1] + ' ' + line[1][5])
if temp_emotions[0] > 0:
negative_percent = temp_emotions[0] / temp_emotions[10]
positive_percent = temp_emotions[1] / temp_emotions[10]
anger_percent = temp_emotions[2] / temp_emotions[10]
fear_percent = temp_emotions[3] / temp_emotions[10]
anticipation_percent= temp_emotions[4] / temp_emotions[10]
surprise_percent = temp_emotions[5] / temp_emotions[10]
trust_percent = temp_emotions[6] / temp_emotions[10]
sadness_percent = temp_emotions[7] / temp_emotions[10]
joy_percent = temp_emotions[8] / temp_emotions[10]
disgust_percent = temp_emotions[9] / temp_emotions[10]
with open("getdayarticlesemotions.csv", "a", newline = '', encoding = 'utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writerow({'id': line[1][0],
'header': line[1][1],
'date': line[1][2],
'location': line[1][3],
'categories': line[1][4],
'description': line[1][5],
'socialmediascore': line[1][6],
'negative': temp_emotions[0],
'positive': temp_emotions[1],
'anger': temp_emotions[2],
'fear': temp_emotions[3],
'anticipation': temp_emotions[4],
'surprise': temp_emotions[5],
'trust': temp_emotions[6],
'sadness': temp_emotions[7],
'joy': temp_emotions[8],
'disgust': temp_emotions[9],
'wordcount': temp_emotions[10],
'negative_percent': negative_percent,
'positive_percent': positive_percent,
'anger_percent': anger_percent,
'fear_percent': fear_percent,
'anticipation_percent': anticipation_percent,
'surprise_percent': surprise_percent,
'trust_percent': trust_percent,
'sadness_percent': sadness_percent,
'joy_percent': joy_percent,
'disgust_percent': disgust_percent})
def articles_emotions_perday():
# Read the data
df = pd.read_csv('getdayarticlesemotions.csv')
headers = ['date', 'anger_percent_weighted', 'sadness_percent_weighted', 'joy_percent_weighted']
# Convert string to integer
df['socialmediascore'] = [int(x.replace(',', '')) for x in list((df['socialmediascore']))]
df['anger_percent_weighted'] = np.multiply(list(df['socialmediascore']), list(df['anger_percent']))
df['sadness_percent_weighted'] = np.multiply(list(df['socialmediascore']), list(df['sadness_percent']))
df['joy_percent_weighted'] = np.multiply(list(df['socialmediascore']), list(df['joy_percent']))
sums = df['socialmediascore'].groupby(df['date']).sum()
anger_percent_weighted = df['anger_percent_weighted'].groupby(df['date']).sum()
sadness_percent_weighted = df['sadness_percent_weighted'].groupby(df['date']).sum()
joy_percent_weighted = df['joy_percent_weighted'].groupby(df['date']).sum()
out = pd.concat([sums, anger_percent_weighted, sadness_percent_weighted, joy_percent_weighted], axis = 1)
out['anger_percent_weighted'] = np.divide(list(out['anger_percent_weighted']), list(out['socialmediascore']))
out['sadness_percent_weighted'] = np.divide(list(out['sadness_percent_weighted']), list(out['socialmediascore']))
out['joy_percent_weighted'] = np.divide(list(out['joy_percent_weighted']), list(out['socialmediascore']))
# Write headers into output CSV file
with open("getdayarticlesemotionsperday.csv", "w", newline = '', encoding='utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writeheader()
for line in out.iterrows():
with open("getdayarticlesemotionsperday.csv", "a", newline = '', encoding = 'utf-8') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writerow({'date': line[0][5:],
'anger_percent_weighted': line[1][1],
'sadness_percent_weighted': line[1][2],
'joy_percent_weighted': line[1][3]})
#### Generates K-means centroids (as a CSV file) and also returns the labels
def kmeans_centroids(df, var, k, name):
kmeans = KMeans(n_clusters = k)
kmeans.fit(df[var])
labels = kmeans.labels_ # Save labels for use later
centroids = kmeans.cluster_centers_
kmeansout = pd.DataFrame(centroids, columns = var) # Create dataframe of centroids
kmeanscounts = pd.Series(labels, name = "Counts").value_counts() # Create number of points in each cluster
kmeansout = pd.concat([kmeansout, kmeanscounts], axis = 1)
kmeansout.to_csv(name, sep=',', index = True, header = True)
return(labels) # Return labels to be used later
#### Generates silhouette scores for K-means using a list of the number of clusters
def kmeans_silhouette(df, var, k, name):
with open(name, "w", newline = None, encoding = 'utf-8') as file:
file.write("\n\nThe following are silhouette scores for K-means with varying number of K clusters: \n\n")
with open(name, "a", newline = None, encoding = 'utf-8') as file:
for c in k:
kmeans = KMeans(n_clusters = c)
kmeans.fit(df[var])
labels = kmeans.labels_
file.write("For K=" + str(c) + ", the silhouette score is: " + str(silhouette_score(df[var], labels)) + "\n")
#### Generates Ward group means (as a CSV file) and also returns the labels
def ward_groupmeans(df, var, k, name):
ward = AgglomerativeClustering(n_clusters = k, linkage = 'ward')
ward.fit(df[var])
labels = ward.labels_ # Save labels for use later
wardout = df[var].groupby(labels).mean() # Create grouped means
wardcounts = pd.Series(labels, name = "Counts").value_counts() # Create number of points in each cluster
wardout = pd.concat([wardout, wardcounts], axis = 1)
wardout.to_csv(name, sep=',', index = True, header = True) # Save to file
return(labels) # Return labels to be used later
#### Generates silhouette scores for Ward using a list of the number of clusters
def ward_silhouette(df, var, k, name):
with open(name, "w", newline = None, encoding = 'utf-8') as file:
file.write("\n\nThe following are silhouette scores for Ward's method with varying number of K clusters: \n\n")
with open(name, "a", newline = None, encoding = 'utf-8') as file:
for c in k:
ward = AgglomerativeClustering(n_clusters = c, linkage = 'ward')
ward.fit(df[var])
labels = ward.labels_
file.write("For K=" + str(c) + ", the silhouette score is: " + str(silhouette_score(df[var], labels)) + "\n")
#### Generates 3D scatterplots
def scatterplotclusters(df, var, labels, title, savename):
fig = plt.figure(figsize = (12, 12))
ax = fig.add_subplot(111, projection = '3d')
colors = cm.rainbow(np.linspace(0, 1, len(set(labels)))) # Use automatic color selection based on cluster count
for name, group in df[var].groupby(labels):
ax.scatter(group[var[0]], group[var[1]], group[var[2]],
alpha = 0.8, c = colors[name], label = name)
ax.set_xlabel(var[0])
ax.set_ylabel(var[1])
ax.set_zlabel(var[2])
plt.title(title)
ax.legend()
plt.savefig(savename)
plt.clf()
plt.close()
#### Pulls news articles from EventRegistry.org for each day within the specified range
def getdayarticles(start, end, directory, login_email, login_password):
# Create CSV file with appropriate headers
with open("getdayarticles.csv", "w", newline = '', encoding='utf-8') as f:
fieldnames = ['id', 'header', 'date', 'location', 'categories', 'description', 'socialmediascore']
writer = csv.DictWriter(f, fieldnames = fieldnames)
writer.writeheader()
# Open new browser and login to eventregistry.org
browser = webdriver.Firefox(firefox_binary = directory)
browser.get("http://eventregistry.org/login?redirectUrl=%2FsearchEvents")
time.sleep(5)
username = browser.find_element_by_id("email")
password = browser.find_element_by_id("pass")
username.send_keys(login_email) # Enter email
password.send_keys(login_password) # Enter password
browser.find_element_by_xpath('//*[@id="form-id"]/button').click() # Click submit
time.sleep(5)
for day in days_between(start, end):
# Open new tab
browser.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')
# Create URL based on day
url = "http://eventregistry.org/searchEvents?query=%7B%22" + \
"locations%22:%5B%7B%22label%22:%22United%20States" + \
"%22,%22uri%22:%22http:%2F%2Fen.wikipedia.org%2Fwiki%2F" + \
"United_States%22,%22negate%22:false%7D%5D,%22dateStart%22:%22" + \
day + "%22,%22dateEnd%22:%22" + \
day + "%22,%22lang%22:%22eng%22,%22minArticles%22:50,%22" + \
"preferredLang%22:%22eng%22%7D&tab=events"
browser.get(url) # Open URL
        time.sleep(50)                     # Wait 50 seconds for the page to load
# Click "sort events by social media hotness" to get most popular events
browser.find_element_by_xpath('//*[@id="tab-events"]/div/div/div[3]/div[2]/div/div[2]/button[4]').click()
time.sleep(5) # Wait 5 seconds for page to reload
out = browser.page_source.encode("utf-8") # Save source code
# Save social media score for each news event
temp1 = BeautifulSoup(out, "lxml").findAll("span", {'class': "score ng-binding"})
socialmedia = []
for i in temp1:
socialmedia.append(i.contents[0])
# Save header for each news event
temp2 = BeautifulSoup(out, "lxml").findAll("h4", {'class': "media-heading"})
articleheader = []
for i in temp2:
articleheader.append(i.contents[0].contents[0])
# Save time and date and location for each news event
temp3 = BeautifulSoup(out, "lxml").findAll("span", {'class': "info-val ng-binding"})
timedate = []
for i in temp3:
timedate.append(i.contents[0])
dates = timedate[::2]
location = timedate[1::2]
# Save categories for each news event
temp4 = BeautifulSoup(out, "lxml").findAll("div", {'class': "categories"})
categories = []
for i in temp4:
k = i.findAll("span", {'class': "ng-binding"})
t = []
for j in k:
t.append(j.contents[0].replace('→',', '))
categories.append(t)
# Save description of each news event
temp5 = BeautifulSoup(out, "lxml").findAll("div", {'class': "lighter smaller ng-binding"})
description = []
for i in temp5:
description.append(i.contents[0])
# Save news event ID
temp6 = BeautifulSoup(out, "lxml").find_all("a", {'target': "_blank", 'class': "ng-binding"}, href=True)
eventids = []
for i in temp6:
eventids.append(i['href'])
eventids = eventids[1:] # Remove first element (contains no information)
ids = []
for i in eventids:
ids.append(re.findall('/event/(.......).lang=eng', i)[0])
articles = pd.DataFrame([ids, articleheader, dates, location, categories, description, socialmedia])
for j in range(0, articles.shape[1]):
# Write to output file
with open("getdayarticles.csv", "a", newline = '', encoding='utf-8') as file:
writer = csv.DictWriter(file, fieldnames = fieldnames)
writer.writerow({'id': articles[j][0],
'header': articles[j][1],
'date': articles[j][2],
'location': articles[j][3],
'categories': articles[j][4],
'description': articles[j][5],
'socialmediascore': articles[j][6]})
browser.quit()
#### Applies the longitudinal multi-layer perceptron model (one hidden layer of 100 units) to the
#### lagged values of the predictor and returns the predicted values. It also plots the results as 'name.png'.
def nn_tester(df, predictor, predicted, lag, name):
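    # Build a supervised dataset with a sliding window: each row of X holds the
    # previous `lag` values of the predictor series, and y holds the predicted
    # series' value on the current day.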
length = lag
start = 0
temp = df[[predictor, predicted]]
iterations = len(temp[predicted])
X = pd.DataFrame(np.zeros(length)).T
y = [0]
for i in range(length, iterations):
temp_y = temp[predicted][i]
temp_X = pd.DataFrame(temp[predictor][start:(i)]).T.reset_index(drop = True)
temp_X.columns = [x for x in range(0, lag)]
y.extend([temp_y])
X = pd.concat([X, temp_X])
start = start + 1
X.reset_index(inplace = True, drop = True)
X.drop(X.index[[0]], inplace = True)
X.reset_index(inplace = True, drop = True)
y = y[1:]
X_train = X[0:100] # Training set
X_test = X[100:] # Test set
y_train = y[0:100] # Training set
y_test = y[100:] # Test set
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
mlp = MLPRegressor(activation = 'logistic', solver = 'lbfgs', max_iter = 10000, tol = 1e-5,
alpha = .01, hidden_layer_sizes = (100,), random_state = 1)
mlp.fit(X_train, y_train)
print(mlp.score(X_test, df[predicted][-len(y_test):]))
plt.figure(figsize = (20, 12))
plot_pred = plt.plot(df['date'][lag:], mlp.predict(scaler.transform(X)))
plot_ytest = plt.plot(df['date'][lag:], y)
plt.setp(plot_pred, color = 'black', linestyle = '--', linewidth = 1.0)
plt.setp(plot_ytest, color = 'black', linewidth = 1.0)
plt.figtext(.8, .85, "R-Squared = " +
str(mlp.score(X_test, df[predicted][-len(y_test):]).round(3)), fontsize = 12)
plt.axvline(df['date'][len(df['date']) - len(y_test) - 1], color = 'r', linewidth = 1.0)
locs, labels = plt.xticks()
plt.setp(labels, rotation = 90)
plt.savefig(name)
plt.clf()
plt.close()
return(mlp.predict(scaler.transform(X)))
def main():
##################################################################################
# The following lines of code can be uncommented and run for test purposes, but
    # we recommend running them with a smaller date window. It should also be noted that
    # EventRegistry.org (the site where the news article data were pulled) is currently
    # undergoing restructuring due to Google's move to fund their project -- so it is
    # possible that the results will be incomplete if run at the current time.
#
# IMPORTANT: The getdayarticles() function requires the installation of the
# selenium Python package (through pip), the geckodriver application (which is
# included in the with this code for Windows), and a valid installation of Firefox
# along with the directory to its application (which needs to be placed in the
# 'directory' argument).
##################################################################################
#
# start_date = date(2016, 6, 1)
# end_date = date(2016, 11, 21)
# spotify_charts(start_date, end_date)
# spotify_charts_emotions()
# getdayarticles(start = start_date,
# end = end_date,
# directory = r'C:\Program Files (x86)\Mozilla Firefox\firefox.exe',
# login_email = "jmc511@georgetown.edu",
# login_password = "password123")
# articles_emotions()
# articles_emotions_perday()
#
##################################################################################
##### Clustering for Songs #####
# Import data
df = pd.read_csv('spotifychartsemotions.csv')
# Silhouette scores
kmeans_silhouette(df, var = ['anger_percent', 'sadness_percent', 'joy_percent'],
k = [2,3,4,5,6,7,8,9,10], name = "kmeans_silhouettescores_songs.txt")
ward_silhouette(df, var = ['anger_percent', 'sadness_percent', 'joy_percent'],
k = [2,3,4,5,6,7,8,9,10], name = "ward_silhouettescores_songs.txt")
# Get labels for K-means and Ward
labels1 = kmeans_centroids(df, var = ['anger_percent', 'sadness_percent', 'joy_percent'], k = 3,
name = 'kmeans_centroids_songs.csv')
labels2 = ward_groupmeans(df, var = ['anger_percent', 'sadness_percent', 'joy_percent'], k = 3,
name = 'ward_groupedmeans_songs.csv')
# Plot 3D scatterplot
scatterplotclusters(df = df, var = ['anger_percent', 'sadness_percent', 'joy_percent'],
labels = labels1,
title = 'K-Means Scatterplot by Cluster',
savename = "kmeans_3Dscatterplot_songs")
scatterplotclusters(df = df, var = ['anger_percent', 'sadness_percent', 'joy_percent'],
labels = labels2,
title = 'Ward Scatterplot by Cluster',
savename = "ward_3Dscatterplot_songs")
##### Group Song longitudinal by Clusters #####
# Import data
df2 = pd.read_csv('spotifycharts.csv')
df2['name'] = df2[['Track Name', 'Artist']].apply(lambda x: '--'.join(x), axis = 1)
# Use labels and group longitudinal data by clusters
positive_tracks = df['name'].loc[labels1 == 1]
negative_tracks = df['name'].loc[labels1 == 0]
null_tracks = df['name'].loc[labels1 == 2]
positive_tracks_labels = df2.loc[df2['name'].isin(positive_tracks)]
negative_tracks_labels = df2.loc[df2['name'].isin(negative_tracks)]
null_tracks_labels = df2.loc[df2['name'].isin(null_tracks)]
##### Get average stream counts for each of emotion class #####
positive_grouped = positive_tracks_labels.groupby('Date').mean()
negative_grouped = negative_tracks_labels.groupby('Date').mean()
null_grouped = null_tracks_labels.groupby('Date').mean()
emotion_grouped = pd.concat([positive_grouped['Streams'],
negative_grouped['Streams'],
null_grouped['Streams']], axis = 1,
keys = ['positive_grouped', 'negative_grouped', 'null_grouped'])
emotion_grouped['date'] = emotion_grouped.index.values
emotion_grouped = emotion_grouped.reset_index(drop = True)
# Get emotion percentages
article_emotions = pd.read_csv('getdayarticlesemotionsperday.csv')
article_emotions['date'] = [datetime.datetime.strptime(x, '%B %d, %Y') for x in article_emotions['date']]
article_emotions = article_emotions.sort_values('date')
article_emotions['date'] = [str(x)[:10] for x in article_emotions['date']]
article_emotions = article_emotions.reset_index(drop = True)
# Merge data for plotting
plotdata = pd.merge(emotion_grouped, article_emotions[['anger_percent_weighted', 'sadness_percent_weighted',
'joy_percent_weighted', 'date']],
on = 'date', how = 'left')
plotdata.fillna(0, inplace = True) # Some article percentages are NaN, so convert these to zero
plotdata['date'] = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in plotdata['date']]
plotdata.to_csv('emotion_analysis.csv', sep=',', index = True, header = True) # Write to CSV
##### Line Plots (matplotlib images) #####
# Import data
plotdata = pd.read_csv('emotion_analysis.csv')
plotdata['date'] = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in plotdata['date']]
# Plot of news article emotion percentages
plt.figure(figsize = (10, 8))
anger_article = plt.plot(plotdata['date'], plotdata['anger_percent_weighted'])
sadness_article = plt.plot(plotdata['date'], plotdata['sadness_percent_weighted'])
joy_article = plt.plot(plotdata['date'], plotdata['joy_percent_weighted'])
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, 0, .6))
plt.setp(anger_article, color = 'r', linewidth = 1.0)
plt.setp(sadness_article, color = 'b', linewidth = 1.0)
plt.setp(joy_article, color = 'g', linewidth = 1.0)
plt.tick_params(axis = 'y', which = 'major', labelsize = 10)
plt.tick_params(axis = 'y', which = 'minor', labelsize = 10)
plt.tick_params(axis = 'x', which = 'major', labelsize = 9)
plt.tick_params(axis = 'x', which = 'minor', labelsize = 9)
locs, labels = plt.xticks()
plt.setp(labels, rotation = 90)
plt.legend()
plt.savefig('emotion_articles')
plt.clf()
plt.close()
# Plot of average song streaming counts by emotion
plt.figure(figsize = (10, 8))
positive_streamed = plt.plot(plotdata['date'], plotdata['positive_grouped'])
negative_streamed = plt.plot(plotdata['date'], plotdata['negative_grouped'])
null_streamed = plt.plot(plotdata['date'], plotdata['null_grouped'])
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, 0, 500000))
plt.setp(positive_streamed, color = 'g', linewidth = 1.0)
plt.setp(negative_streamed, color = 'r', linewidth = 1.0)
plt.setp(null_streamed, color = 'black', linewidth = 1.0)
plt.tick_params(axis = 'y', which = 'major', labelsize = 10)
plt.tick_params(axis = 'y', which = 'minor', labelsize = 10)
plt.tick_params(axis = 'x', which = 'major', labelsize = 9)
plt.tick_params(axis = 'x', which = 'minor', labelsize = 9)
locs, labels = plt.xticks()
plt.setp(labels, rotation = 90)
plt.legend()
plt.savefig('emotion_streamed')
plt.clf()
plt.close()
##### Hypothesis Tests #####
# Perform Granger Causality Tests (use sys.stdout to capture printed outputs)
plotdata.set_index(keys = plotdata['date'], inplace = True) # Set date as index
# Article percentages are all stationary (ADF should be significant)
print(adfuller(plotdata['anger_percent_weighted'], autolag = 'bic',
regression = 'ct', maxlag = 10))
print(adfuller(plotdata['sadness_percent_weighted'], autolag = 'bic',
regression = 'ct', maxlag = 10))
print(adfuller(plotdata['joy_percent_weighted'], autolag = 'bic',
regression = 'ct', maxlag = 10))
# Make positive_grouped stationary via moving average
moving_avg = pd.rolling_mean(plotdata['positive_grouped'], 6)
plotdata['positive_grouped_ma'] = plotdata['positive_grouped'] - moving_avg
print(adfuller(plotdata['positive_grouped_ma'].dropna(), autolag = 'bic',
regression = 'ct', maxlag = 10))
    # Make negative_grouped stationary via moving average
moving_avg = pd.rolling_mean(plotdata['negative_grouped'], 6)
plotdata['negative_grouped_ma'] = plotdata['negative_grouped'] - moving_avg
print(adfuller(plotdata['negative_grouped_ma'].dropna(), autolag = 'bic',
regression = 'ct', maxlag = 10))
# Make null_grouped stationary via moving average
moving_avg = pd.rolling_mean(plotdata['null_grouped'], 6)
plotdata['null_grouped_ma'] = plotdata['null_grouped'] - moving_avg
print(adfuller(plotdata['null_grouped_ma'].dropna(), autolag = 'bic',
regression = 'ct', maxlag = 10))
    # Perform Granger tests using ma variables (and save to grangertests.txt)
former, sys.stdout = sys.stdout, open('grangertests.txt', 'w')
print('\n\nOutput for Granger: anger_percent_weighted Granger causes positive_grouped_ma\n')
grangercausalitytests(plotdata[['positive_grouped_ma', 'anger_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: sadness_percent_weighted Granger causes positive_grouped_ma\n')
grangercausalitytests(plotdata[['positive_grouped_ma', 'sadness_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: joy_percent_weighted Granger causes positive_grouped_ma\n')
grangercausalitytests(plotdata[['positive_grouped_ma', 'joy_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: anger_percent_weighted Granger causes negative_grouped_ma\n')
grangercausalitytests(plotdata[['negative_grouped_ma', 'anger_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: sadness_percent_weighted Granger causes negative_grouped_ma\n')
grangercausalitytests(plotdata[['negative_grouped_ma', 'sadness_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: joy_percent_weighted Granger causes negative_grouped_ma\n')
grangercausalitytests(plotdata[['negative_grouped_ma', 'joy_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: anger_percent_weighted Granger causes null_grouped_ma\n')
grangercausalitytests(plotdata[['null_grouped_ma', 'anger_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: sadness_percent_weighted Granger causes null_grouped_ma\n')
grangercausalitytests(plotdata[['null_grouped_ma', 'sadness_percent_weighted']].dropna(), maxlag = 7)
print('\n\nOutput for Granger: joy_percent_weighted Granger causes null_grouped_ma\n')
grangercausalitytests(plotdata[['null_grouped_ma', 'joy_percent_weighted']].dropna(), maxlag = 7)
results, sys.stdout = sys.stdout, former
results.close()
##### Prediction using Neural Networks #####
# Run multi-layer perceptron with 100 hidden units, alpha = .01, lbfgs optimizer, and
# logistic activation function. Functions return predicted values.
lag = 7
nn_positive_anger = nn_tester(df = plotdata, predictor = 'anger_percent_weighted',
predicted = 'positive_grouped', lag = lag,
name = 'nn_positive_anger')
nn_positive_sadness = nn_tester(df = plotdata, predictor = 'sadness_percent_weighted',
predicted = 'positive_grouped', lag = lag,
name = 'nn_positive_sadness')
nn_positive_joy = nn_tester(df = plotdata, predictor = 'joy_percent_weighted',
predicted = 'positive_grouped', lag = lag,
name = 'nn_positive_joy')
nn_negative_anger = nn_tester(df = plotdata, predictor = 'anger_percent_weighted',
predicted = 'negative_grouped', lag = lag,
name = 'nn_negative_anger')
nn_negative_sadness = nn_tester(df = plotdata, predictor = 'sadness_percent_weighted',
predicted = 'negative_grouped', lag = lag,
name = 'nn_negative_sadness')
nn_negative_joy = nn_tester(df = plotdata, predictor = 'joy_percent_weighted',
predicted = 'negative_grouped', lag = lag,
name = 'nn_negative_joy')
nn_null_anger = nn_tester(df = plotdata, predictor = 'anger_percent_weighted',
predicted = 'null_grouped', lag = lag,
name = 'nn_null_anger')
nn_null_sadness = nn_tester(df = plotdata, predictor = 'sadness_percent_weighted',
predicted = 'null_grouped', lag = lag,
name = 'nn_null_sadness')
nn_null_joy = nn_tester(df = plotdata, predictor = 'joy_percent_weighted',
predicted = 'null_grouped', lag = lag,
name = 'nn_null_joy')
##### Interactive plot of findings (bokeh) #####
# x-axis
x = plotdata['date'][lag:]
# Different y-axes
y1_1 = plotdata['positive_grouped'][lag:]
y1_2 = nn_positive_anger
y2_1 = plotdata['positive_grouped'][lag:]
y2_2 = nn_positive_sadness
y3_1 = plotdata['positive_grouped'][lag:]
y3_2 = nn_positive_joy
y4_1 = plotdata['negative_grouped'][lag:]
y4_2 = nn_negative_anger
y5_1 = plotdata['negative_grouped'][lag:]
y5_2 = nn_negative_sadness
y6_1 = plotdata['negative_grouped'][lag:]
y6_2 = nn_negative_joy
y7_1 = plotdata['null_grouped'][lag:]
y7_2 = nn_null_anger
y8_1 = plotdata['null_grouped'][lag:]
y8_2 = nn_null_sadness
y9_1 = plotdata['null_grouped'][lag:]
y9_2 = nn_null_joy
# Plot predictions for Average Positive Stream
output_file("plots1.html", title = "Prediction of Song Playcounts")
s1 = figure(width = 900, plot_height = 300, title = "Positive Streams by Article Anger",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s1.line(x, y1_1, color = 'green', legend = "Average Positive Stream Count", line_width = 2,
line_alpha = 0.7)
s1.line(x, y1_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Anger", line_width = 2, line_dash = 'dotted')
s1.left[0].formatter.use_scientific = False
vline1 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s1.renderers.extend([vline1])
s2 = figure(width = 900, plot_height = 300, title = "Positive Streams by Article Sadness",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s2.line(x, y2_1, color = 'green', legend = "Average Positive Stream Count", line_width = 2,
line_alpha = 0.7)
s2.line(x, y2_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Sadness", line_width = 2, line_dash = 'dotted')
s2.left[0].formatter.use_scientific = False
vline2 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s2.renderers.extend([vline2])
s3 = figure(width = 900, plot_height = 300, title = "Positive Streams by Article Joy",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s3.line(x, y3_1, color = 'green', legend = "Average Positive Stream Count", line_width = 2,
line_alpha = 0.7)
s3.line(x, y3_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Joy", line_width = 2, line_dash = 'dotted')
s3.left[0].formatter.use_scientific = False
vline3 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s3.renderers.extend([vline3])
p = vplot(s1, s2, s3)
show(p)
# Plot predictions for Average Negative Stream
output_file("plots2.html", title = "Prediction of Song Playcounts")
s4 = figure(width = 900, plot_height = 300, title = "Negative Streams by Article Anger",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s4.line(x, y4_1, color = 'red', legend = "Average Negative Stream Count", line_width = 2,
line_alpha = 0.7)
s4.line(x, y4_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Anger", line_width = 2, line_dash = 'dotted')
s4.left[0].formatter.use_scientific = False
vline4 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s4.renderers.extend([vline4])
s5 = figure(width = 900, plot_height = 300, title = "Negative Streams by Article Sadness",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s5.line(x, y5_1, color = 'red', legend = "Average Negative Stream Count", line_width = 2,
line_alpha = 0.7)
s5.line(x, y5_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Sadness", line_width = 2, line_dash = 'dotted')
s5.left[0].formatter.use_scientific = False
vline5 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s5.renderers.extend([vline5])
s6 = figure(width = 900, plot_height = 300, title = "Negative Streams by Article Joy",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s6.line(x, y6_1, color = 'red', legend = "Average Negative Stream Count", line_width = 2,
line_alpha = 0.7)
s6.line(x, y6_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Joy", line_width = 2, line_dash = 'dotted')
s6.left[0].formatter.use_scientific = False
vline6 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s6.renderers.extend([vline6])
p = vplot(s4, s5, s6)
show(p)
# Plot predictions for Average Null Stream
output_file("plots3.html", title = "Prediction of Song Playcounts")
s7 = figure(width = 900, plot_height = 300, title = "Null Streams by Article Anger",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s7.line(x, y7_1, color = 'black', legend = "Average Null Stream Count", line_width = 2,
line_alpha = 0.7)
s7.line(x, y7_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Anger", line_width = 2, line_dash = 'dotted')
s7.left[0].formatter.use_scientific = False
vline7 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s7.renderers.extend([vline7])
s8 = figure(width = 900, plot_height = 300, title = "Null Streams by Article Sadness",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s8.line(x, y8_1, color = 'black', legend = "Average Null Stream Count", line_width = 2,
line_alpha = 0.7)
s8.line(x, y8_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Sadness", line_width = 2, line_dash = 'dotted')
s8.left[0].formatter.use_scientific = False
vline8 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s8.renderers.extend([vline8])
s9 = figure(width = 900, plot_height = 300, title = "Null Streams by Article Joy",
x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset")
s9.line(x, y9_1, color = 'black', legend = "Average Null Stream Count", line_width = 2,
line_alpha = 0.7)
s9.line(x, y9_2, color = 'black', line_alpha = 0.7,
legend = "Predicted by Percent Joy", line_width = 2, line_dash = 'dotted')
s9.left[0].formatter.use_scientific = False
vline9 = Span(location = plotdata['date'][lag + 100 - 1].timestamp()*1000,
dimension = 'height', line_color = 'red', line_width = 1)
s9.renderers.extend([vline9])
p = vplot(s7, s8, s9)
show(p)
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 54,521 | py | 37 | projectpart3.py | 3 | 0.564705 | 0.547792 | 0.000037 | 1,098 | 47.653005 | 121 |
glennneiger/magicmirror | 6,992,206,759,743 | 140faa740f9a491c35b78e915857b50f23331f36 | 82acb938feb7482b07d72114342e9f29663611f2 | /python scripts/motion_sensor_updated.py | 7175556fbfaa63a4b6a01d298b71242167eff6e3 | [] | no_license | https://github.com/glennneiger/magicmirror | 80c677fc467d6b1946fd390b9b20a715e4396428 | 606f7801d57c613c23696a988d0d435f7e46cda4 | refs/heads/master | 2020-04-19T06:00:14.523986 | 2018-10-08T08:01:59 | 2018-10-08T08:01:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
import os
import glob
import serial
import subprocess
from subprocess import Popen, PIPE, STDOUT
GPIO.setmode(GPIO.BOARD)
LED_OUT = 10
MOTION_IN = 8
device_file = ""
def initialize_all():
    # write to the module-level device_file so temperature_read() can find the sensor
    global device_file
#ser = serial.Serial('/dev/ttyACM0', 115200)
GPIO.setup(LED_OUT, GPIO.OUT)
GPIO.setup(MOTION_IN, GPIO.IN)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28-01be4007010c')[0]
device_file = device_folder + '/w1_slave'
print ("Sensor initializing............")
time.sleep(1);
print ('Sensor Ready....')
return 0
def motion_detect():
while True:
if GPIO.input(MOTION_IN) == True:
print ('Motion Detected!!!!\n')
GPIO.output(LED_OUT, True)
command = ('vcgencmd display_power 1')
subprocess.call(command, shell=True)
else:
command = 'vcgencmd display_power 0'
subprocess.call(command, shell=True)
print ('No one Here\n')
GPIO.output(LED_OUT, False)
return 0
def temperature_read():
f = open(device_file, 'r')
lines = f.readlines()
print(lines)
f.close()
return lines
def read_temp():
lines = temperature_read()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = temperature_read()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c, temp_f
def write_temp_to_serial():
print(read_temp())
return 0
#initializes all sensors and variables
initialize_all()
#reads to check if motion has been detected from the sensor
motion_detect()
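# NOTE: motion_detect() runs an endless loop, so the temperature calls below are
# only reached if that loop is interrupted or changed to exit.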
#reads temperature values from the temp* sensor
temperature_read()
#writes the read temperature value to the serial port
#which will be then read from NodeJs
write_temp_to_serial()
| UTF-8 | Python | false | false | 2,121 | py | 12 | motion_sensor_updated.py | 10 | 0.603017 | 0.578029 | 0 | 137 | 14.481752 | 62 |
nikolajjakubets/bypass_utility | 12,128,987,650,458 | e89e415f95a34b6f11edc1fcf3a410e3622d7f43 | b3e063f035f97f90d1305b148ea16fa37d320db2 | /src/exploit.py | c8b51f44723af9520de11460aad0c2eba36e3c20 | [
"MIT"
] | permissive | https://github.com/nikolajjakubets/bypass_utility | 31f9b36f5ee18a16fb179857424ed09356ad6ed5 | 08f860671813951934900031a358598bc032f162 | refs/heads/master | 2023-02-21T11:02:02.976401 | 2021-01-23T17:18:13 | 2021-01-23T17:28:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from src.common import to_bytes
import usb
def exploit(device, watchdog_address, var_0, var_1, payload):
addr = watchdog_address + 0x50
device.write32(addr, [0xA1000]) # 0x00100A00
if var_0:
readl = var_0 + 0x4
device.read32(addr - var_0, readl // 4)
else:
cnt = 15
for i in range(cnt):
device.read32(addr - (cnt - i) * 4, cnt - i + 1)
device.echo(0xE0)
payload = payload.read()
while len(payload) % 4 != 0:
payload += to_bytes(0)
device.echo(len(payload), 4)
# clear 2 bytes
device.read(2)
if len(payload) >= 0xA00:
raise RuntimeError("payload too large")
device.write(payload)
# clear 4 bytes
device.read(4)
udev = usb.core.find(idVendor=0x0E8D, idProduct=0x3)
try:
# noinspection PyProtectedMember
udev._ctx.managed_claim_interface = lambda *args, **kwargs: None
except AttributeError as e:
raise RuntimeError("libusb is not installed for port {}".format(device.dev.port)) from e
try:
udev.ctrl_transfer(0xA1, 0, 0, var_1, 0)
except usb.core.USBError as e:
print(e)
pattern = device.read(4)
if pattern != to_bytes(0xA1A2A3A4, 4):
raise RuntimeError("received {} instead of expected pattern".format(pattern.hex()))
| UTF-8 | Python | false | false | 1,325 | py | 2 | exploit.py | 2 | 0.610566 | 0.561509 | 0 | 52 | 24.480769 | 96 |
azulnaturalbz/chillbout_django | 18,511,309,077,209 | e734bbaed0d71052d95718addd07f376260b2ca4 | 675b512308cea8b96c42a1ad27d373355fcf7ebb | /establishments/migrations/0003_establishment_creation_date.py | faa7388c6707986212db5986d05511df4741c85c | [] | no_license | https://github.com/azulnaturalbz/chillbout_django | 2e7d8a959fb9d44801995a7dc8bbd1e21e91d45e | 0948c291f146368bf6ab0f74e1b9383d6e2655f8 | refs/heads/master | 2018-09-10T23:48:35.423582 | 2018-09-04T03:15:33 | 2018-09-04T03:15:33 | 136,164,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.4 on 2018-05-02 12:39
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('establishments', '0002_auto_20180426_1458'),
]
operations = [
migrations.AddField(
model_name='establishment',
name='creation_date',
field=models.DateField(default=datetime.datetime(2018, 5, 2, 12, 39, 55, 10623, tzinfo=utc)),
),
]
| UTF-8 | Python | false | false | 514 | py | 62 | 0003_establishment_creation_date.py | 43 | 0.638132 | 0.544747 | 0 | 20 | 24.7 | 105 |
encse/adventofcode-2015-python | 8,581,344,699,049 | 1b9496792bf7e45582e9ac76e97dba2a821e9eb2 | 47e082011ce03c55f3e92ef172233ea7bcc7c9a8 | /day20/solution.py | 81ed5c26f2a46e68af4c87f97e8b862b1608d099 | [] | no_license | https://github.com/encse/adventofcode-2015-python | 1138ade1e46ee5b37762054e7fa67d8c27e49e2b | 3e222b682430ae5570626ea9acf2ffe40f1e6264 | refs/heads/master | 2021-06-09T05:37:04.129547 | 2016-12-28T13:33:39 | 2016-12-28T13:33:39 | 47,879,224 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | dsum1 = [1] * 3600000
dsum2 = [1] * 3600000
i=2
res1 = None
res2 = None
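# Sieve over elves: elf i visits every multiple of i, so dsum1[d] accumulates the sum of
# divisors of house d (presents / 10 for part one), while dsum2 only credits an elf's
# first 50 houses (presents / 11 for part two). 36000000 is this puzzle's target input.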
while True:
d = i
while d < len(dsum1):
dsum1[d] += i
        if d <= i*50:
dsum2[d] += i
d += i
if not res1 and dsum1[i]*10 >= 36000000:
res1 = i
if not res2 and dsum2[i]*11 >= 36000000:
res2 = i
if res1 and res2:
break
i+=1
print(res1)
print(res2) | UTF-8 | Python | false | false | 355 | py | 26 | solution.py | 26 | 0.549296 | 0.388732 | 0 | 23 | 13.521739 | 41 |
gvnsai/python_practice | 15,796,889,717,697 | 1f2d2b4d7e8c3362ed55c0260f8dbbf910b9cb08 | d6a31cfac7f6c899399c492f450ff832f2f2f4f5 | /assignment-3/numpy_food/numpy_food.py | 6c1910338645249bed5074d41727e486257d8189 | [] | no_license | https://github.com/gvnsai/python_practice | c2e51e9c73bc901375b1fb397b80a28802d85754 | dffe4b6bb4a991d2caa790415c1e5b041ed95ee3 | refs/heads/master | 2020-03-26T17:55:25.713217 | 2018-11-10T05:56:48 | 2018-11-10T05:56:48 | 145,187,300 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ''' # Numpy in Python #
Create a Python script (numpy_food.py) to perform the following tasks:
• Query the database and retrieve data to calculate the average violations for every month
  between July 2015 and December 2017 for each postcode.
• Use MatPlotLib to plot the following data over time:
  o The violations per month for the postcode(s) with the highest total violations
  o The violations per month for the postcode(s) with the greatest variance (difference)
    between the lowest and highest number of violations for all months.
  o The average violations per month for ALL of California (all postcodes combined)
  o The violations per month for all McDonalds and Burger Kings. This will require a new
    query as it is not grouped by postal code.'''
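# Overall flow: query the SQLite database for inspection/violation joins, build pandas
# DataFrames, aggregate the counts per month, then plot the monthly series with matplotlib.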
import pandas as pd
import sqlite3
import matplotlib.pyplot as plt
pdlist=[]
pd1list=[]
mclist=[]
burlist=[]
try:
conn = sqlite3.connect('food_violations.db')
    print ("Database connected successfully");
    # SQL query: violation records joined with their inspections within the date range
sql = """SELECT \
VIOLATIONS.serial_number as v_code, \
INSPECTIONS.activity_date as date,\
INSPECTIONS.facility_zip as postcode\
FROM INSPECTIONS\
    INNER JOIN VIOLATIONS ON INSPECTIONS.serial_number=VIOLATIONS.serial_number WHERE INSPECTIONS.activity_date BETWEEN "2015-07-01" AND "2017-12-31";"""
#select inspections.activity_date,violations.violation_description from inspections,violations where inspections.serial_number=violations.serial_number WHERE INSPECTIONS.activity_date BETWEEN "2015-07-01" AND "2017-12-01";
cursor = conn.execute(sql)
#appending to a list inside the for
for row in cursor:
templist=[]
templist.append(row[0])
templist.append(row[1])
templist.append(row[2])
pdlist.append(templist)
print ("Operation done successfully");
    sql_mc = """ SELECT facility_name, violations.serial_number as v_code, inspections.activity_date as date FROM inspections INNER JOIN violations ON inspections.serial_number=violations.serial_number WHERE inspections.facility_name like '%McDonalds%';"""
cursor = conn.execute(sql_mc)
#appending to a list inside the for
for row in cursor:
templist=[]
templist.append(row[0])
templist.append(row[1])
templist.append(row[2])
mclist.append(templist)
print ("mcd Operation done successfully");
sql_bur = """ SELECT facility_name, violations.serial_number as v_code, inspections.activity_date as date FROM inspections INNER JOIN violations ON inspections.serial_number=violations.serial_number WHERE inspections.facility_name like '%BURGER KING%';"""
cursor = conn.execute(sql_bur)
#appending to a list inside the for
for row in cursor:
templist=[]
templist.append(row[0])
templist.append(row[1])
templist.append(row[2])
burlist.append(templist)
print ("burger Operation done successfully");
except Exception as e:
print(e)
clos=["v_code","date","postcodes"]
df=pd.DataFrame(pdlist,columns=clos)
df3=pd.DataFrame()
df4=pd.DataFrame()
y1=pd.DataFrame()
df['date'] = pd.to_datetime(df['date'])
df2=df.groupby(['v_code', 'date']).size().reset_index(name='counts')
df3['avg_counts']=df2.groupby(df2['date'].dt.strftime('%B'))['counts'].mean().sort_values()
df3['month']=df3.index
#average on a monthly basis
print(df3.head())
print(df.head())
#df to plot highest violations
df4=df.groupby(['postcodes','date']).size().reset_index(name='post_counts')
df4['count_max'] = df4.groupby(df4['date'].dt.strftime('%B'))['post_counts'].transform(max)
df4['count_min'] = df4.groupby(df4['date'].dt.strftime('%B'))['post_counts'].transform(min)
df4['diff_count'] = df4['count_max']-df4['count_min']
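# diff_count is the per-month spread between the largest and smallest violation counts.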
# bk dfs
mcclos=["name","v_code","date"]
mc=pd.DataFrame(mclist,columns=mcclos)
print(mc.head())
# bk dfs
bkclos=["name","v_code","date"]
bkdf=pd.DataFrame(burlist,columns=bkclos)
print(bkdf.head())
framelist=[mc,bkdf]
mcbkdf = pd.concat(framelist)
print(mcbkdf.head(30))
mcbkdf['date'] = pd.to_datetime(mcbkdf['date'])
mcbkdf=mcbkdf.groupby(['name', 'date']).size().reset_index(name='counts')
#print(mcbkdf.head())
y1['avg_counts']=mcbkdf.groupby(mcbkdf['date'].dt.strftime('%B'))['counts'].mean()
print(type(y1))
y1['month']=y1.index
print(y1.head())
#graphs
x=df4['date'].dt.strftime('%B').head(30)
y=df4['diff_count'].head(30)
plt.bar(x,y,color = "red" )
plt.title("violations difference")
plt.xlabel("dates")
plt.ylabel("differences of max and min postcodes")
plt.legend()
plt.show()
x2=df4['date'].dt.strftime('%B')
y3=df3['avg_counts']=df2.groupby(df2['date'].dt.strftime('%B'))['counts'].mean().sort_values()
plt.plot(y3)
plt.title("averages")
plt.xlabel("dates")
plt.ylabel("monthely avreage violations")
plt.show()
y2=mcbkdf.groupby(mcbkdf['date'].dt.strftime('%B'))['counts'].mean()
plt.plot(y2)
plt.title("McD and BK average per month")
plt.xlabel("months")
plt.ylabel("averages per month")
plt.show()
y4=df4['count_max']
plt.plot(y4)
plt.title("max number of violation based on postcodes")
plt.xlabel("month")
plt.ylabel("max violations")
plt.show()
| UTF-8 | Python | false | false | 5,075 | py | 25 | numpy_food.py | 20 | 0.714455 | 0.695524 | 0 | 153 | 32.124183 | 291 |
rachittoshniwal/opencv-projects | 10,995,116,320,360 | 186105fc3745345bae18be60c2a876fc977ca5a1 | 00102adf23cfec94df86bab5663f4cbc7a5dc2a9 | /face_blurring_image_haar.py | 809362b651ec8dcb1308b1b68ee536110665ca00 | [] | no_license | https://github.com/rachittoshniwal/opencv-projects | 10236d5107c3e53fe8a2fa72a1ab01e02d548995 | 7c1378ba719bc79f17668b40bc86596aaf0c62fa | refs/heads/master | 2023-06-25T10:49:44.040554 | 2021-07-21T16:09:52 | 2021-07-21T16:09:52 | 386,977,089 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2 as cv
haar_face = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv.imread('./test images/friends1.jpg')
img = cv.resize(img, (640,480))
cv.imshow("original image", img)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
face_rect = haar_face.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
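# Gaussian-blur each detected face region and paste it back into the original image.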
for face in face_rect:
(x, y, w, h) = face
face_roi = img[y:y+h, x:x+w]
face_roi = cv.GaussianBlur(face_roi, (29,29), 15)
img[y:y+h, x:x+w, :] = face_roi
cv.imshow("blurred image", img)
cv.waitKey(0) | UTF-8 | Python | false | false | 545 | py | 4 | face_blurring_image_haar.py | 2 | 0.67156 | 0.636697 | 0 | 19 | 27.736842 | 77 |
edublancas/sklearn-evaluation | 11,433,202,956,858 | c9c7e8c6cfbeb849ad522ec3b86e0a845a98e9e6 | 4b2d797b26bc919b91ce4b91479237c1ffe417a0 | /src/sklearn_evaluation/plot/_matrix.py | cb6c27e01268c33a464f66bbaf7f6027a6609b3d | [
"MIT"
] | permissive | https://github.com/edublancas/sklearn-evaluation | c0c4a0948889dab6b8987775b82672dfc5fb5787 | 91b547e7705c4096f0d7ebee5aaf7e34af795dd3 | refs/heads/master | 2023-05-28T14:46:32.627880 | 2023-01-13T21:57:34 | 2023-01-13T21:57:34 | 589,342,851 | 3 | 0 | MIT | true | 2023-01-15T21:18:53 | 2023-01-15T21:18:52 | 2023-01-15T08:53:00 | 2023-01-15T17:14:45 | 13,596 | 0 | 0 | 0 | null | false | false | import numpy as np
from matplotlib.tri import Triangulation
from sklearn_evaluation.util import default_heatmap
def add(first, second, ax, invert_axis=False, max_=None):
# Adapted from: https://stackoverflow.com/a/63531813/709975
# TODO: validate first and second have the same shape
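    # Splits every matrix cell into two triangles: `first` fills one half and `second`
    # the other, both on the same color scale (vmax), and both values are written with
    # a small offset so the pair stays readable.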
M, N = first.shape if not invert_axis else first.shape[::-1]
x = np.arange(M + 1)
y = np.arange(N + 1)
xs, ys = np.meshgrid(x, y)
zs = (xs * ys) % 10
zs = zs[:-1, :-1].ravel()
if max_ is None:
max_ = np.max([first.max(), second.max()])
triangles1 = [
(i + j * (M + 1), i + 1 + j * (M + 1), i + (j + 1) * (M + 1))
for j in range(N)
for i in range(M)
]
triangles2 = [
(i + 1 + j * (M + 1), i + 1 + (j + 1) * (M + 1), i + (j + 1) * (M + 1))
for j in range(N)
for i in range(M)
]
triang1 = Triangulation(xs.ravel() - 0.5, ys.ravel() - 0.5, triangles1)
triang2 = Triangulation(xs.ravel() - 0.5, ys.ravel() - 0.5, triangles2)
cmap = default_heatmap()
ax.tripcolor(triang1, first.ravel(), cmap=cmap, vmax=max_)
ax.tripcolor(triang2, second.ravel(), cmap=cmap, vmax=max_)
ax.set_xlim(x[0] - 0.5, x[-1] - 0.5)
ax.set_ylim(y[-1] - 0.5, y[0] - 0.5)
for pad, arr in ((-1 / 5, first), (1 / 5, second)):
for (y, x), v in np.ndenumerate(arr):
try:
label = "{:.2}".format(v)
except Exception:
label = v
ax.text(
x + pad,
y + pad,
label,
horizontalalignment="center",
verticalalignment="center",
)
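# Minimal usage sketch (assumes `import matplotlib.pyplot as plt`; not part of this module):
#   fig, ax = plt.subplots()
#   add(np.random.rand(3, 3), np.random.rand(3, 3), ax)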
| UTF-8 | Python | false | false | 1,686 | py | 158 | _matrix.py | 109 | 0.502966 | 0.46382 | 0 | 57 | 28.578947 | 79 |
Lorry1123/logging | 10,075,993,290,554 | 786c9052b1314d14b863d53f126b4bfdb24fdfc2 | 626d8d39c4864b8086b8aed3ecfed62ed4c52a7e | /logging_demo.py | ac2f977975d790f6e3c6b2873a5677307747905c | [] | no_license | https://github.com/Lorry1123/logging | 6f9424e8394e4916f2ea2ba43b4d981dcfe2365c | 2a26e1d7b0a53c3fad51bedcc2110dcbc323e7f2 | refs/heads/master | 2020-04-08T19:07:30.904086 | 2019-03-29T02:36:51 | 2019-03-29T02:36:51 | 159,641,002 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf8
import sys
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
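# A miniature version of the stdlib logging pipeline: a Logger hands each LogRecord to
# its Handlers, every Handler renders it through a Formatter, and records propagate up
# the logger.parent chain until a logger sets propagate = False.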
class Logger():
def __init__(self, name, propagate=True):
self.name = name
self.handlers = []
self.level = NOTSET
self.parent = None
self.propagate = propagate
def _log(self, level, msg, args):
record = LogRecord(self.name, level, msg, args)
self.callHandlers(record)
def callHandlers(self, record):
c = self
found = 0
while c:
for handler in c.handlers:
found += 1
if record.levelno >= handler.level:
handler.emit(record)
if not c.propagate:
c = None
else:
c = c.parent
if found == 0:
raise Exception('No handlers could be found for logger "%s"' % self.name)
def addHandler(self, handler):
if not isinstance(handler, BaseHandler):
raise Exception('addHandler only receive a instance of BaseHandler')
self.handlers.append(handler)
def setLevel(self, level):
self.level = level
def isEnabledFor(self, level):
return level >= self.level
def debug(self, msg, *args):
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args)
def info(self, msg, *args):
if self.isEnabledFor(INFO):
self._log(INFO, msg, args)
def warning(self, msg, *args):
if self.isEnabledFor(WARN):
self._log(WARN, msg, args)
warn = warning
def error(self, msg, *args):
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args)
class BaseHandler():
def __init__(self):
self.formatter = Formatter('%(message)s')
self.level = NOTSET
def emit(self, record):
pass
def setFormatter(self, formatter):
self.formatter = formatter
def setLevel(self, level):
self.level = level
class MyStreamHandler(BaseHandler):
def __init__(self):
BaseHandler.__init__(self)
self.stream = sys.stdout
def emit(self, record):
msg = self.formatter.format(record)
self.stream.write('%s\n' % msg)
class LogRecord():
def __init__(self, name, level, msg, args):
self.name = name
self.level = _levelNames[level]
self.levelno = level
self.msg = msg
self.args = args
def getMessage(self):
return self.msg % self.args
class Formatter():
def __init__(self, fmt):
self.fmt = fmt
def format(self, record):
record.message = record.getMessage()
ret = self.fmt % record.__dict__
return ret
# test code
logger = Logger('my_logger')
sh = MyStreamHandler()
sh.setFormatter(Formatter('[%(level)s][%(message)s]'))
logger.addHandler(sh)
# logger.addHandler(MyStreamHandler())
# logger.setLevel(WARN)
# logger.info('hello world')
# logger.warn('hello %s', 'lorry')
# sh.setLevel(ERROR)
# logger.info('hello info')
# logger.warn('hello warning')
parent = Logger('parent logger')
parent_hdl = MyStreamHandler()
parent_hdl.setFormatter(Formatter('[PARENT][%(level)s][%(message)s]'))
parent.addHandler(parent_hdl)
# parent node: attach a parent logger to form a hierarchy
logger.parent = parent
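# With propagation on, 'hello parent' is emitted by both the child's and the parent's
# handlers; once propagate is switched off below, only the child's handler fires.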
logger.info('hello parent')
logger.propagate = False
logger.info('hello parent 2')
| UTF-8 | Python | false | false | 3,738 | py | 4 | logging_demo.py | 2 | 0.587085 | 0.582797 | 0 | 167 | 21.347305 | 85 |
ii0/algorithms-6 | 7,859,790,181,301 | 9a1317a89a2eb0c8a2e2a76bb53f55d370f43457 | 1c7b5b866b505b7b8c47dce504c5bd27a34d5992 | /Leetcode/BinarySearch/69_Sqrt(x).py | 64813d0bf503b1347e64f80a499a7d92942f40b7 | [] | no_license | https://github.com/ii0/algorithms-6 | 2dbcb3df504810ea52b41e5129b334f62136d70a | 3eddc77d2f3dafffd177f2a9ee28e9850da2f020 | refs/heads/master | 2022-04-25T23:17:53.332297 | 2019-09-19T14:52:04 | 2019-09-19T14:52:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
author: buppter
datetime: 2019/8/8 12:45
Problem description:
Implement the int sqrt(int x) function.
Implement int sqrt(int x).
Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
Since the return type is an integer, the decimal digits are truncated and only the
integer part of the result is returned.
Examples:
Input: 4
Output: 2
Input: 8
Output: 2
Explanation: The square root of 8 is 2.82842..., and since
the decimal part is truncated, 2 is returned.
Approach:
Binary search.
"""
class Solution:
def mySqrt(self, x: int) -> int:
if x == 0 or x == 1:
return x
l = 0
r = x
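        # Invariant: l*l <= x < r*r, so the integer square root always lies in [l, r).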
while l <= r:
mid = (l + r) // 2
if mid * mid <= x < (mid + 1) * (mid + 1):
return mid
elif x < mid * mid:
r = mid
else:
l = mid
| UTF-8 | Python | false | false | 907 | py | 100 | 69_Sqrt(x).py | 98 | 0.544509 | 0.512139 | 0 | 43 | 19.116279 | 92 |
murrayrm/BioCRNPyler | 11,493,332,488,539 | a28076cc7ca78b0a63d58bdb0395a40ebb14ec54 | 5e6a21328057f91d489319533e1927b8107b9e0c | /biocrnpyler/dcas9.py | 980a4ab55d7178053a7eb4c642994dee9e5cb524 | [
"BSD-3-Clause"
] | permissive | https://github.com/murrayrm/BioCRNPyler | d84437326742a04ac508a7e068c19a8c8816d7d7 | 2e7d4c521b1ebdf7cff6867b25cbee014e0ee1a3 | refs/heads/master | 2020-07-05T10:12:56.382470 | 2020-04-23T20:03:01 | 2020-04-23T20:03:01 | 202,620,151 | 1 | 0 | BSD-3-Clause | true | 2019-08-15T22:35:16 | 2019-08-15T22:35:16 | 2019-08-14T16:57:46 | 2019-08-14T10:32:31 | 5,189 | 0 | 0 | 0 | null | false | false | # Copyright (c) 2019, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
from .component import Component, RNA
from .mechanism import Reversible_Bimolecular_Binding
from .chemical_reaction_network import Species
class guideRNA(RNA):
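    # RNA component whose species reversibly binds dCas9; get_dCasComplex() returns the
    # bound guide:dCas9 complex produced by that binding mechanism.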
def __init__(self, guide_name, dCas9 = "dCas9", **keywords):
if isinstance(dCas9, Species):
self.dCas = dCas9
elif isinstance(dCas9, str):
self.dCas = Species(dCas9, material_type ="protein")
elif isinstance(dCas9, Component) and dCas9.get_species()!= None:
self.dCas = dCas9.get_species()
else:
raise ValueError("dCas9 parameter must be a "
"chemical_reaction_network.species, Component "
"with get_species(), or a string")
self.default_mechanisms = {
"dCas9_binding" :
Reversible_Bimolecular_Binding(name = "dCas9_binding",
mechanism_type = "bimolecular binding")
}
RNA.__init__(self, name = guide_name, **keywords)
self.gRNA = self.get_species()
def get_dCasComplex(self):
binding_species = \
self.mechanisms['dCas9_binding'].update_species(self.gRNA, self.dCas)
if len(binding_species) > 1:
raise ValueError("dCas9_binding mechanisms "
f"{self.mechanisms['dCas9_binding'].name} returned "
"multiple complexes. Unclear which is active." )
else:
return binding_species[0]
def update_species(self):
species = [self.gRNA, self.dCas]
species += self.mechanisms['dCas9_binding'].update_species(self.gRNA,
self.dCas)
return species
def update_reactions(self):
ku = self.get_parameter("ku", part_id = self.gRNA.name,
mechanism = self.mechanisms['dCas9_binding'])
kb = self.get_parameter("kb", part_id = self.gRNA.name,
mechanism = self.mechanisms['dCas9_binding'])
rxns = self.mechanisms['dCas9_binding'].update_reactions(self.gRNA,
self.dCas, kb = kb, ku = ku)
return rxns
| UTF-8 | Python | false | false | 2,369 | py | 29 | dcas9.py | 20 | 0.552976 | 0.542423 | 0 | 56 | 41.303571 | 80 |