Dataset columns (name, dtype, value range; ⌀ marks columns that contain null values):

| Column | Dtype | Range / classes |
|---|---|---|
| repo_name | string | lengths 7 – 111 |
| __id__ | int64 | 16.6k – 19,705B |
| blob_id | string | lengths 40 – 40 |
| directory_id | string | lengths 40 – 40 |
| path | string | lengths 5 – 151 |
| content_id | string | lengths 40 – 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26 – 130 |
| snapshot_id | string | lengths 40 – 40 |
| revision_id | string | lengths 40 – 40 |
| branch_name | string | lengths 4 – 42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k – 687M ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0 – 10.2M ⌀ |
| gha_stargazers_count | int32 | 0 – 178k ⌀ |
| gha_forks_count | int32 | 0 – 88.9k ⌀ |
| gha_open_issues_count | int32 | 0 – 2.72k ⌀ |
| gha_language | string | lengths 1 – 16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10 – 2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 – 2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1 – 202k |
| filename | string | lengths 4 – 112 |
| num_lang_files | int64 | 1 – 202k |
| alphanum_fraction | float64 | 0.26 – 0.89 |
| alpha_fraction | float64 | 0.2 – 0.89 |
| hex_fraction | float64 | 0 – 0.09 |
| num_lines | int32 | 1 – 93.6k |
| avg_line_length | float64 | 4.57 – 103 |
| max_line_length | int64 | 7 – 931 |
JulseJiang/DrugKBPrototype | 3,032,246,933,707 | 699b403b2d1b2d7deae4507a01013afe8739cb95 | 3b9194499503f9e06dbebe113c53afad0b572ab2 | /Visualization/functionhelp.py | 2c17b58cfc37b4fa09a344204cc4c1932e90d348 | []
| no_license | https://github.com/JulseJiang/DrugKBPrototype | 0bffa42b2913996d22e40942d24c4da0feb76f33 | 34567630fe8febb1ea6827541fd3d92f42935ecc | refs/heads/main | 2023-03-19T17:57:14.822743 | 2021-03-18T11:54:22 | 2021-03-18T11:54:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- encoding: utf-8 -*-
#-------------------------------------------------------------------------------
# @file: function
# @Author: GuoSijia
# @Purpose:
# @Created: 2019-04-21
# @update: 2019-04-21 9:39
# @Software: PyCharm
#-------------------------------------------------------------------------------
import pymongo
import numpy as np
import math
class DataStorage:
def __init__(self, collection_name="", ip_adress='127.0.0.1', port=27017, user="tanxian123", password="123456",
db_name="Denovo"):
self.name = collection_name
self.path = self.__login(ip_adress, port, user, password, db_name)
    def __login(self, ip_adress, port, user, password, db_name):
        # use the connection parameters passed in instead of ignoring them
        client = pymongo.MongoClient(ip_adress, port)
        db = client[db_name]
        # db.authenticate(user, password)
        collection = db[self.name]
        return collection
def Storage(self, dic):
return self.path.insert(dic)
def FindByID(self, ID):
return self.path.find_one({'ENTREZ_ID': ID})
def find_one_by_one_condition(self, db_field, value):
return self.path.find_one({db_field: value})
def find_count_by_one_condition(self, db_field, value):
result_list = []
for item in self.path.find({db_field: value}):
result_list.append(item)
return result_list
class PPIPointClass(object):
def __init__(self):
self.ds_gene = DataStorage('Genes')
# self.ds_protein_exp = DataStorage('ProteinExpress_Update')
# self.ds_gene_express = DataStorage('Gtex_Gene_AllTissue')
    def get_mutation_count(self, id):
        '''
        Count the de novo mutations carried by the target gene
        :param id: gene ENTREZ_ID
        :return: number of de novo mutations carried by this gene
        '''
        gene_information = self.ds_gene.FindByID(id)
        if gene_information is None:
            return 0
        else:
            return gene_information.get('MutationCount')
    def get_protein_brain_exp_flag(self, id):
        '''
        Check whether the protein encoded by the target gene is expressed in brain tissue
        :param id: gene ENTREZ_ID
        :return: whether there is expression in the brain at the protein level
        '''
        gene_information = self.ds_gene.FindByID(id)
        if gene_information is None:
            return False
        else:
            return gene_information.get('ProteinBrainExpress')
    def get_gene_brain_exp_flag(self, id):
        '''
        Check whether the target gene is expressed in brain tissue
        :param id: gene ENTREZ_ID
        :return: whether there is expression in the brain at the gene level
        '''
        gene_information = self.ds_gene.FindByID(id)
        if gene_information is None:
            return False
        else:
            return gene_information.get('GeneBrainExpress')
# if __name__ == '__main__':
# node_id='126961'
# data_nodes = {'data': {}}
# ppipointclass=PPIPointClass()
# print(ppipointclass.get_mutation_count(node_id))
# print(ppipointclass.get_protein_brain_exp_flag(node_id))
# print(ppipointclass.get_gene_brain_exp_flag(node_id))
# if ((ppipointclass.get_mutation_count(node_id) != 0) and (
# ppipointclass.get_protein_brain_exp_flag(node_id) == False) and (
# ppipointclass.get_gene_brain_exp_flag(node_id) == False)):
# data_nodes['data']['color'] = '#FE0000' # red
# data_nodes['data']['type'] = 'circle'
# elif ((ppipointclass.get_mutation_count(node_id) == 0) and (
# ppipointclass.get_protein_brain_exp_flag(node_id) == True) and (
# ppipointclass.get_gene_brain_exp_flag(node_id) == False)):
# data_nodes['data']['color'] = '#000081' # blue
# data_nodes['data']['type'] = 'circle'
# elif ((ppipointclass.get_mutation_count(node_id) == 0) and (
# ppipointclass.get_protein_brain_exp_flag(node_id) == False) and (
# ppipointclass.get_gene_brain_exp_flag(node_id) == True)):
# data_nodes['data']['color'] = '#039735' # green
# data_nodes['data']['type'] = 'circle'
# elif ((ppipointclass.get_mutation_count(node_id) != 0) and (
# ppipointclass.get_protein_brain_exp_flag(node_id) == True) and (
# ppipointclass.get_gene_brain_exp_flag(node_id) == False)):
# data_nodes['data']['color'] = '#D60093'
# data_nodes['data']['type'] = 'triangle'
# elif ((ppipointclass.get_mutation_count(node_id) != 0) and (
# ppipointclass.get_protein_brain_exp_flag(node_id) == False) and (
# ppipointclass.get_gene_brain_exp_flag(node_id) == True)):
# data_nodes['data']['color'] = '#FFFE35'
# data_nodes['data']['type'] = 'rectangle'
# elif ((ppipointclass.get_mutation_count(node_id) == 0) and (
# ppipointclass.get_gene_brain_exp_flag(node_id) == True) and (
# ppipointclass.get_protein_brain_exp_flag(node_id) == True)):
# data_nodes['data']['color'] = '#3497CE'
# data_nodes['data']['type'] = 'barrel'
# elif ((ppipointclass.get_mutation_count(node_id) != 0) and (
# ppipointclass.get_gene_brain_exp_flag(node_id) == True) and (
# ppipointclass.get_protein_brain_exp_flag(node_id) == True)):
# data_nodes['data']['type'] = 'diamond'
# data_nodes['data']['color'] = '#000000'
# else:
# data_nodes['data']['type'] = 'circle'
# data_nodes['data']['color'] = '#669999'
# print(data_nodes) | UTF-8 | Python | false | false | 4,950 | py | 67 | functionhelp.py | 54 | 0.631889 | 0.608119 | 0 | 135 | 34.222222 | 112 |
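
For reference, a minimal usage sketch of the DataStorage helper defined above (not part of the original file). It assumes a MongoDB server on 127.0.0.1:27017 holding the 'Denovo' database with a 'Genes' collection, and the ENTREZ id is only an illustrative value:

ds_gene = DataStorage('Genes')
gene = ds_gene.FindByID('126961')  # fetch one gene document by its ENTREZ_ID
brain_genes = ds_gene.find_count_by_one_condition('GeneBrainExpress', True)  # list of all matching documents
print(gene, len(brain_genes))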
pranavgarg1506/Machine_Learning | 9,156,870,305,944 | 2f1d912d53450d23d7cfaa7a80ea93f7a7b68c63 | 9a92c117403e9b8ed6d0ab4d25ece315288fdfe1 | /passwordGenerator/passwordgenerate.py | 94b3ed859e53fcace9f280b53a8a4da21b93f48b | []
| no_license | https://github.com/pranavgarg1506/Machine_Learning | b459c1eb0fecf91f958ab349a60d653e8dad51a9 | 865906d6d9edf76657f71cc8ae275ca1e27ae93a | refs/heads/master | 2022-04-11T14:49:28.227047 | 2020-03-31T15:35:12 | 2020-03-31T15:35:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import datetime
Capital = "ABCDEFGIJKLMNOPQRSTUVWXYZ"
small = 'abcdefgijklmnopqrstuvwxyz'
number = '0123456789'
specialCharacters = '!@#$%^&*()'
digit = int(input())
print("digit ", digit)
password = ''
index = 0
# defining probabilities for all the sets
# p_Capital = 0
# p_small = 0
# p_number = 0
# p_character = 0
# calculating the currentSum on the basis of current Time
def choice1():
currentTimeStamp = datetime.datetime.now()
currentMonth = currentTimeStamp.month
currentYear = currentTimeStamp.year
currentDay = currentTimeStamp.day
currentMinute = currentTimeStamp.minute
currentHour = currentTimeStamp.hour
currentSecond = currentTimeStamp.second
currentMicroSecond = currentTimeStamp.microsecond
currentSum = currentYear + currentMonth + currentDay + currentHour + currentMinute + currentSecond + currentMicroSecond
print("currentSum ", currentSum)
return currentSum
def checkForDuplicates(currentSum, flag):
byMod = 0
if (flag == 0):
tempArray = Capital
byMod = len(Capital)
elif (flag == 1):
tempArray = small
byMod = len(small)
elif (flag == 2):
tempArray = number
byMod = len(number)
elif (flag == 3):
tempArray = specialCharacters
byMod = len(specialCharacters)
    tempIndex = currentSum % byMod
    # keep advancing the index until the candidate character is not already in the password
    duplicate = True
    while duplicate:
        duplicate = False
        for i in password:
            if i == tempArray[tempIndex % byMod]:
                tempIndex = tempIndex + 1
                duplicate = True
                break
    return tempIndex % byMod
def choiceCapital(currentSum):
# index = currentSum % 26
index = checkForDuplicates(currentSum, 0)
print("value of index for Capital", index)
print("Capital index ", Capital[index])
return Capital[index]
def choiceSmall(currentSum):
# index = currentSum % 26
index = checkForDuplicates(currentSum, 1)
print("value of index for Small", index)
print("small index", small[index])
return small[index]
def choicenumber(currentSum):
# index = currentSum % 10
index_number = checkForDuplicates(currentSum, 2)
print("value of index for number", index_number)
print("number index", number[index_number])
return number[index_number]
def choiceCharacters(currentSum):
# index = currentSum % 10
index_character = checkForDuplicates(currentSum, 3)
print("value of index for characters", index_character)
print("character index", specialCharacters[index_character])
return specialCharacters[index_character]
for index in range(0, digit):
currentSum = choice1()
if currentSum % 4 == 0:
character = choiceCapital(currentSum)
password = password + character
elif currentSum % 4 == 1:
character = choiceSmall(currentSum)
password = password + character
elif currentSum % 4 == 2:
character = choicenumber(currentSum)
password = password + character
elif currentSum % 4 == 3:
character = choiceCharacters(currentSum)
password = password + character
else:
print("Error 404")
break
print("Generated Password", password)
| UTF-8 | Python | false | false | 3,210 | py | 6 | passwordgenerate.py | 4 | 0.664174 | 0.649533 | 0 | 116 | 26.672414 | 123 |
Chenjianqi1992/Movie_comment | 1,400,159,367,740 | 6889d13882024bab36cf319628e955794316e501 | e17b9b9978ba214294d752d170bb50caff6ec70a | /movie_comment/movie/migrations/0002_remove_movie_info_movie_published.py | cc5629c9c3ed9e2a5b6dde1d8de57e2c94e3c8ee | []
| no_license | https://github.com/Chenjianqi1992/Movie_comment | e0563ab215e456f1da8dd87ed473c988c1f6013f | 73900ef887fa38bc2a4fc770c39eef0e4ebcb4c3 | refs/heads/master | 2021-07-16T13:01:33.512789 | 2020-05-19T02:31:09 | 2020-05-19T02:31:09 | 144,814,257 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.7 on 2018-08-07 06:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('movie', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='movie_info',
name='movie_published',
),
]
| UTF-8 | Python | false | false | 345 | py | 13 | 0002_remove_movie_info_movie_published.py | 9 | 0.550725 | 0.495652 | 0 | 17 | 18.294118 | 47 |
buraksmg/e-itim | 14,465,449,897,259 | 7f190558ca228b7bbdb54b6480c79b67fd24754c | d9c7c6da6c55d6f77f9cbbb754ce88863195b871 | /egitim_1.py | 94953c4d0d8ac4425bc4f5503a046310d5b1fe67 | []
| no_license | https://github.com/buraksmg/e-itim | f2f89d3bf2764d3a5c7f3363c2b4daef74e12da7 | fc518478ccc2470c96db9a03c52fe2851e3fc0c4 | refs/heads/master | 2022-11-20T08:43:42.429429 | 2020-07-20T13:01:52 | 2020-07-20T13:01:52 | 280,889,960 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """test"""
print("Hello world")
print("merhaba doğum tarihinizi yazınız")
a = int(input())
print("yaşınız:"+str(2020-a))
| UTF-8 | Python | false | false | 134 | py | 7 | egitim_1.py | 7 | 0.640625 | 0.609375 | 0 | 6 | 19.333333 | 41 |
csm9493/denoising_gan | 19,061,064,862,038 | f5122de663339e52f2ad2185a9c17746ea863284 | 71b193a2bb083efd33406fb3b00b07f9f1469abd | /main_noisegan.py | f935b0fd315307a0906506d6e50fbe3aa6a3daa4 | []
| no_license | https://github.com/csm9493/denoising_gan | cbbb4d73e29ab8d1a7bc2a26914e6d0d65ed9b2d | 0a2dc2329035ae1f0a8fb044b8e6be4c2c306949 | refs/heads/master | 2020-04-25T20:09:57.178031 | 2019-03-27T00:31:47 | 2019-03-27T00:31:47 | 173,045,206 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from core.noisegan import NoiseGAN
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, in_features):
super(ResidualBlock, self).__init__()
conv_block = [ nn.ReflectionPad2d(1),
nn.Conv2d(in_features, in_features, 3),
nn.InstanceNorm2d(in_features),
nn.ReLU(inplace=True),
nn.ReflectionPad2d(1),
nn.Conv2d(in_features, in_features, 3),
nn.InstanceNorm2d(in_features) ]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x):
return x + self.conv_block(x)
class Generator(nn.Module):
def __init__(self, input_nc, output_nc, n_residual_blocks=9):
super(Generator, self).__init__()
# Initial convolution block
# model = [ nn.ReflectionPad2d(3),
# nn.Conv2d(input_nc, 64, 7),
# nn.InstanceNorm2d(64),
# nn.ReLU(inplace=True) ]
self.pad2d_1 = nn.ReflectionPad2d(3)
self.conv2d_1 = nn.Conv2d(input_nc, 64, 7)
self.norm2d_1 = nn.InstanceNorm2d(64)
self.relu_1 = nn.ReLU(inplace=True)
self.rv_embedding = nn.Linear(1, 64)
self.tanh = nn.Tanh()
model = []
# Downsampling
in_features = 64
out_features = in_features*2
for _ in range(2):
model += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.InstanceNorm2d(out_features),
nn.ReLU(inplace=True) ]
in_features = out_features
out_features = in_features*2
# Residual blocks
for _ in range(n_residual_blocks):
model += [ResidualBlock(in_features)]
# Upsampling
out_features = in_features//2
for _ in range(2):
model += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
nn.InstanceNorm2d(out_features),
nn.ReLU(inplace=True) ]
in_features = out_features
out_features = in_features//2
# Output layer
model += [ nn.ReflectionPad2d(3),
nn.Conv2d(64, output_nc, 7),
nn.Tanh() ]
self.model = nn.Sequential(*model)
def forward(self, x, z):
out1 = self.pad2d_1(x)
out2 = self.conv2d_1(out1)
out1_1 = self.rv_embedding(z)
out1_2 = self.tanh(out1_1)
out1_3 = out1_2.view(-1,64,1,1)
out2_1 = out2*out1_3
out3 = self.norm2d_1((out2_1))
out4 = self.relu_1((out3))
return self.model(out4)
class Discriminator(torch.nn.Module):
def __init__(self, channels):
super().__init__()
# Filters [256, 512, 1024]
# Input_dim = channels (Cx64x64)
# Output_dim = 1
self.main_module = nn.Sequential(
# Omitting batch normalization in critic because our new penalized training objective (WGAN with gradient penalty) is no longer valid
# in this setting, since we penalize the norm of the critic's gradient with respect to each input independently and not the enitre batch.
# There is not good & fast implementation of layer normalization --> using per instance normalization nn.InstanceNorm2d()
# Image (Cx32x32)
nn.Conv2d(in_channels=channels, out_channels=256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(256, affine=True),
nn.LeakyReLU(0.2, inplace=True),
# State (256x16x16)
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(512, affine=True),
nn.LeakyReLU(0.2, inplace=True),
# State (512x8x8)
nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(1024, affine=True),
nn.LeakyReLU(0.2, inplace=True))
# output of main module --> State (1024x4x4)
self.output = nn.Sequential(
# The output of D is no longer a probability, we do not apply sigmoid at the output of D.
nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0))
def forward(self, x):
x = self.main_module(x)
return self.output(x)
def feature_extraction(self, x):
# Use discriminator for feature extraction then flatten to vector of 16384
x = self.main_module(x)
return x.view(-1, 1024*4*4)
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal(m.weight.data, 1.0, 0.02)
torch.nn.init.constant(m.bias.data, 0.0)
input_nc = 1
output_nc = 1
lr_g = 2e-4 #learning rate for generator
lr_c = 5e-5 #learning rate for critic
ep = 20 #training epochs
decay_ep = 10 #learning rate decay
mbs = 32 #mini batch size
im_size = 100 #cropped image size
tr_data_name = 'gan_trdata_20500_patch_120x120.hdf5' #training data name
critic_iter = 5 #iterations for trainin critic
tr_type = 'only_zi' #training type 'only_zi', 'shuffle'
experiment_type = '_zi-zi_hat_mbs32_not_equal_loss_x0001_cycleloss_x5' #information about experiment (name for saving result file and weight)
sv_name = '190322_cycleGAN_wGAN_experiment_'+ tr_type + experiment_type
netG_A2B = Generator(input_nc, output_nc)
netG_B2A = Generator(output_nc, input_nc)
netD_B = Discriminator(input_nc)
netG_A2B.cuda()
netG_B2A.cuda()
netD_B.cuda()
netG_A2B.apply(weights_init_normal)
netG_B2A.apply(weights_init_normal)
netD_B.apply(weights_init_normal)
trainer = NoiseGAN(netG_A2B, netG_B2A, netD_B, save_name=sv_name, lr_g=lr_g, lr_critic=lr_c, epochs=ep, decay_epoch = decay_ep,
mini_batch_size=mbs, img_size=im_size, tr_data_name = tr_data_name, critic_iter = critic_iter,
input_nc=input_nc, output_nc = output_nc, train_type=tr_type)
| UTF-8 | Python | false | false | 6,326 | py | 5 | main_noisegan.py | 5 | 0.585836 | 0.543155 | 0 | 169 | 36.372781 | 149 |
N1te0wl1384/inzopr2020 | 12,773,232,784,532 | 7c817870f4cf59a571325a419d63e442ee3250b4 | c465c76e7bbbcc5c2a74dc2a1fd47d8def839d51 | /git_operations/pracownicy.py | 87b7b8d27ec520da52970c52b00810c619b15bb5 | []
| no_license | https://github.com/N1te0wl1384/inzopr2020 | 547f796662834be13ee7746b63a09f5959131313 | c25aa1bf401f114a9702b663bf288a36ac1eb5b5 | refs/heads/master | 2022-05-28T23:38:28.849880 | 2020-04-30T14:37:01 | 2020-04-30T14:37:01 | 259,095,822 | 0 | 0 | null | false | 2020-04-30T14:37:02 | 2020-04-26T17:48:34 | 2020-04-30T14:24:23 | 2020-04-30T14:37:01 | 16 | 0 | 0 | 0 | Python | false | false | class Pracownik:
def __init__(self, imie, pensja):
self.imie = imie
self.pensja = pensja
    def obliczPensje(self):  # compute the net ("take-home") pay from the gross salary
b = self.pensja
c = float('%.2f'% (round(b*0.0976,2) + round(b*0.015,2) + round(b*0.0245,2)))
d = b - c
e = float('%.2f'% round(d*0.09, 2))
f = float('%.2f'%round(d*0.0775, 2))
g = 111.25
h = float('%.2f'%round(b - g - c))
i = float('%.2f'%(round(h*0.18,2) -46.33))
j = round(i - f)
k = b - c - e - j
return k
    def skladkiPracodawcy(self):  # compute the employer-side payroll contributions
b = self.pensja
c = float('%.2f'%(round(b*0.0976,2) + round(b*0.065,2) + round(b*0.0245,2) + round(b*0.0193,2) + round(b*0.001,2)))
return c
liczba_pracownikow = int(input())
pracownicy = [];
for i in range(liczba_pracownikow):
pracownik = input()
dane=[]
for dana in pracownik.split():
dane.append(dana)
pracownicy.append(dane)
lacznie = 0
for i in pracownicy:
pracownik_instancja = Pracownik(i[0], float(i[1]))
skladki_pracodawcy = pracownik_instancja.skladkiPracodawcy()
pensja = pracownik_instancja.pensja
laczny_koszt = skladki_pracodawcy + pensja
print(pracownik_instancja.imie, '%.2f' % pracownik_instancja.obliczPensje(), '%.2f' % skladki_pracodawcy, '%.2f' % laczny_koszt)
lacznie = lacznie + laczny_koszt
print('%.2f' % lacznie)
| UTF-8 | Python | false | false | 1,383 | py | 5 | pracownicy.py | 4 | 0.573391 | 0.514823 | 0 | 41 | 32.731707 | 132 |
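
To make the arithmetic above concrete, here is a worked trace of the two methods for a single hypothetical gross salary of 3000.00; the name and figures below are an illustration that simply follows the rounding steps in the code, not output from the original submission:

# Worked example for a gross salary b = 3000.00:
#   employee-side contributions c = 292.80 + 45.00 + 73.50                 = 411.30
#   base after contributions    d = 3000.00 - 411.30                       = 2588.70
#   health insurance (9%)       e = round(d * 0.09, 2)                     = 232.98
#   deductible part (7.75%)     f = round(d * 0.0775, 2)                   = 200.62
#   tax base (fixed deduction 111.25)  h = round(3000.00 - 111.25 - 411.30) = 2477
#   tax                         i = round(h * 0.18, 2) - 46.33             = 399.53
#   tax advance                 j = round(i - f)                           = 199
#   net pay                     k = 3000.00 - 411.30 - 232.98 - 199        = 2156.72
# Employer-side contributions: 292.80 + 195.00 + 73.50 + 57.90 + 3.00 = 622.20,
# so the total cost of employment is 3000.00 + 622.20 = 3622.20.
p = Pracownik("Jan", 3000.00)  # "Jan" is a placeholder name
print('%.2f' % p.obliczPensje(), '%.2f' % p.skladkiPracodawcy())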
meijida258/mediaSpider | 16,269,336,159,180 | c3dfbe1bf9a93c64aa15e7b3ab852440462ea30c | 54891a2c2ddd56de28c495309a52e98b36290cf0 | /crawl/mmjpg.py | 3caec44e10e4ad56f26a8056c7d8449ac3d672e8 | []
| no_license | https://github.com/meijida258/mediaSpider | 61287f5ca58650c6a9578afdbab764fe063c5b39 | 9a9ef40d1f3dbd46dff25fad4a8b87ee85e5e1fd | refs/heads/master | 2021-01-23T08:10:14.211074 | 2018-06-20T02:19:12 | 2018-06-20T02:19:12 | 86,479,178 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import asyncio, time, re, os, requests, json, random, datetime
from lxml import etree
from multiprocessing.dummy import Pool
from pymongo import MongoClient
from PIL import Image
from tool.ImgOut import iop
client = MongoClient('localhost', 27017)
db = client.MMjpg
collection1 = db.Image1
collection2 = db.Image2
collection3 = db.Image3
collection4 = db.Image4
set_path = 'C:/Users/Administrator/Desktop/图/fhx20161004'
out_path = 'C:/Users/Administrator/Desktop'
def random_output(output_path, source_collection):
st = time.time()
img_list = source_collection.find()
img_random = img_list[random.randint(0, img_list.count())]
img_set_name = img_random['set_name']
img_set_list = source_collection.find({'set_name':img_set_name})
if not os.path.exists(output_path + os.path.sep + img_set_name):
os.mkdir(output_path + os.path.sep + img_set_name)
for img_dict in img_set_list:
save_path = output_path + os.path.sep + img_dict['set_name'] + os.path.sep + img_dict['img_num'] + '.' + img_dict['img_type']
with open(save_path, 'wb') as img:
img.write(img_dict['img_data'])
img.close()
    print('输出完毕,耗时%s' % str(time.time()-st))  # "output finished, elapsed %s"
# iop.random_output(out_path, collection4)
def insert_pic(i, save_collection = collection4):
img_name_list = os.listdir(set_path + os.path.sep + i)
for each_img in img_name_list:
if save_collection.find({'set_name': i, 'img_num': each_img.split('.')[0]}).count() == 0:
start_time = time.time()
with open(set_path + os.path.sep + i + os.path.sep + each_img, 'rb') as img:
img_data = img.read()
img.close()
insert_dict = {}
insert_dict['img_data'] = img_data
insert_dict['set_name'] = i
insert_dict['img_type'] = each_img.split('.')[-1]
insert_dict['img_num'] = each_img.split('.')[0]
save_collection.insert(insert_dict)
            print('录入一张图片,来自%s的第%s张图片,耗时%s' % (insert_dict['set_name'], insert_dict['img_num'], str(time.time() - start_time)))  # "stored one image: image %s from set %s, elapsed %s"
def up_photos():
collection = [collection1, collection2, collection3, collection4]
save_file = out_path + os.sep + 'photos'
for i in collection:
pic_list = i.find()
collection_count = 0
while True:
try:
save_pic_count = len(os.listdir(save_file))
save_path = save_file + os.sep + 'img_%s.' % str(save_pic_count + 1) + pic_list[collection_count]['img_type']
with open(save_path, 'wb') as pic:
pic.write(pic_list[collection_count]['img_data'])
pic.close()
collection_count += 2
except:
break
up_photos()
# exit()
# b = requests.get('http://imgs.aixifan.com/live/1493028073796/1493028073796.jpg').content
#
# fl = open('a.txt', 'wb')
# fl.write(b)
# fl.close()
# fl = open('a.txt', 'rb')
# with open('b.jpg', 'wb') as c:
# c.write(fl.read())
# c.close() | UTF-8 | Python | false | false | 3,072 | py | 122 | mmjpg.py | 93 | 0.597948 | 0.577763 | 0 | 78 | 37.75641 | 133 |
hi-noikiy/hall0 | 5,995,774,369,967 | c56c8fd31581724bab21bc7911e6e3126f917611 | 2b5dfacdb7389aefff64c67fac863e3f82d3723e | /source/tyframework/src/tyframework/_private_/dao/userprops_/usermedal.py | 95de216065f5bc50b36cdd1ed59226c47e7bc7a5 | []
| no_license | https://github.com/hi-noikiy/hall0 | 54ef76c715f7ac7fec4c9ca175817e12f60fbd6a | 21ea94c5b048bc611fb1557ac0b6e3ef4fdbbc09 | refs/heads/master | 2020-04-08T21:58:55.239106 | 2018-01-15T14:58:32 | 2018-01-15T14:58:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding=utf-8 -*-
from tyframework._private_.dao.userprops_.daobase import DaoBase
from tyframework._private_.dao.userprops_.daoconst import DaoConst
from tyframework._private_.dao.userprops_.decorator.structdataitem import load_struct_data, \
dump_struct_data
class UserMedal(DaoConst, DaoBase):
def _init_singleton_(self):
pass
def get_medal_by_id(self, uid, gameid, medalid, medal_cls):
        '''
        Fetch the content stored for one medalid and instantiate it as a new medal_cls instance.
        medal_cls must be a class decorated with @struct_data_item.
        Returns: depending on medal_cls's @struct_data_item markup, an instance of @struct_data_item or a list of instances.
        '''
data = self.__ctx__.RedisGame.execute(uid, 'HGET', 'medal:' + str(gameid) + ':' + str(uid), medalid)
return load_struct_data(medal_cls, data)
def get_medal_by_id_list(self, uid, gameid, medalid_list, medal_cls):
        '''
        Fetch the content for every medalid in medalid_list and instantiate each entry as a new medal_cls instance.
        medal_cls must be a class decorated with @struct_data_item.
        Returns: a list whose items are the corresponding medal_cls instances (or lists of instances).
        '''
datas = self.__ctx__.RedisGame.execute(uid, 'HMGET', 'medal:' + str(gameid) + ':' + str(uid), *medalid_list)
values = []
for data in datas:
obj = load_struct_data(medal_cls, data)
values.append(obj)
return values
def get_medal_by_id_dict(self, uid, gameid, medalid_cls_dict):
        '''
        medalid_cls_dict is a dict:
        key is the medalid,
        value is the class associated with that id.
        The class must be decorated with @struct_data_item.
        Returns: a dict keyed by medalid whose values are instances of the corresponding class.
        '''
medalids = medalid_cls_dict.keys()
datas = self.__ctx__.RedisGame.execute(uid, 'HMGET', 'medal:' + str(gameid) + ':' + str(uid), *medalids)
values = {}
for x in xrange(len(medalids)):
data = datas[x]
medalid = medalids[x]
medal_cls = medalid_cls_dict[medalid]
obj = load_struct_data(medal_cls, data)
values[medalid] = obj
return values
def update_medal_by_id(self, uid, gameid, medalid, medal_obj):
'''
        Update the content of one medal; medal_obj must be an instance of a class decorated with @struct_data_item.
'''
data = dump_struct_data(medal_obj)
self.__ctx__.RedisGame.execute(uid, 'HSET', 'medal:' + str(gameid) + ':' + str(uid), medalid, data)
def update_medal_by_id_list(self, uid, gameid, medalid_list, medal_value_list):
'''
        Update the contents of a group of medals.
        medalid_list is the list of medalids,
        medal_value_list is the collection of data corresponding to those medalids.
        Each item of medal_value_list may be a single object or a list of instances of the same class.
        The data items must be instances of classes decorated with @struct_data_item.
'''
datas = []
for x in xrange(len(medalid_list)):
datas.append(medalid_list[x])
            datas.append(dump_struct_data(medal_value_list[x]))  # serialize the @struct_data_item instance before writing
self.__ctx__.RedisGame.execute(uid, 'HMSET', 'medal:' + str(gameid) + ':' + str(uid), *datas)
def update_medal_by_id_dict(self, uid, gameid, medalid_ins_dict):
'''
        Update the contents of a group of medals; medalid_ins_dict is a dict
        whose keys are medalids and whose values are the data for each medalid.
        A value may be a single object or a list of instances of the same class.
        The data items must be instances of classes decorated with @struct_data_item.
'''
datas = []
for k, v in medalid_ins_dict.items():
datas.append(k)
datas.append(dump_struct_data(v))
self.__ctx__.RedisGame.execute(uid, 'HMSET', 'medal:' + str(gameid) + ':' + str(uid), *datas)
def remove_medal_by_id(self, uid, gameid, medalid):
'''
        Delete one medal.
'''
self.__ctx__.RedisGame.execute(uid, 'HDEL', 'medal:' + str(gameid) + ':' + str(uid), medalid)
def remove_medal_by_id_list(self, uid, gameid, medalid_list):
'''
        Delete a group of medals.
'''
self.__ctx__.RedisGame.execute(uid, 'HDEL', 'medal:' + str(gameid) + ':' + str(uid), *medalid_list)
def insure_medal_ids(self, uid, gameid, all_medalid_list):
'''
        Delete every stored ID that is not in all_medalid_list; typically used when tidying up the data.
'''
allids = set()
for x in all_medalid_list:
allids.add(str(x))
delids = []
rids = self.__ctx__.RedisGame.execute(uid, 'HKEYS', 'medal:' + str(gameid) + ':' + str(uid))
for rid in rids:
if rid not in allids:
delids.append(rid)
if delids:
self.__ctx__.RedisGame.execute(uid, 'HDEL', 'medal:' + str(gameid) + ':' + str(uid), *delids)
| UTF-8 | Python | false | false | 4,966 | py | 1,079 | usermedal.py | 601 | 0.584984 | 0.584754 | 0 | 113 | 37.424779 | 116 |
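
A short usage sketch of the DAO above (not part of the original module). 'Medal' stands for any class decorated with @struct_data_item, the dao instance is assumed to be obtained from the surrounding framework rather than constructed directly, and uid/gameid/medalid values are placeholders:

# dao = ...                                            # UserMedal singleton provided by the framework context
# medal = dao.get_medal_by_id(uid, gameid, 'medal_01', Medal)   # load and deserialize one medal
# dao.update_medal_by_id(uid, gameid, 'medal_01', medal)        # serialize and write it back
# dao.remove_medal_by_id_list(uid, gameid, ['medal_02', 'medal_03'])
# dao.insure_medal_ids(uid, gameid, ['medal_01'])      # drop any other stored medal ids for this user/game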
PurpleMyst/porcupine | 18,219,251,289,688 | f017a806ba28bc62cc281deb3445d569032374c1 | 3c98132198637077b55d3b6630a7134f747e8e39 | /porcupine/plugins/autopep8.py | 5d24f6b133af09b5fdd842989959e709202cdeff | [
"MIT"
]
| permissive | https://github.com/PurpleMyst/porcupine | ebff59278fb426e8cba862ac6ef6f3ce7d4bd3a9 | 1ce4e15a5b2fed97efb889e56216c93c042fc7a8 | refs/heads/master | 2021-01-25T06:56:11.144326 | 2017-09-07T19:11:12 | 2017-09-07T19:11:12 | 93,628,416 | 0 | 1 | null | true | 2017-06-07T11:35:06 | 2017-06-07T11:35:05 | 2017-05-29T12:16:37 | 2017-06-07T10:00:48 | 774 | 0 | 0 | 0 | null | null | null | import platform
import subprocess
import porcupine
from porcupine import tabs, utils
def run_autopep8(code):
try:
import autopep8 # noqa
except ImportError:
# this command is wrong in some cases, but most of the time
# it's ok
if platform.system() == 'Windows':
command = "py -m pip install autopep8"
app = 'command prompt or PowerShell'
else:
command = "python3 -m pip install --user autopep8"
app = 'terminal'
utils.errordialog(
"Cannot find autopep8",
"Looks like autopep8 is not installed.\n" +
"You can install it by running this command on a %s:" % app,
command)
return None
# autopep8's main() does some weird signal stuff, so we'll run it in
# a subprocess just to make sure that the porcupine process is ok
command = [utils.python_executable, '-m', 'autopep8', '-']
process = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, errors) = process.communicate(code.encode('utf-8'))
if process.returncode != 0:
utils.errordialog(
"Running autopep8 failed",
"autopep8 exited with status code %r." % process.returncode,
errors.decode('utf-8', errors='replace'))
return None
return output.decode('utf-8')
def on_click():
widget = porcupine.get_tab_manager().current_tab.textwidget
before = widget.get('1.0', 'end - 1 char')
after = run_autopep8(before)
if after is None:
# error
return
if before != after:
widget['autoseparators'] = False
widget.delete('1.0', 'end - 1 char')
widget.insert('1.0', after)
widget.edit_separator()
widget['autoseparators'] = True
def setup():
porcupine.add_action(on_click, "Tools/autopep8", tabtypes=[tabs.FileTab])
| UTF-8 | Python | false | false | 1,957 | py | 57 | autopep8.py | 44 | 0.60603 | 0.593255 | 0 | 63 | 30.063492 | 77 |
ahtouw/Projects | 9,680,856,317,183 | 0d3c8d4f4845c46313d5fc3e6f849dc877b5ea4f | 23019dc9f099efe5361d387817022130af59334b | /Comp Sci/Python/hw4/hw4-3.py | f7b06e5e5a91ccc4c724207580a34c8d0e47990e | []
| no_license | https://github.com/ahtouw/Projects | 950dc23b371fe0997b48c4475ad268aa0342fbca | 15d25449e3ddcfe204d2ac6546ccb9272f074928 | refs/heads/master | 2020-04-14T15:38:02.363349 | 2019-01-03T07:16:34 | 2019-01-03T07:16:34 | 163,932,996 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Homework 4 - Part 3 - Substitution
# William Ah Tou
# 3/12/17
# Mustafa Al-Lail
import sys
import re
inFile = sys.argv[1]
outFile = sys.argv[2]
def readFile(fileName):
with open(fileName,'r') as f:
lines = f.read().splitlines()
return lines
def writeFile(newLine):
appendStr = newLine + '\n'
with open(outFile,'a') as i:
i.write(appendStr)
def modify(testStr):
a = re.compile(r"(\("
r"\d\d\d"
r"\)"
r"\d\d\d"
r"\-"
r"\d\d\d\d)")
b = re.compile(r"(\("
r"\d\d\d"
r"\)\-"
r"\d\d\d"
r"\-"
r"\d\d\d\d)")
c = re.compile(r"(\("
r"\d\d\d"
r"\)\s"
r"\d\d\d"
r"\-"
r"\d\d\d\d)")
d = re.compile(r"("
r"\d\d\d"
r"\-"
r"\d\d\d"
r"\-"
r"\d\d\d\d)")
e = re.compile(r"(\("
r"\d\d\d"
r"\)"
r"\d\d\d"
r"\."
r"\d\d\d\d)")
f = re.compile(r"(\("
r"\d\d\d"
r"\)\."
r"\d\d\d"
r"\."
r"\d\d\d\d)")
g = re.compile(r"(\("
r"\d\d\d"
r"\)\s"
r"\d\d\d"
r"\."
r"\d\d\d\d)")
h = re.compile(r"("
r"\d\d\d"
r"\."
r"\d\d\d"
r"\."
r"\d\d\d\d)")
substit = testStr
if a.search(substit):
substit = testStr
if b.search(substit):
substit = re.sub(r"(\(\d\d\d\))-(\d\d\d)-(\d\d\d\d)",r"\1\2-\3",substit)
if c.search(substit):
substit = re.sub(r"(\(\d\d\d\))\s(\d\d\d)-(\d\d\d\d)",r"\1\2-\3",substit)
if d.search(substit):
substit = re.sub(r"(\d\d\d)-(\d\d\d)-(\d\d\d\d)", r"(\1)\2-\3", substit)
if e.search(substit):
substit = re.sub(r"(\(\d\d\d\))(\d\d\d).(\d\d\d\d)", r"\1\2-\3", substit)
if f.search(substit):
substit = re.sub(r"(\(\d\d\d\)).(\d\d\d).(\d\d\d\d)", r"\1\2-\3", substit)
if g.search(substit):
substit = re.sub(r"(\(\d\d\d\))\s(\d\d\d).(\d\d\d\d)", r"\1\2-\3", substit)
if h.search(substit):
substit = re.sub(r"(\d\d\d).(\d\d\d).(\d\d\d\d)", r"(\1)\2-\3", substit)
print(substit)
return(substit)
lines = readFile(inFile)
modLines = []
for i in lines:
modLines.append(modify(i))
for i in modLines:
writeFile(i) | UTF-8 | Python | false | false | 2,151 | py | 40 | hw4-3.py | 16 | 0.451883 | 0.437936 | 0 | 98 | 20.959184 | 77 |
darionyaphet/python.learning | 8,521,215,159,316 | fc035da22f55faa92bbe141491c880559f67884d | f319e49b4a4852539fca48fbe830f3871f9f5a2f | /pandas/start.py | 77a7d5af338e73b20cf02844f9ccfadc6d009e0d | []
| no_license | https://github.com/darionyaphet/python.learning | 75b4d8f7e863286f212f55a6b0478a68b11e48d9 | eec24569ef11c7c459a2a10aff9f6522edb1e352 | refs/heads/master | 2020-05-21T12:23:02.060758 | 2018-12-12T07:03:10 | 2018-12-12T07:03:10 | 36,119,713 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
10 Minutes to pandas:
http://pandas.pydata.org/pandas-docs/stable/10min.html
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Creating a Series by passing a list of values, letting pandas create a default integer index
s = pd.Series([1,3,5,np.nan,6,8])
'''
0 1
1 3
2 5
3 NaN
4 6
5 8
dtype: float64
'''
#Creating a DataFrame by passing a numpy array, with a datetime index and labeled columns
dates = pd.date_range('20130101', periods=6)
'''
DatetimeIndex(['2013-01-01', '2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'],
dtype='datetime64[ns]', freq='D', tz=None)
'''
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
'''
A B C D
2013-01-01 0.469112 -0.282863 -1.509059 -1.135632
2013-01-02 1.212112 -0.173215 0.119209 -1.044236
2013-01-03 -0.861849 -2.104569 -0.494929 1.071804
2013-01-04 0.721555 -0.706771 -1.039575 0.271860
2013-01-05 -0.424972 0.567020 0.276232 -1.087401
2013-01-06 -0.673690 0.113648 -1.478427 0.524988
'''
#Creating a DataFrame by passing a dict of objects that can be converted to series-like
df2 = pd.DataFrame({ 'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : 'foo' })
'''
A B C D E F
0 1 2013-01-02 1 3 test foo
1 1 2013-01-02 1 3 train foo
2 1 2013-01-02 1 3 test foo
3 1 2013-01-02 1 3 train foo
'''
df2.dtypes
'''
A float64
B datetime64[ns]
C float32
D int32
E category
F object
dtype: object
'''
df.head()
'''
A B C D
2013-01-01 0.469112 -0.282863 -1.509059 -1.135632
2013-01-02 1.212112 -0.173215 0.119209 -1.044236
2013-01-03 -0.861849 -2.104569 -0.494929 1.071804
2013-01-04 0.721555 -0.706771 -1.039575 0.271860
2013-01-05 -0.424972 0.567020 0.276232 -1.087401
'''
df.tail(3)
'''
A B C D
2013-01-04 0.721555 -0.706771 -1.039575 0.271860
2013-01-05 -0.424972 0.567020 0.276232 -1.087401
2013-01-06 -0.673690 0.113648 -1.478427 0.524988
'''
| UTF-8 | Python | false | false | 2,443 | py | 53 | start.py | 50 | 0.561195 | 0.289808 | 0 | 84 | 28.047619 | 93 |
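
The transcription of the tutorial stops here. A few more of the basic inspection calls from the same "10 Minutes to pandas" page, applied to the df built above, would look like the following (expected-output docstrings omitted; this continuation is not part of the original file):

df.index                 # the DatetimeIndex created with pd.date_range above
df.columns               # Index(['A', 'B', 'C', 'D'], dtype='object')
df.describe()            # quick per-column summary statistics
df.sort_values(by='B')   # sort the rows by column B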
MarcoBurgos/ayeWedding | 17,093,969,866,911 | d18d3d8c1b5acc479b13e121eaffcefebc75b5e1 | 74315bccd4faef88b2ac33dcb1b26977121a4d0f | /ayeApp/models.py | 50d474e7bd40906ad5c44f90540cd1c2f8633429 | []
| no_license | https://github.com/MarcoBurgos/ayeWedding | b5aebe825272f8a9a1a32348515a7260b164bf8b | 16d231ae5e6bd623c66874ef150d7e2ea3d7eb1f | refs/heads/main | 2022-12-30T12:25:33.158855 | 2020-10-20T02:02:42 | 2020-10-20T02:02:42 | 305,221,217 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.forms import ModelForm
# Create your models here.
class Invitation(models.Model):
    invitation_owner_name = models.CharField(max_length=90, verbose_name="Propietario de la invitación")  # "Invitation owner"
    email = models.EmailField(max_length=128, unique=True)
    telephone = models.CharField(max_length=16)
    guest_names = models.CharField(max_length=256, verbose_name="Nombres de invitados")  # "Guest names"
    number_of_guests = models.IntegerField(verbose_name="Total de invitados")  # "Total number of guests"
    guests_confirmed = models.CharField(max_length=256, null=True, blank=True, verbose_name="Nombres confirmados")  # "Confirmed names"
    total_guests_confirmed = models.IntegerField(null=True, blank=True, verbose_name="Total de confirmados")  # "Total confirmed"
    is_RSVP = models.BooleanField(null=True, blank=True, verbose_name="¿Confirmó?")  # "Did they confirm?"
    date_RSVP = models.DateField(null=True, blank=True, verbose_name="Fecha confirmación")  # "Confirmation date"
def __str__(self):
return str(self.guest_names)
class Meta:
verbose_name_plural = "Invitaciones"
| UTF-8 | Python | false | false | 1,029 | py | 13 | models.py | 8 | 0.736585 | 0.723902 | 0 | 20 | 50.25 | 114 |
SaurabhAgarwala/grapp | 13,503,377,206,205 | 3f9d28756c601b20f5534d1fcbc663f99a9e7dc5 | c7f47100965c15b4cb120610a4c8af33ab727c3d | /grievances/forms.py | bc2030b50bd712fcbf7d331d05b93d19f30c0210 | []
| no_license | https://github.com/SaurabhAgarwala/grapp | a60213f262115e6599b77a03cdd0c5b46678644a | d0d568add196308e6940c94735a1e980d9b23a38 | refs/heads/master | 2020-03-26T22:30:33.608655 | 2018-08-20T19:58:37 | 2018-08-20T19:58:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from . import models
class ComplainForm(forms.ModelForm):
class Meta:
model = models.Complain
fields = ['title', 'body']
class RespondForm(forms.ModelForm):
class Meta:
model = models.Complain
fields = ['status', 'comments'] | UTF-8 | Python | false | false | 291 | py | 16 | forms.py | 8 | 0.649485 | 0.649485 | 0 | 12 | 23.333333 | 39 |
xinsec/python-framework | 14,654,428,435,711 | d1338348b197822003edf7738f8c79af7532e8d5 | 4ebf8067f596707dc5336914bf0ea0a97d9fbcb7 | /tornadoapp/tornadoapp/websocket/client.py | ed4bc7ae825713f550c9436098dfec7305bcaf03 | []
| no_license | https://github.com/xinsec/python-framework | 0ade696c5467635a6fd38125f1b2467d58047184 | 2da2258896145456312342f471d91cec95ddfb24 | refs/heads/master | 2018-02-06T19:13:18.152713 | 2017-06-28T03:54:21 | 2017-06-28T03:54:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
pip install websocket-client
"""
from websocket import create_connection
if __name__ == '__main__':
ws_server = "ws://localhost:30001/websocket/"
ws = create_connection(ws_server)
# ws.send('{"name":"admin"}')
ws.send('{"message":{"reload":1},"destination":"admin"}')
print ws.recv()
ws.close() | UTF-8 | Python | false | false | 324 | py | 35 | client.py | 27 | 0.611111 | 0.592593 | 0 | 13 | 24 | 61 |
woobinda/mysite_v2 | 764,504,206,826 | bf4a14bc18cc7d923fcdcf6b3732ec332a1c7c94 | cad8d97ff00189c5bcc5f9d8278cb809ff044772 | /musicians/migrations/0027_auto_20160412_1443.py | 869dfa91f853e9d54658f82642d8f5cb68aa9120 | []
| no_license | https://github.com/woobinda/mysite_v2 | d1a405148ffd1bb87b254c9449f46dc50c73085f | 9a105e042c4ae1c204d031a4a2a9cd712ce0e831 | refs/heads/master | 2016-06-03T08:11:04.791096 | 2016-04-15T17:56:08 | 2016-04-15T17:56:08 | 51,135,993 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-12 11:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('musicians', '0026_auto_20160412_1407'),
]
operations = [
migrations.CreateModel(
name='MusicianInstrument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100, unique=True, verbose_name='slug')),
],
),
migrations.RemoveField(
model_name='musician',
name='instrument',
),
migrations.RemoveField(
model_name='musician',
name='instrument_slug',
),
migrations.AlterField(
model_name='musician',
name='playing_style',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='musicians.MusicStyle', verbose_name='Playing style'),
),
]
| UTF-8 | Python | false | false | 1,203 | py | 81 | 0027_auto_20160412_1443.py | 46 | 0.58271 | 0.551122 | 0 | 37 | 31.513514 | 149 |
cyriltasse/FindCluster | 8,315,056,690,198 | 1e00c4aa44c79a38d75e139eb7c778e19dcbae5c | 593930a095ace43d083e850c6a155eea3824dfcf | /ClassLogDiffLikelihoodMachine.py | d938c2eb8cc3d2cfb3a80bd245f372c5d26fb48c | []
| no_license | https://github.com/cyriltasse/FindCluster | edc96280e0e5824ed2d607a5b4a4d62686c99559 | f8010c04b6802751669a81bfc63bb5a84777ca31 | refs/heads/master | 2023-01-28T12:26:54.549341 | 2020-05-27T14:34:03 | 2020-05-27T14:34:03 | 186,429,403 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pylab
import ClassMassFunction
import ClassCatalogMachine
import ClassDisplayRGB
from DDFacet.Other import ClassTimeIt
from DDFacet.ToolsDir.ModToolBox import EstimateNpix
np.random.seed(1)
from DDFacet.Other import logger
log = logger.getLogger("ClassLikelihoodMachine")
from DDFacet.Other.AsyncProcessPool import APP, WorkerProcessError
import ClassInitGammaCube
from ClassAndersonDarling import *
from ClassShapiroWilk import *
from ClassEigenShapiroWilk import *
class ClassLikelihoodMachine():
def __init__(self,CM):
self.CM=CM
self.zParms=self.CM.zg_Pars
self.logMParms=self.CM.logM_Pars
self.logM_g=np.linspace(*self.logMParms)
self.MassFunction=ClassMassFunction.ClassMassFunction()
self.MassFunction.setSelectionFunction(self.CM)
self.NSlice=self.zParms[-1]-1
self.MAP=1
# APP.registerJobHandlers(self)
def ComputeIndexCube(self,NPix):
self.NPix=NPix
self.IndexCube=np.array([i*np.int64(NPix**2)+np.int64(self.CM.Cat_s.xCube*NPix)+np.int64(self.CM.Cat_s.yCube) for i in range(self.NSlice)]).flatten()
X,Y=self.CM.Cat_s.xCube,self.CM.Cat_s.yCube
# # X=np.int64(np.random.rand(X.size)*NPix)
# # Y=np.int64(np.random.rand(X.size)*NPix)
# # X.fill(NPix//2)
# # Y.fill(NPix//2)
self.MassFunction.GammaMachine.ThisMask.fill(0)
self.IndexCube_xy_Slice=(np.int64(X*NPix)+np.int64(Y)).flatten()
indy,indx=np.where(self.MassFunction.GammaMachine.ThisMask==0)
#self.IndexCube_Mask=np.array([i*(np.int64(indy).flatten()*NPix+np.int64(indx).flatten()) for i in range(self.NSlice)]).flatten()
self.IndexCube_Mask_Slice=(np.int64(indy).flatten()*NPix+np.int64(indx).flatten())
self.IndexCube_Mask=np.array([i*NPix**2+(np.int64(indy).flatten()*NPix+np.int64(indx).flatten()) for i in range(self.NSlice)]).flatten()
if self.MAP:
GM=self.MassFunction.GammaMachine
self.LCAD=[]
self.LCSW=[]
self.PowerMAP=1.#4.#2.5
# for iSlice in range(self.NSlice):
# CAD=ClassAndersonDarlingMachine()
# CAD.generatePA2(GM.L_NParms[iSlice],NTry=2000)
# self.LCAD.append(CAD)
# self.CSWFull=ClassShapiroWilk()
# self.CSWFull.Init(GM.NParms,NTry=2000)
self.CSW=ClassEigenShapiroWilk(GM.L_NParms)
# for iSlice in range(self.NSlice):
# log.print("======================================="%iSlice)
# log.print("Init ShapiroWilk for slice %i"%iSlice)
# CAD=ClassShapiroWilk()
# CAD.Init(GM.L_NParms[iSlice],NTry=2000)
# self.LCSW.append(CAD)
def measure_dlogPdg(self,g0,DoPlot=0):
g=g0.copy()
L0=self.logL(g,DoPlot=DoPlot)
dL_dg0=self.dlogLdg(g)
NN=g.size
dg=.001
NTest=100
# Parm_id=np.arange(NN)[::-1][0:NTest]
Parm_id=np.int64(np.random.rand(NTest)*g.size)
dL_dg1=np.zeros((Parm_id.size,),np.float64)
for i in range(Parm_id.size):
g1=g.copy()
g1[Parm_id[i]]+=dg
L1=self.logL(g1,DoPlot=DoPlot)
dL_dg1[i]=(L0-L1)/((g-g1)[Parm_id[i]])
if (L0-L1)==0:
print(" 0 diff for iParm = %i"%i)
pylab.figure("Jacob")
pylab.clf()
# pylab.plot(dL_dg0[Parm_id]/dL_dg1)
pylab.plot(dL_dg0[Parm_id],label="Computed")
pylab.plot(dL_dg1,ls="--",label="Measured")
pylab.legend()
pylab.draw()
pylab.show(block=False)
pylab.pause(0.1)
# #############################################
def measure_d2logPdg2(self,g0,DoPlot=0):
g=g0.copy()
NN=g.size
dg=1e-2
NTest=1
Parm_id=np.int64(np.random.rand(NTest)*g.size)
Parm_id=np.arange(NN)
dJdg0=self.d2logPdg2(g,Diag=True)
dJdg0_Full=self.d2logPdg2(g,
Diag=False,
ParmId=Parm_id)
#dJdg2=self.buildFulldJdg(g,ParmId=Parm_id)
pylab.figure("Hessian")
pylab.clf()
pylab.plot(dJdg0[Parm_id],label="CalcDiag",color="black")
pylab.plot(np.diag(dJdg0_Full)[Parm_id],label="np.diag(CalcFull)",ls="--",lw=2)
# #pylab.plot(np.diag(dJdg2)[Parm_id])
# pylab.draw()
# pylab.show(block=False)
# pylab.pause(0.1)
dLdg_0=self.dlogPdg(g)
dJdg1_row=np.zeros((Parm_id.size,dLdg_0.size),np.float64)
dJdg1=np.zeros((Parm_id.size,),np.float64)
for i in range(Parm_id.size):
print("%i/%i"%(i,Parm_id.size))
g1=g.copy()
g1[Parm_id[i]]+=dg
dLdg_1=self.dlogPdg(g1)
dJdg1[i]=(dLdg_0[Parm_id[i]]-dLdg_1[Parm_id[i]]) / ((g-g1)[Parm_id[i]])
dJdg1_row[i,:]=(dLdg_1-dLdg_0) / dg
# pylab.plot(dL_dg0[Parm_id]/dL_dg1)
pylab.plot(dJdg1,label="measure of diag",ls="-.",lw=2)
pylab.legend()
pylab.draw()
pylab.show(block=False)
pylab.pause(0.1)
#I0=np.log10(np.abs(dJdg0_Full))
#I1=np.log10(np.abs(dJdg1_row))
I0=(np.abs(dJdg0_Full))
I1=(np.abs(dJdg1_row))
I0[I0<=0]=1e-10
I1[I1<=0]=1e-10
I1=np.log10(I1)
I0=np.log10(I0)
v0=np.min([I0.min(),I1.min()])
v1=np.max([I0.max(),I1.max()])
#m=np.median(I0[I0>0])
#v0,v1=0,100*m
pylab.figure("Full Hessian")
pylab.clf()
ax=pylab.subplot(2,2,1)
pylab.imshow(I1,interpolation="nearest",vmin=v0,vmax=v1)
pylab.title("measured")
pylab.subplot(2,2,2,sharex=ax,sharey=ax)
pylab.imshow(I0,interpolation="nearest",vmin=v0,vmax=v1)
pylab.title("computed")
pylab.subplot(2,2,3,sharex=ax,sharey=ax)
pylab.imshow(I1-I0,interpolation="nearest",vmin=-1,vmax=1)
pylab.title("resid")
pylab.colorbar()
# ind=np.int64(np.random.rand(1000)*dJdg1_row.size)
# x=dJdg1_row.T.flatten()[ind]
# y=dJdg0_Full[Parm_id,:].T.flatten()[ind]
# #x=np.log10(np.abs(x))
# #y=np.log10(np.abs(y))
# #x=np.log10(np.abs(x))
# #y=np.log10(np.abs(y))
# pylab.plot(x,label="row measured",color="black")
# pylab.plot(y,ls="--",label="row computed")
# # pylab.plot(dJdg1_row.T,label="row measured",color="black")
# # pylab.plot(dJdg0_Full[Parm_id,:].T,ls="--",label="row computed")
# pylab.legend()
pylab.draw()
pylab.show(False)
pylab.pause(0.1)
# ################################################
def logP(self,g):
logL=self.logL(g)
#logL=0.
if self.MAP:
logL+=self.CSW.logP_x(g.flatten())
return logL
def dlogPdg(self,g):
NParms=self.MassFunction.GammaMachine.NParms
J=np.zeros((NParms,),np.float32)
J+=self.dlogLdg(g)
#J.fill(0)
if self.MAP:
J+=self.CSW.dlogPdx(g.flatten())
return J
def d2logPdg2(self,g0,Diag=True,ParmId=None):
if Diag:
H=self.d2logLdg2(g0)
#H.fill(0)
if self.MAP:
H[:]+=self.CSW.d2logPdx2_Diag(g0.flatten())
else:
# H=np.zeros((g0.size,g0.size),np.float64)
# dg=1e-3
# dLdg0=self.dlogLdg(g0)
# if ParmId is None:
# ParmId=np.arange(g0.size)
# for ii,i in enumerate(ParmId):
# print("%i/%i"%(ii,ParmId.size))
# g1=g0.copy()
# g1[i]+=dg
# dLdg1=self.dlogLdg(g1)
# H[i]=(dLdg1-dLdg0) / dg
H=self.d2logLdg2_Full(g0)
#H.fill(0)
if self.MAP:
H+=self.CSW.d2logPdx2_Full(g0)
return H
# ################################################
def logprob(self,g):
return np.float64(self.logP(g)),np.float64(self.dlogPdg(g))
def logL(self,g0,DoPlot=0):
GM=self.MassFunction.GammaMachine
g0=np.float64(g0)
GM.computeGammaCube(g0)
self.CellRad_0=self.MassFunction.GammaMachine.CellRad
self.CellRad_1=(.00001/3600)*np.pi/180
g=g0.reshape((-1,1))
L_SqrtCov=GM.L_SqrtCov
L_NParms=GM.L_NParms
Ns=self.CM.Cat_s.shape[0]
n_z=self.CM.DicoDATA["DicoSelFunc"]["n_z"]
n_zt=self.CM.Cat_s.n_zt
TypeSum=np.float64
#TypeSum=np.float32
# self.funcNormLog=np.log10
# self.fNormLog=np.log(10)
self.funcNormLog=np.log
self.fNormLog=1.
if DoPlot: GM.PlotGammaCube(Cube=GM.GammaCube,FigName="JacobCube")
Nx_1=np.zeros((Ns,),np.float64)
# Ax_1=np.zeros((Ns,),np.float32)
L_Ax_1_z=[]
for iSlice in range(self.NSlice):
Gamma_i=GM.GammaCube[iSlice].flat[self.IndexCube_xy_Slice]
Nx_1[:]+=n_z[iSlice]*Gamma_i*self.CellRad_1**2
Ax_1_z=n_zt[:,iSlice]*Gamma_i*self.CellRad_1**2
#Ax_1[:]+=Ax_1_z
L_Ax_1_z.append(Ax_1_z)
L=np.float64([0.])
# #######################
# SumNx_0=np.sum(TypeSum((n_z.reshape(-1,1,1)*self.CellRad_0**2)*GM.GammaCube))
SumNx_0=np.sum(TypeSum(n_z.reshape(-1,1,1)*GM.GammaCube).flat[self.IndexCube_Mask])*self.CellRad_0**2/self.fNormLog
L+=-SumNx_0
# #######################
SumNx_1=np.sum(TypeSum(Nx_1))
L+= SumNx_1
# #######################
self.Ax_1_z=np.array(L_Ax_1_z)
self.Ax_1=np.sum(TypeSum(self.Ax_1_z),axis=0)
SumAx_1=np.sum(self.funcNormLog(TypeSum(self.Ax_1)))
L+= SumAx_1
self.gCurrentL=g0.copy()
return L[0]
def dlogLdg(self,g0,Slice=None):
T=ClassTimeIt.ClassTimeIt()
T.disable()
GM=self.MassFunction.GammaMachine
GM.computeGammaCube(g0)
g=g0.reshape((-1,1))
T.timeit("Compute Gamma")
L_SqrtCov=GM.L_SqrtCov
L_NParms=GM.L_NParms
NParms=GM.NParms
Ns=self.CM.Cat_s.shape[0]
n_z=self.CM.DicoDATA["DicoSelFunc"]["n_z"]
n_zt=self.CM.Cat_s.n_zt
# Sum_z_Ax_1_z=np.sum(self.Ax_1_z,axis=0)
L_Ax_1_z=[]
for iSlice in range(self.NSlice):
Gamma_i=GM.GammaCube[iSlice].flat[self.IndexCube_xy_Slice]
Ax_1_z=n_zt[:,iSlice]*Gamma_i*self.CellRad_1**2
L_Ax_1_z.append(Ax_1_z)
Ax_1_z=np.array(L_Ax_1_z)
Sum_z_Ax_1_z=np.sum(Ax_1_z,axis=0)
# print(GM.GammaCube.flat[0])
ii=0
J=np.zeros((NParms,),np.float64)
if Slice is None:
Sl=slice(None)
else:
Sl=slice(Slice,Slice+1)
for iSlice in range(self.NSlice)[Sl]:
ThisNParms=L_NParms[iSlice]
iPar=ii
jPar=iPar+ThisNParms
GammaSlice=GM.GammaCube[iSlice]
SqrtCov=L_SqrtCov[iSlice]
SqrtCov_xy=SqrtCov[self.IndexCube_xy_Slice,:]
# #######################
dNx_0_dg=n_z[iSlice]*SqrtCov[:,:]*GammaSlice.reshape((-1,1))*self.CellRad_0**2
Sum_dNx_0_dg=np.sum(dNx_0_dg[self.IndexCube_Mask_Slice,:],axis=0)
J[iPar:jPar]+= -Sum_dNx_0_dg
# #########################
dNx_1_dg=n_z[iSlice]*SqrtCov_xy[:,:]*GammaSlice.flat[self.IndexCube_xy_Slice].reshape((-1,1))*self.CellRad_1**2
Sum_dNx_1_dg=np.sum(dNx_1_dg,axis=0)
J[iPar:jPar]+= Sum_dNx_1_dg
# #########################
dAx_dg_0 = n_zt[:,iSlice].reshape((-1,1))*SqrtCov_xy[:,:]*GammaSlice.flat[self.IndexCube_xy_Slice].reshape((-1,1))#*np.log(10)
dAx_dg_1 = Sum_z_Ax_1_z
dAx_dg = dAx_dg_0/dAx_dg_1.reshape((-1,1))*self.CellRad_1**2
Sum_dAx_dg=np.sum(dAx_dg,axis=0)
J[iPar:jPar]+= + Sum_dAx_dg
ii+=ThisNParms
return J
# #################################
# HESSIANs
def d2logLdg2(self,g0):
T=ClassTimeIt.ClassTimeIt()
T.disable()
GM=self.MassFunction.GammaMachine
GM.computeGammaCube(g0)
g=g0.reshape((-1,1))
T.timeit("Compute Gamma")
L_SqrtCov=GM.L_SqrtCov
L_NParms=GM.L_NParms
NParms=GM.NParms
Ns=self.CM.Cat_s.shape[0]
n_z=self.CM.DicoDATA["DicoSelFunc"]["n_z"]
n_zt=self.CM.Cat_s.n_zt
# Sum_z_Ax_1_z=np.sum(self.Ax_1_z,axis=0)
L_Ax_1_z=[]
for iSlice in range(self.NSlice):
Gamma_i=GM.GammaCube[iSlice].flat[self.IndexCube_xy_Slice]
Ax_1_z=n_zt[:,iSlice]*Gamma_i*self.CellRad_1**2
L_Ax_1_z.append(Ax_1_z)
Ax_1_z=np.array(L_Ax_1_z)
Sum_z_Ax_1_z=np.sum(Ax_1_z,axis=0)
ii=0
H=np.zeros((NParms,),np.float32)
for iSlice in range(self.NSlice):
ThisNParms=L_NParms[iSlice]
iPar=ii
jPar=iPar+ThisNParms
GammaSlice=GM.GammaCube[iSlice]
SqrtCov=L_SqrtCov[iSlice]
SqrtCov_xy=SqrtCov[self.IndexCube_xy_Slice,:]
# ##################################"
dNx_0_dg=n_z[iSlice] * (SqrtCov[:,:])**2 * GammaSlice.reshape((-1,1))*self.CellRad_0**2*self.fNormLog
Sum_dNx_0_dg=np.sum(dNx_0_dg[self.IndexCube_Mask_Slice,:],axis=0)
H[iPar:jPar]+= -Sum_dNx_0_dg
# ##################################"
dNx_1_dg=n_z[iSlice]*(SqrtCov_xy[:,:])**2 * GammaSlice.flat[self.IndexCube_xy_Slice].reshape((-1,1))*self.CellRad_1**2
Sum_dNx_1_dg=np.sum(dNx_1_dg,axis=0)
H[iPar:jPar]+= + Sum_dNx_1_dg
# ##################################"
dAx_dg_0 = n_zt[:,iSlice].reshape((-1,1))* (SqrtCov_xy[:,:]) \
* GammaSlice.flat[self.IndexCube_xy_Slice].reshape((-1,1))#*np.log(10)
dAx_dg_1 = Sum_z_Ax_1_z
dAx_dg_A = SqrtCov_xy[:,:]*dAx_dg_0 / dAx_dg_1.reshape((-1,1)) * self.CellRad_1**2
dAx_dg_B = - ( (dAx_dg_0 * dAx_dg_0) * self.CellRad_1**4 / (dAx_dg_1.reshape((-1,1)))**2 )
dAx_dg = dAx_dg_A + dAx_dg_B
Sum_dAx_dg=np.sum(dAx_dg,axis=0)
H[iPar:jPar]+= + Sum_dAx_dg
ii+=ThisNParms
return H
# #############################
# FULLLLL
def d2logLdg2_Full(self,g0):
T=ClassTimeIt.ClassTimeIt()
T.disable()
GM=self.MassFunction.GammaMachine
GM.computeGammaCube(g0)
g=g0.reshape((-1,1))
T.timeit("Compute Gamma")
L_SqrtCov=GM.L_SqrtCov
L_NParms=GM.L_NParms
NParms=GM.NParms
Ns=self.CM.Cat_s.shape[0]
n_z=self.CM.DicoDATA["DicoSelFunc"]["n_z"]
n_zt=self.CM.Cat_s.n_zt
# Sum_z_Ax_1_z=np.sum(self.Ax_1_z,axis=0)
L_Ax_1_z=[]
for iSlice in range(self.NSlice):
Gamma_i=GM.GammaCube[iSlice].flat[self.IndexCube_xy_Slice]
Ax_1_z=n_zt[:,iSlice]*Gamma_i*self.CellRad_1**2
L_Ax_1_z.append(Ax_1_z)
Ax_1_z=np.array(L_Ax_1_z)
Sum_z_Ax_1_z=np.sum(Ax_1_z,axis=0)
DicoIndex={}
ii=0
for iSlice in range(self.NSlice):
ThisNParms=L_NParms[iSlice]
iPar=ii
jPar=iPar+ThisNParms
DicoIndex[iSlice]=(iPar,jPar)
ii+=ThisNParms
ii=0
H=np.zeros((NParms,NParms),np.float32)
dAidg=np.zeros((Ns,NParms),np.float32)
for iSlice in range(self.NSlice):
GammaSlice=GM.GammaCube[iSlice]
i0,i1=DicoIndex[iSlice]
ThisNParms=i1-i0
SqrtCov=L_SqrtCov[iSlice]
SqrtCov_xy=SqrtCov[self.IndexCube_xy_Slice,:]
iPar,jPar=i0,i1
ng=n_zt[:,iSlice].reshape((-1,1))* GammaSlice.flat[self.IndexCube_xy_Slice].reshape((-1,1))
dAidg[:,i0:i1] = ng * SqrtCov_xy.reshape((Ns,ThisNParms))
for iSlice in range(self.NSlice):
GammaSlice=GM.GammaCube[iSlice]
i0,i1=DicoIndex[iSlice]
ThisNParms=i1-i0
SqrtCov=L_SqrtCov[iSlice]
SqrtCov_xy=SqrtCov[self.IndexCube_xy_Slice,:]
iPar,jPar=i0,i1
# ##################################
dNx_0_dg=n_z[iSlice]\
* SqrtCov.reshape((self.NPix**2,ThisNParms,1))\
* SqrtCov.reshape((self.NPix**2,1,ThisNParms))\
* GammaSlice.reshape((-1,1,1))*self.CellRad_0**2*self.fNormLog
Sum_dNx_0_dg=np.sum(dNx_0_dg[self.IndexCube_Mask_Slice,...],axis=0)
H[iPar:jPar,iPar:jPar]+= - Sum_dNx_0_dg
# ##################################
dNx_1_dg=n_z[iSlice].reshape(-1,1,1)\
* SqrtCov_xy.reshape((Ns,ThisNParms,1))\
* SqrtCov_xy.reshape((Ns,1,ThisNParms))\
* GammaSlice.flat[self.IndexCube_xy_Slice].reshape((-1,1,1))*self.CellRad_1**2
Sum_dNx_1_dg=np.sum(dNx_1_dg,axis=0)
H[iPar:jPar,iPar:jPar]+= Sum_dNx_1_dg
ng=n_zt[:,iSlice].reshape((-1,1))* GammaSlice.flat[self.IndexCube_xy_Slice].reshape((-1,1))
dAx_dg_0 = ng.reshape(-1,1,1) * SqrtCov_xy.reshape((Ns,ThisNParms,1)) * SqrtCov_xy.reshape((Ns,1,ThisNParms))
H[iPar:jPar,iPar:jPar]+= np.sum(dAx_dg_0 * (Sum_z_Ax_1_z.reshape((-1,1,1)))**(-1),axis=0)*self.CellRad_1**2
for jSlice in range(self.NSlice):
GammaSlice_j=GM.GammaCube[jSlice]
j0,j1=DicoIndex[jSlice]
ThisNParms_j=j1-j0
SqrtCov_j=L_SqrtCov[jSlice]
SqrtCov_xy_j=SqrtCov_j[self.IndexCube_xy_Slice,:]
# ##################################
Js=dAidg[:,i0:i1]
Js_j=dAidg[:,j0:j1]
H[i0:i1,j0:j1]+= - np.sum(Js.reshape((Ns,ThisNParms,1))*Js_j.reshape((Ns,1,ThisNParms_j)) \
* (Sum_z_Ax_1_z.reshape((-1,1,1)))**(-2),axis=0)*self.CellRad_1**4
return H
def recenterNorm(self,X):
return X
return self.CSW.recenterNorm(X)
| UTF-8 | Python | false | false | 18,810 | py | 49 | ClassLogDiffLikelihoodMachine.py | 49 | 0.509091 | 0.477778 | 0 | 541 | 33.752311 | 157 |
KupynOrest/AmurTigerCVWC | 5,265,629,914,523 | b5ad41c2d7d9bac0fb5a938ee165f125b5922945 | 116395020bafdfc3b7ac56150dfff72507d83faa | /PyTorch/model_training/detection/detector.py | f8f846a7b91b983d12d604f4d6c2a747a2c9739a | []
| no_license | https://github.com/KupynOrest/AmurTigerCVWC | 4e8aee5f4b9b736a8d8fe24aae984c97dab22acd | 684e9c1480da92b4ce7ea24d31a55657256aee23 | refs/heads/master | 2020-06-26T09:53:40.176070 | 2019-08-05T14:15:04 | 2019-08-05T14:15:04 | 199,601,559 | 7 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import albumentations as albu
import numpy as np
import torch
# from model_training.detection.retinanet_v2 import RetinaNet
from model_training.detection.retinanet import RetinaNet
from .ssd import SSD
from .detector_postprocessing import DetectorPostProcessing
class Detector(object):
def __init__(self, config):
super(Detector, self).__init__()
self._init_model(config)
self.img_size = config['img_size']
self.multiclass_suppression = config.get('multiclass_suppression', True)
self.transform = self._get_transform()
self.post_processing = DetectorPostProcessing(config)
def __call__(self, img):
with torch.no_grad():
x = self._process_image(img)
if torch.cuda.is_available():
x = x.cuda()
loc, conf, priors = self.net(x)
priors = priors[0]
return self.post_processing(loc, conf, priors, img.shape, multiclass_suppression=self.multiclass_suppression)
def _process_image(self, img):
return torch.from_numpy(np.transpose(self.transform(img), (2, 0, 1))).unsqueeze(0)
def _init_model(self, config):
if config['name'] == 'ssd':
self.net = SSD(config)
else:
self.net = RetinaNet(config, pretrained=config.get('pretrained', False))
model_dict = torch.load(config['filepath'], map_location=None if torch.cuda.is_available() else 'cpu')
self.net.load_state_dict(model_dict['model'])
self.net = self.net.eval()
if torch.cuda.is_available():
self.net = self.net.cuda()
def _get_transform(self):
pipeline = albu.Compose([
albu.Resize(self.img_size, self.img_size),
albu.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
def process(a):
r = pipeline(image=a)
return r['image']
return process
| UTF-8 | Python | false | false | 1,930 | py | 39 | detector.py | 32 | 0.615544 | 0.6 | 0 | 54 | 34.740741 | 117 |
Yanqin25/pythonLab | 9,629,316,721,274 | e3086ecfe12d2ca0f7ad2320b1494235f763d01a | 0771e69408932bd128e71556ac8b3547c583bab0 | /18.algorithm/linear_algebra/matrix.py | a77140feff04a1d1d0c008b0deb1db3b43689e3f | []
| no_license | https://github.com/Yanqin25/pythonLab | bbbe7bc8eb7f9964b5070ea8b1c6a65313eae56d | 48f6ab729a635016c09208608c7223523f73d8f7 | refs/heads/master | 2020-08-10T23:40:05.760447 | 2020-02-26T10:07:30 | 2020-02-26T10:07:30 | 214,445,503 | 0 | 0 | null | false | 2020-02-18T01:13:04 | 2019-10-11T13:36:54 | 2020-02-18T01:12:41 | 2020-02-18T01:13:03 | 16,739 | 0 | 0 | 2 | Python | false | false | from vetor import *
class Matrix(object):
def __init__(self,matrix,w,h): # 矩阵的初始化
self.__matrix = matrix
self.__width = w
self.__height = h
def __str__(self): # 返回矩阵的字符表达形式
ans = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width -1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def changeComponent(self,x,y, value): # 改变矩阵特定位置的元素
if x >= 0 and x < self.__height and y >= 0 and y < self.__width:
self.__matrix[x][y] = value
else:
raise Exception ("changeComponent: indices out of bounds")
def component(self,x,y): # 返回矩阵特定位置的元素
if x >= 0 and x < self.__height and y >= 0 and y < self.__width:
return self.__matrix[x][y]
else:
raise Exception ("changeComponent: indices out of bounds")
def width(self): # 返回矩阵的宽度
return self.__width
def height(self): # 返回矩阵的高度
return self.__height
def __mul__(self,other): # 矩阵的乘积
if isinstance(other, Vector): # 判断与矩阵乘积的对象是向量
if (len(other) == self.__width):
ans = zeroVector(self.__height)
for i in range(self.__height):
summe = 0
for j in range(self.__width):
summe += other.component(j) * self.__matrix[i][j]
ans.changeComponent(i,summe)
summe = 0
return ans
else:
raise Exception("vector must have the same size as the " + "number of columns of the matrix!")
elif isinstance(other,int) or isinstance(other,float): #判断与矩阵乘积的是标量,即数乘
matrix = [[self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height)]
return Matrix(matrix,self.__width,self.__height)
def __add__(self,other): # 矩阵的加法
if (self.__width == other.width() and self.__height == other.height()):
matrix = []
for i in range(self.__height):
row = []
for j in range(self.__width):
row.append(self.__matrix[i][j] + other.component(i,j))
matrix.append(row)
return Matrix(matrix,self.__width,self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __sub__(self,other): # 矩阵的减法
if (self.__width == other.width() and self.__height == other.height()):
matrix = []
for i in range(self.__height):
row = []
for j in range(self.__width):
row.append(self.__matrix[i][j] - other.component(i,j))
matrix.append(row)
return Matrix(matrix,self.__width,self.__height)
else:
raise Exception("matrix must have the same dimension!")
if __name__ == "__main__":
m=Matrix([[1,2,3],[4,5,6],[7,8,9]],3,3)
v=Vector([1,2,3])
print(m*v)
| UTF-8 | Python | false | false | 3,348 | py | 186 | matrix.py | 163 | 0.497782 | 0.491128 | 0 | 80 | 38.425 | 111 |
parkus/FLAIIL | 16,149,077,071,889 | b978cfc4f3020848f337a6704d53e2b6db1e35d6 | aed2049a6cc9cf2434221632b1b0d60da7d10d60 | /ranges.py | 9c3776c36af86bc0eca4e84beccc8ccb8f1c3cfc | [
"MIT"
]
| permissive | https://github.com/parkus/FLAIIL | 15d776c36cca48ead0e6bf65ba4f48f599173976 | 53698f9e6e2d933c091fb9c4b069bf255871541b | refs/heads/master | 2020-03-16T17:18:35.783996 | 2020-02-21T18:15:26 | 2020-02-21T18:15:26 | 132,826,181 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Utilities for handling numerical ranges.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def rangeset_union(ranges0, ranges1):
"""
Return the union of two sets of *sorted* ranges.
Parameters
----------
ranges0 : Nx2 array
ranges1 : Nx2 array
Returns
-------
union_ranges : Nx2 array
"""
invrng0, invrng1 = list(map(rangeset_invert, [ranges0, ranges1]))
xinv = rangeset_intersect(invrng0, invrng1)
return rangeset_invert(xinv)
def rangeset_intersect(ranges0, ranges1, presorted=False):
"""
Return the intersection of two sets of ranges.
Parameters
----------
ranges0 : Nx2 array
ranges1 : Nx2 array
presorted : bool
If True, ranges are assumed to be sorted.
Returns
-------
intersecting_ranges : Nx2 array
"""
if len(ranges0) == 0 or len(ranges1) == 0:
return np.empty([0, 2])
rng0, rng1 = list(map(np.asarray, [ranges0, ranges1]))
rng0, rng1 = [np.reshape(a, [-1, 2]) for a in [rng0, rng1]]
if not presorted:
rng0, rng1 = [r[np.argsort(r[:,0])] for r in [rng0, rng1]]
for rng in [rng0, rng1]:
assert np.all(rng[1:] > rng[:-1])
l0, r0 = rng0.T
l1, r1 = rng1.T
f0, f1 = [rng.flatten() for rng in [rng0, rng1]]
lin0 = inranges(l0, f1, [1, 0])
rin0 = inranges(r0, f1, [0, 1])
lin1 = inranges(l1, f0, [0, 0])
rin1 = inranges(r1, f0, [0, 0])
#keep only those edges that are within a good area of the other range
l = weave(l0[lin0], l1[lin1])
r = weave(r0[rin0], r1[rin1])
return np.array([l, r]).T
def rangeset_invert(ranges):
"""
Return the inverse of the provided ranges.
Parameters
----------
ranges : Nx2 array
Returns
-------
inverted_ranges : Nx2 array
"""
if len(ranges) == 0:
return np.array([[-np.inf, np.inf]])
edges = ranges.ravel()
rnglist = [edges[1:-1].reshape([-1, 2])]
if edges[0] != -np.inf:
firstrng = [[-np.inf, edges[0]]]
rnglist.insert(0, firstrng)
if edges[-1] != np.inf:
lastrng = [[edges[-1], np.inf]]
rnglist.append(lastrng)
return np.vstack(rnglist)
def inranges(values, ranges, inclusive=[False, True]):
"""Determines whether values are in the supplied list of sorted ranges.
Parameters
----------
values : 1-D array-like
The values to be checked.
ranges : 1-D or 2-D array-like
The ranges used to check whether values are in or out.
If 2-D, ranges should have dimensions Nx2, where N is the number of
ranges. If 1-D, it should have length 2N. A 2xN array may also be used, but
note that it will be assumed to be Nx2 if N == 2.
inclusive : length 2 list of booleans
Whether to treat bounds as inclusive. Because it is the default
behavior of numpy.searchsorted, [False, True] is the default here as
well. Using [False, False] or [True, True] will require roughly triple
computation time.
Returns
------
inside : array
a boolean array indexing the values that are in the ranges.
"""
ranges = np.asarray(ranges)
if ranges.ndim == 2:
if ranges.shape[1] != 2:
ranges = ranges.T
ranges = ranges.ravel()
if inclusive == [0, 1]:
return (np.searchsorted(ranges, values) % 2 == 1)
if inclusive == [1, 0]:
return (np.searchsorted(ranges, values, side='right') % 2 == 1)
if inclusive == [1, 1]:
a = (np.searchsorted(ranges, values) % 2 == 1)
b = (np.searchsorted(ranges, values, side='right') % 2 == 1)
return (a | b)
if inclusive == [0, 0]:
a = (np.searchsorted(ranges, values) % 2 == 1)
b = (np.searchsorted(ranges, values, side='right') % 2 == 1)
return (a & b)
def weave(a, b):
"""
Insert values from b into a in a way that maintains their order. Both must
be sorted.
"""
mapba = np.searchsorted(a, b)
return np.insert(a, mapba, b) | UTF-8 | Python | false | false | 4,068 | py | 7 | ranges.py | 6 | 0.584317 | 0.552606 | 0 | 144 | 27.256944 | 83 |
urmithakkar/testrepo | 13,580,686,593,455 | 13a2be3124a6d79b1be53a72abe41c8ae8c3363f | ec77feaed2d190ded34799ea01c8072d955a0022 | /testbranch1.py | 9cc1f894fe66a595f686cfbcd7be9aa53540ae87 | []
| no_license | https://github.com/urmithakkar/testrepo | 8181ee263cf31d19f36041da4ecffa20779fc394 | 2e7e154b1ab44242ab889a0ebf74c8e18182d487 | refs/heads/main | 2023-02-05T16:56:38.494353 | 2020-12-26T15:43:46 | 2020-12-26T15:43:46 | 324,581,770 | 0 | 0 | null | false | 2020-12-26T15:43:47 | 2020-12-26T15:23:08 | 2020-12-26T15:26:22 | 2020-12-26T15:43:46 | 0 | 0 | 0 | 0 | Python | false | false | #creating a file in brach1
print("Inside branch 1")
| UTF-8 | Python | false | false | 53 | py | 2 | testbranch1.py | 2 | 0.735849 | 0.698113 | 0 | 3 | 16.666667 | 26 |
fd-facu/termodinamica | 18,030,272,737,461 | b9a8f8bae47d51cee31a962d4e9714f9fdc6a367 | aafae0baeaeda7409c47cf4933edcfde4040f018 | /grafico_temp_calor.py | 6582a93e95fb7d2f7ae1c0d50b3453b973d833ff | []
| no_license | https://github.com/fd-facu/termodinamica | 26adb1c46bcb43398daff558e37d6612dacf3df4 | 1a9a1d50a82d7a8542f26a97b68298d0be8cc40c | refs/heads/master | 2022-01-29T11:38:44.559874 | 2017-12-12T20:56:40 | 2017-12-12T20:56:40 | 95,333,793 | 0 | 0 | null | false | 2022-01-19T23:29:57 | 2017-06-25T02:29:31 | 2017-12-06T17:41:23 | 2022-01-19T23:29:56 | 63 | 0 | 0 | 1 | Python | false | false | import matplotlib.pyplot as plt
plt.ylabel('Temperatura (K)')
plt.xlabel('Tiempo (seg)')
plt.title('Grafico PV')
def generar_grafico(lista_etapas):
instancia_x = []
instancia_y = []
for tupla in lista_etapas:
instancia_x.append(tupla[0])
instancia_y.append(tupla[1])
plt.plot(instancia_x, instancia_y)
plt.show()
# Test
#generar_grafico() | UTF-8 | Python | false | false | 405 | py | 19 | grafico_temp_calor.py | 18 | 0.612346 | 0.607407 | 0 | 23 | 15.695652 | 38 |
Bihan-tifr/Exam | 6,743,098,678,767 | 8135bb23edb4c7f029af0082e6b21201459431a3 | 5bf5da35ccd39c6e99cf6f904f250ed117bc019c | /p9.py | 1e35676b173fd3fee99d955075f00cc83b633f8d | []
| no_license | https://github.com/Bihan-tifr/Exam | 52615027df4a86bbf980b303335094b1c50f4e59 | e254c346c8dde8def5cb033e1b4998652a6cd341 | refs/heads/master | 2022-10-08T21:40:56.950279 | 2020-06-05T15:49:10 | 2020-06-05T15:49:10 | 269,667,696 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
A=np.array([[2,1],[1,0],[0,1]])
print("The Matrix:\n{}".format(A))
print("Singular value decomposition:")
u,s,v=(np.linalg.svd(A))
print("u:\n{}".format(u))
print("s:\n{}".format(s))
print("v:\n{}".format(v))
input("press any key to evaluate svd of second matrix")
B=np.array([[1,1,0],[1,0,1],[0,1,1]])
print("The Matrix:\n{}".format(B))
print("Singular value decomposition")
u,s,v=(np.linalg.svd(B))
print("u:\n{}".format(u))
print("s:\n{}".format(s))
print("v:\n{}".format(v))
| UTF-8 | Python | false | false | 502 | py | 6 | p9.py | 5 | 0.615538 | 0.585657 | 0 | 19 | 25.368421 | 55 |
1017-MJ/1017_MJ | 18,236,431,163,772 | edf019e4e0f8b7c9e9478e2ffd0deb28d634e969 | c235a6d5ff3c118f6e89b7ee853e8d2cb8debeda | /27.py | 576e52ca08ad3e5b7cc6debe4f29de089114aee2 | []
| no_license | https://github.com/1017-MJ/1017_MJ | 7b9b7ac4322bf8ed17d8c200d98d2bdc7c2c1fad | a2f2775cc40f88fd5abdc0c0fa14cdf2f6d21460 | refs/heads/main | 2023-06-04T16:41:27.230568 | 2021-06-11T11:46:15 | 2021-06-11T11:46:15 | 365,239,005 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | x = [3,2,4,1]
for n in x:
print(n)
x = [3,2,4,1]
y = ["Hello", "There"]
for c in y:
print(c)
x = [3,2,4,1]
#0,1,2,3
y = ["Hello", "There"]
print(x.index(4))
print(y.index("Hello"))
x = [3,2,4,1]
#0,1,2,3
y = ["Hello", "There"]
print("bye" in y)
print("Hello" in y)
if "Hello" in y:
print("Hello가 있어요.")
if "bye" in y:
print("bye")
print("bye는 없어요.") | UTF-8 | Python | false | false | 496 | py | 26 | 27.py | 26 | 0.397917 | 0.345833 | 0 | 34 | 12.176471 | 25 |
zvonicek/IT3708 | 10,660,108,846,122 | 429ecd76fdaea5cc57bce42ed087fb1542f5b573 | 7b201f188e0e5073009dd84f0437cc6a1c41b920 | /task5_q_learning/q_flatland/reinforcement_flatland/flatland.py | 4c588cb74da5ee3de1de249a84fbc855bed021f1 | []
| no_license | https://github.com/zvonicek/IT3708 | d4e4653cb1579c0765bd4837677d798f696f57af | 990b448f9d01badc8b955ce49843e37c54f8fd7b | refs/heads/master | 2020-05-20T06:01:23.754233 | 2015-05-19T10:59:32 | 2015-05-19T10:59:32 | 30,255,328 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
class Cell:
Food, Poison, Empty, Agent = range(0, 4)
class Turn:
Left, Right, Up, Down = range(0, 4)
class Flatland():
def __init__(self, world, agent_coord, food_num, poison_num):
self.agent_init = agent_coord
self.agent_coord = agent_coord
self.grid = world
self.food_num = food_num
self.poison_num = poison_num
# copy the grid so as it can be restored for next generation
self.start_grid = [x[:] for x in self.grid]
@classmethod
def random_world(cls, length, fpd, agent_coord):
grid = [[Cell.Empty for _ in range(length)] for _ in range(length)]
grid[agent_coord[0]][agent_coord[1]] = Cell.Agent
food_positions = [(x, y) for x in range(length) for y in range(length) if x != y]
food_num = round(fpd[0] * length ** 2)
for pos in random.sample(food_positions, food_num):
grid[pos[0]][pos[1]] = Cell.Food
poison_positions = [(x, y) for x in range(length) for y in range(length) if
x != y and grid[x][y] == Cell.Empty]
poison_num = round(fpd[1] * (length ** 2 - food_num))
for pos in random.sample(poison_positions, poison_num):
grid[pos[0]][pos[1]] = Cell.Poison
return cls(grid, agent_coord, food_num, poison_num)
@classmethod
def from_file(cls, filename):
grid = []
food_num = 0
poison_num = 0
agent_coord = None
with open(filename, 'r') as f:
header = f.readline().split()
agent_coord = int(header[3]), int(header[2])
line = f.readline().split()
while line:
row = []
for cell in line:
if cell == "-2":
row.append(Cell.Agent)
elif cell == "-1":
row.append(Cell.Poison)
poison_num += 1
elif cell == "0":
row.append(Cell.Empty)
else:
row.append(Cell.Food)
food_num += 1
grid.append(row)
line = f.readline().split()
return cls(grid, agent_coord, food_num, poison_num)
def reset(self):
"""resets the world to initial state"""
self.grid = [x[:] for x in self.start_grid]
self.agent_coord = self.agent_init
def move(self, turn):
curr_row, curr_col = self.get_agent()
new_row, new_col = curr_row, curr_col
if turn == Turn.Up:
new_row = (new_row - 1) % len(self.grid)
elif turn == Turn.Right:
new_col = (new_col + 1) % len(self.grid[0])
elif turn == Turn.Down:
new_row = (new_row + 1) % len(self.grid)
elif turn == Turn.Left:
new_col = (new_col - 1) % len(self.grid[0])
cell_state = self.grid[new_row][new_col]
self.grid[curr_row][curr_col] = Cell.Empty
self.grid[new_row][new_col] = Cell.Agent
self.agent_coord = (new_row, new_col)
return cell_state
def get_agent(self):
return self.agent_coord
def print_stats(self):
food = 0
poison = 0
for row in range(len(self.grid)):
for col in range(len(self.grid)):
if self.grid[row][col] == Cell.Food:
food += 1
elif self.grid[row][col] == Cell.Poison:
poison += 1
print("food eaten:", self.food_num - food, "poison eaten:", self.poison_num - poison) | UTF-8 | Python | false | false | 3,620 | py | 57 | flatland.py | 37 | 0.508011 | 0.498895 | 0 | 114 | 30.763158 | 93 |
hypnguyen1209/infosec-algorithm | 154,618,845,393 | fb92f1769733165dfad4a83ca4d1e089cf7ae84f | fd836e5c699198ff32f6a6710a6720979a53ca43 | /a.py | 940803d21643dcec9bfd86d90b73ffb21428b594 | []
| no_license | https://github.com/hypnguyen1209/infosec-algorithm | af4552da0e937135e41e54538fa35807a989edb7 | 9b9a0b091cf1a2296cede4befcee625671ac5d98 | refs/heads/main | 2023-07-02T12:41:26.696442 | 2021-08-16T09:49:17 | 2021-08-16T09:49:17 | 364,528,414 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
a = 123456
W = 8
p = 2147483647
def solve(a, W, p):
result = []
m = round(math.log2(p))
t = round(m/W)
n = [pow(2, i*W) for i in range(t)]
for i in n[::-1]:
result.append(math.floor(a/i))
a = a%i
return result
if __name__ == "__main__":
print(solve(a, W, p)) | UTF-8 | Python | false | false | 301 | py | 32 | a.py | 31 | 0.524917 | 0.458472 | 0 | 18 | 14.833333 | 36 |
Ironman778/WebShell-C2 | 11,381,663,354,765 | a664157e42e52f2a89cb0e4572f38043d7fb07e5 | 5510fa4604cf9c72198c4cb590622ff22c8624a5 | /live_feedback_(OldVersion).py | 741e14cef580fe9956de2a36c6adabebc64908d6 | [
"MIT"
]
| permissive | https://github.com/Ironman778/WebShell-C2 | 028e351509a046f82774fb5066b6a370cd8bb4e6 | 5ed781b2797054f7c031191a238618a3cf441750 | refs/heads/main | 2023-08-24T18:33:53.822209 | 2021-11-03T13:57:18 | 2021-11-03T13:57:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import glob
session = input("Session ID :")
if session not in glob.glob("./data/*.*"):
with open(f"./data/{session}",'a') as file:
file.write('')
previous = ""
while True:
with open(f"./data/{session}",'r') as file:
data = file.read()
if data!=previous:
previous = data
print(previous)
time.sleep(5)
| UTF-8 | Python | false | false | 377 | py | 22 | live_feedback_(OldVersion).py | 10 | 0.551724 | 0.549072 | 0 | 19 | 18.842105 | 47 |
pulumi/pulumi-aws-native | 6,760,278,539,705 | db694e00dec7014e2d820026bf831bf36ff33c4e | 5a74500be5b851f1efd999778088fb228ffa75b2 | /sdk/python/pulumi_aws_native/configuration/get_configuration_recorder.py | 08bacf4d61bc34268fd7eebc68ae9c405a06f1cc | [
"Apache-2.0"
]
| permissive | https://github.com/pulumi/pulumi-aws-native | d0031db61fba441d62e83f14a1503093dcabbd5a | 360bc11ff2538e17bacfb34c512cd1b34ef7ba50 | refs/heads/master | 2023-08-16T04:46:09.356539 | 2023-08-10T06:13:07 | 2023-08-10T06:13:07 | 219,575,168 | 68 | 13 | Apache-2.0 | false | 2023-09-14T12:47:08 | 2019-11-04T19:00:32 | 2023-09-08T08:20:52 | 2023-09-14T12:47:07 | 174,365 | 70 | 12 | 111 | Go | false | false | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetConfigurationRecorderResult',
'AwaitableGetConfigurationRecorderResult',
'get_configuration_recorder',
'get_configuration_recorder_output',
]
@pulumi.output_type
class GetConfigurationRecorderResult:
def __init__(__self__, id=None, recording_group=None, role_arn=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if recording_group and not isinstance(recording_group, dict):
raise TypeError("Expected argument 'recording_group' to be a dict")
pulumi.set(__self__, "recording_group", recording_group)
if role_arn and not isinstance(role_arn, str):
raise TypeError("Expected argument 'role_arn' to be a str")
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="recordingGroup")
def recording_group(self) -> Optional['outputs.ConfigurationRecorderRecordingGroup']:
return pulumi.get(self, "recording_group")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[str]:
return pulumi.get(self, "role_arn")
class AwaitableGetConfigurationRecorderResult(GetConfigurationRecorderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConfigurationRecorderResult(
id=self.id,
recording_group=self.recording_group,
role_arn=self.role_arn)
def get_configuration_recorder(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationRecorderResult:
"""
Resource Type definition for AWS::Config::ConfigurationRecorder
"""
__args__ = dict()
__args__['id'] = id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws-native:configuration:getConfigurationRecorder', __args__, opts=opts, typ=GetConfigurationRecorderResult).value
return AwaitableGetConfigurationRecorderResult(
id=pulumi.get(__ret__, 'id'),
recording_group=pulumi.get(__ret__, 'recording_group'),
role_arn=pulumi.get(__ret__, 'role_arn'))
@_utilities.lift_output_func(get_configuration_recorder)
def get_configuration_recorder_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigurationRecorderResult]:
"""
Resource Type definition for AWS::Config::ConfigurationRecorder
"""
...
| UTF-8 | Python | false | false | 3,096 | py | 13,355 | get_configuration_recorder.py | 6,362 | 0.666667 | 0.666344 | 0 | 82 | 36.756098 | 151 |
AlexG31/EcgDiagnosis | 11,836,929,879,787 | 06fc69d38fed30deb9f9b476d857d3b8bd302ac7 | 9feaa24ec54d39270d87bb0039dc106aedf12709 | /mcmc/hermit_model.py | 2f1c53a3876b5e8d00c3a00fb702b0ecab49676f | []
| no_license | https://github.com/AlexG31/EcgDiagnosis | 65cb4a98aa1da1db916744e867e28bedf55bdf19 | 16b1c81351957393f27675e5eaea4920be787e28 | refs/heads/master | 2021-01-12T02:46:26.899927 | 2017-01-20T09:55:16 | 2017-01-20T09:55:16 | 78,096,065 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #encoding:utf8
# MCMC Model
import os
import sys
import pymc
import matplotlib.pyplot as plt
import scipy.signal as signal
from pymc import DiscreteUniform, Exponential, deterministic, Poisson, Uniform
import numpy as np
import json
# Load ECG segment array
with open('./segment.json', 'r') as fin:
raw_sig = json.load(fin)
raw_sig = np.array(raw_sig)
raw_sig = raw_sig[30:]
# Normalize
min_val = np.min(raw_sig)
max_val = np.max(raw_sig)
raw_sig -= min_val
raw_sig /= (max_val - min_val)
# Hermit functions
def HermitFunction(level, size):
'''Return hermit function.'''
if size < 0:
raise Exception('Size must be greater or equal to zero!')
def He0(x):
return 1.0
def He1(x):
return x
def He2(x):
return x * x - 1
def He3(x):
return x ** 3.0 - 3.0 * x
def He4(x):
return x ** 4.0 - 6.0 * x ** 2.0 + 3.0
def He5(x):
return x ** 5.0 - 10.0 * x ** 3.0 + 15.0 * x
# Mapping wave_width to range [-3,3]
wave_width = 50
x_ratio = 6.0 / wave_width
if level == 0:
hermit = He0
elif level == 1:
hermit = He1
elif level == 2:
hermit = He2
elif level == 3:
hermit = He3
elif level == 4:
hermit = He4
elif level == 5:
hermit = He5
data = [hermit((x - size / 2) * x_ratio) / 20.0 for x in xrange(0, size)]
return np.array(data)
# plt.plot(raw_sig)
# plt.plot(HermitFunction(5, len(raw_sig)))
# plt.show()
# Length of the ECG segment
len_sig = raw_sig.size
# wave_center = DiscreteUniform('wave_center', lower=0, upper=len_sig, doc='WaveCetner[index]')
hc0 = pymc.Normal('hc0', 1, 0.25)
hc1 = pymc.Normal('hc1', 1, 0.25)
hc2 = pymc.Normal('hc2', 1, 0.25)
hc3 = pymc.Normal('hc3', 1, 0.25)
hc4 = pymc.Normal('hc4', 1, 0.25)
hc5 = pymc.Normal('hc5', 1, 0.25)
@deterministic(plot=False)
def wave_shell(hc0=hc0,
hc1=hc1,
hc2=hc2,
hc3=hc3,
hc4=hc4,
hc5=hc5,
):
''' Concatenate wave.'''
coefs = [hc0, hc1, hc2, hc3, hc4, hc5,]
out = np.zeros(len_sig,)
for level, coef in zip(xrange(0,6), coefs):
out += HermitFunction(level, len_sig) * coef
return out
ecg = pymc.Normal('ecg', mu=wave_shell, tau = 4e4, value=raw_sig, observed=True)
def test():
'''Compare Gaussian Function.'''
import scipy.signal as signal
xlist = signal.gaussian(100, 7)
# plt.hist(xlist)
plt.plot(xlist)
plt.title('ECG Segment')
plt.show()
# test()
| UTF-8 | Python | false | false | 2,543 | py | 23 | hermit_model.py | 20 | 0.577271 | 0.529296 | 0 | 113 | 21.504425 | 95 |
PantherPrg/Rollercoaster_Height | 18,382,460,052,989 | 78198715a5a08a23b7ad73fdbcd6bf5bf3cf3f94 | 3bbffa332419f35ed617d79cc9d46160b75cbe6d | /TestCases/testCase3.py | 6de6b77fa81a2722fec625a5c567eede2b926228 | []
| no_license | https://github.com/PantherPrg/Rollercoaster_Height | d599a75e0c51960e174cea3915dd726de175b039 | a835dfca6cf45c17cee52cf5317f75b97a82872e | refs/heads/main | 2023-03-30T18:00:29.210400 | 2021-03-30T05:58:01 | 2021-03-30T05:58:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | 70
Sidney 64
Carlin 69
Oldra 59
Alvy 63
Gael 59
Harlea 67
Luba 66
Brooke 62 | UTF-8 | Python | false | false | 75 | py | 8 | testCase3.py | 7 | 0.786667 | 0.546667 | 0 | 9 | 7.444444 | 9 |
xieqiumei/data_analysis | 120,259,117,519 | fae403f1b9ab28b954b1c31ecac8ce6083f4ceb8 | 1a3e6deed6002bb4a0cb832edb743a8a566ce33f | /feature_analysis/model_analysis.py | efb8b41111ea98f01f2dce4e489c5293101a3782 | []
| no_license | https://github.com/xieqiumei/data_analysis | 7bc2157cfaa4e91b41effb58ff35c3bca0ba92e2 | 208fe84aad98f0ae834020aacbc3f5a21d1b8a50 | refs/heads/master | 2023-05-30T21:20:44.033699 | 2021-06-28T00:32:15 | 2021-06-28T00:32:15 | 380,514,178 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!D:/Code/python
# -*- coding: utf-8 -*-
# @File:model_analysis.py
# @Software:PyCharm
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import r2_score # 要注意预测评估函数,有回归和分类之分
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_regression, f_regression
from feature_analysis.train_config import svr_params_dict
from sklearn.ensemble import RandomForestRegressor
from utils.metrics import get_adjusted_r_squared, get_p_values
from utils.feature_select import backward_regression, select_by_recursive_feature_elimination, select_by_lasso, \
column_index
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
def select_features(x_train, y_train, x_test, x_cloumns, y_column, select_type, k=None):
"""
feature selection
:param x_train:
:param y_train:
:param x_test:
:param x_cloumns:
:param y_column:
:param select_type:
:param k:
:return:
"""
if select_type == 'backward':
features, feature_ids = backward_regression(x_train, y_train, x_cloumns, y_column)
return x_train[:, feature_ids], x_test[:, feature_ids], features
if select_type == 'rfe':
features, feature_ids = select_by_recursive_feature_elimination(x_train, y_train, x_cloumns)
return x_train[:, feature_ids], x_test[:, feature_ids], features
if select_type == 'lasso':
features, feature_ids = select_by_lasso(x_train, y_train, x_cloumns)
return x_train[:, feature_ids], x_test[:, feature_ids], features
if select_type in ['mi', 'f']:
if k is None:
return None, None, None
# configure to select a subset of features
if select_type == 'mi':
fs = SelectKBest(score_func=mutual_info_regression, k=k)
else:
fs = SelectKBest(score_func=f_regression, k=k)
# learn relationship from training dataf_regression
fs.fit(x_train, y_train)
# transform train input data
# print(fs.get_support())
x_train_fs = fs.transform(x_train)
# transform test input data
x_test_fs = fs.transform(x_test)
# 特征的重要性
support = fs.get_support()
index = np.where(support == 1)[0]
return x_train_fs, x_test_fs, np.array(x_cloumns)[index]
return None, None, None
def get_scale_data(x_train, x_test, y_train, y_test):
"""
对数据进行缩放,缩放后的数据用scaler.inverse_transform(test_S)还原
:return:
"""
scale_x = StandardScaler().fit(x_train)
x_train = scale_x.transform(x_train)
x_test = scale_x.transform(x_test)
scale_y = StandardScaler().fit(y_train)
y_train = scale_y.transform(y_train).ravel()
y_test = scale_y.transform(y_test).ravel()
return x_train, x_test, y_train, y_test, scale_y
def get_svr_best_params(x_train, y_train):
"""
网格搜索寻找最优的svr参数
:param x_train:
:param y_train:
:return:
"""
# 创建SVR实例
svr = SVR()
gscv = GridSearchCV(
estimator=svr,
param_grid=svr_params_dict,
n_jobs=2,
scoring='r2',
cv=6)
gscv.fit(x_train, y_train) # 寻找最优参数
best_params = gscv.best_params_
return best_params
def create_model(x_train, y_train, model_type):
if model_type == 'svr':
# 网格搜索寻找最优的svr参数
best_params = get_svr_best_params(x_train, y_train)
print(f"Best svr params are {best_params}.")
kernel = best_params['kernel']
return SVR(C=best_params['C'], kernel=kernel, gamma=best_params['gamma'], epsilon=best_params['epsilon'])
if model_type == 'linear':
return LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None,
normalize=False)
elif model_type == 'rf':
return RandomForestRegressor(n_estimators=50, oob_score=True, random_state=100)
else:
return None
def find(arr, min, max):
"""
用于查找预测数据落在±50mm范围的数据
:param arr:
:param min:
:param max:
:return:
"""
pos_min = arr >= min
pos_max = arr <= max
pos_rst = pos_min & pos_max
return np.where(pos_rst == True) # where的返回值刚好可以用[]来进行元素提取
def model_fit_and_score(x_train_fs, y_train, x_test_fs, y_test, model_type, selected_features, scale_y):
model = create_model(x_train_fs, y_train, model_type)
if model is None:
print(f"Currently can't predicate by this model type.")
return
model.fit(x_train_fs, y_train)
# evaluate the model
y_predicate = model.predict(x_test_fs)
# importance = model.coef_
y_test = scale_y.inverse_transform(y_test)
y_predicate = scale_y.inverse_transform(y_predicate)
tmp = y_predicate - y_test
print("误差±50mm的数量和百分比:", len(find(tmp, -50.0, 50.0)[0]), len(tmp), len(find(tmp, -50.0, 50.0)[0]) * 100 / len(tmp))
# evaluate predictions:explained_variance_score, r2_score,mean_absolute_error
r2score = r2_score(y_test, y_predicate)
adjusted_r2squared = get_adjusted_r_squared(x_test_fs, y_test, r2score)
print(
f'Test R^2 of selected features {selected_features} is {r2score}, \nand adjusted_R^2 is {adjusted_r2squared}.')
def svr_predicate_with_full_feature(x_array, y_value, y_name, feature_names, model_type, test_size=0.2):
"""
svr回归预测
:param x_array:
:param y_value:
:param y_name:
:param feature_names:
:param model_type:
:param test_size:
:return:
"""
# 按照比例分成测试语料和训练语料
x_train, x_test, y_train, y_test = train_test_split(x_array, y_value, test_size=test_size, random_state=999)
y_train = np.array(y_train).reshape(-1, 1)
y_test = np.array(y_test).reshape(-1, 1)
# 数据缩放
x_train, x_test, y_train, y_test, scale_y = get_scale_data(x_train, x_test, y_train, y_test)
model = create_model(x_train, y_train, model_type)
if model is None:
print(f"Currently can't predicate by this model type.")
return
model.fit(x_train, y_train)
y_predicate = model.predict(x_test)
# evaluate predictions
score = r2_score(y_test, y_predicate)
print(f'Test R^2 of {y_name} by features {feature_names[:-1]} is {score}')
def svr_predicate_with_feature_select(x_array, y_array, x_cloumns, y_column, model_type,
select_type, test_size=0.2, selected_feature=None):
"""
:param x_array:
:param y_value:
:param feature_names:
:param model_type:
:param k_num:
:param test_size:
:param select_type:
:return:
"""
# x_train, x_test, y_train, y_test = train_test_split(x_array, y_value, test_size=test_size)
n_train = int(x_array.shape[0] * (1 - test_size))
x_train = x_array[:n_train, :]
x_test = x_array[n_train:, :]
y_train = y_array[:n_train]
y_test = y_array[n_train:]
# 分隔输入X和输出y
# print("测试前:", y_test)
y_train = np.array(y_train).reshape(-1, 1)
y_test = np.array(y_test).reshape(-1, 1)
# 数据缩放
x_train, x_test, y_train, y_test, scale_y = get_scale_data(x_train, x_test, y_train, y_test)
if selected_feature is not None:
X_df = pd.DataFrame(data=x_array, columns=x_cloumns)
feature_ids = column_index(X_df, selected_feature)
model_fit_and_score(x_train[:, feature_ids], y_train, x_test[:, feature_ids], y_test, model_type,
selected_feature, scale_y)
return
if select_type is None or select_type not in ['f', 'mi', 'backward', 'rfe', 'lasso']:
print("The wrong feature select type!")
return
# 特征选择
if select_type in ['f', 'mi']:
k_num = [i for i in range(1, x_array.shape[1] + 1)]
# k_num = [i for i in range(1, 6)]
# k_num = [x_array.shape[1]]
for e in k_num:
x_train_fs, x_test_fs, features = select_features(x_train, y_train, x_test, x_cloumns, y_column,
select_type, k=e)
model_fit_and_score(x_train_fs, y_train, x_test_fs, y_test, model_type, features, scale_y)
print("\n")
else:
x_train_fs, x_test_fs, features = select_features(x_train, y_train, x_test, x_cloumns, y_column,
select_type)
model_fit_and_score(x_train_fs, y_train, x_test_fs, y_test, model_type, features, scale_y)
def outlier_treatment_by_median(df_data):
for column in df_data.columns.tolist():
if np.abs(df_data[column].skew()) > 1:
# median = df_data[column].quantile(0.5)
# _95percentile = df_data[column].quantile(0.95)
# # print(median, _95percentile)
# df[column] = np.where(df[column] >= _95percentile, median, df[column])
flooring = df_data[column].quantile(0.1)
capping = df_data[column].quantile(0.9)
df_data[column] = np.where(df_data[column] < flooring, flooring, df_data[column])
df_data[column] = np.where(df_data[column] > capping, capping, df_data[column])
return df_data
# df = outlier_treatment_by_median(df)
# print(df.corr()['P'].sort_values())
if __name__ == '__main__':
df = pd.read_excel("D:/工作项目/鸿玖/中泰数据 min级汇总/按分钟汇总数据/1min采样/C电极的参数汇总_1min__增加炉底温度.xlsx", sheet_name='Sheet1')
# 剔除电流异常数据
df.drop([894, 895, 896, 897, 898, 899, 900, 901, 903], inplace=True)
X = df[['焦炭实际配料值', '兰炭实际配料值', '石灰二实际配料值',
'石灰一实际配料值', '电流_电极电流 KA_IA',
'电流_电极电流 KA_IB', '电流_电极电流 KA_IC', '电压_电极电压 V_UA', '电压_电极电压 V_UB',
'电压_电极电压 V_UC', '功率_电极功率_Pa', '功率_电极功率_Pb', '功率_电极功率_Pc',
'功率_无功功率', '功率_有功功率', '温度_炉 底 ℃_3', '加热元件温度C']].values
Y = df['C电极长度mm(计算)'].values
svr_predicate_with_feature_select(X, Y, ['焦炭实际配料值', '兰炭实际配料值', '石灰二实际配料值',
'石灰一实际配料值', '电流_电极电流 KA_IA',
'电流_电极电流 KA_IB', '电流_电极电流 KA_IC', '电压_电极电压 V_UA', '电压_电极电压 V_UB',
'电压_电极电压 V_UC', '功率_电极功率_Pa', '功率_电极功率_Pb', '功率_电极功率_Pc',
'功率_无功功率', '功率_有功功率', '温度_炉 底 ℃_3', '加热元件温度C'], ['C电极长度mm(计算)'],
'rf',
'mi', test_size=0.3, selected_feature=None)
""""
linear预测明显效果优于svr
"""
| UTF-8 | Python | false | false | 11,415 | py | 16 | model_analysis.py | 15 | 0.59524 | 0.584052 | 0 | 298 | 34.392617 | 119 |
zodiac/competitive-programming | 1,769,526,567,929 | df9b2b798639a0479384bc379cf17c66edcc2f41 | 7da5ffee8fe9722fd06d28757dd5dfc57d2806ac | /codeforces/E99/C/C.py | 4c2ad6b9cb6d505b3b7190867e96e49760d7428a | []
| no_license | https://github.com/zodiac/competitive-programming | 1609aa8bc3ab69894eecad45f2ce9d798ddd6d92 | a35a2c0a10b5f2b0cd4a2cfa5926b6ebf5bff03f | refs/heads/master | 2020-12-29T02:49:36.244457 | 2020-12-28T14:34:40 | 2020-12-28T14:34:40 | 1,388,667 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env pypy3
T = int(input())
for _ in range(T):
x, y = input().split()
x = int(x)
y = int(y)
print(x-1, y)
| UTF-8 | Python | false | false | 121 | py | 832 | C.py | 811 | 0.528926 | 0.512397 | 0 | 8 | 14.125 | 23 |
janion/CARMA | 16,716,012,723,377 | 8e15fa82325d735cc5fa1fec7aa02f05f86171cf | 41e42c52e502e67ea06fcf66b7bf1acb1bd30be7 | /CARMA/src/backend/event/events/CheckSignalEvent.py | 50c30ca1264eb076f596ab73d56b698d00ca6aae | []
| no_license | https://github.com/janion/CARMA | 2e7840d9f55df0cccd0004e3a4a182a341f8fbf0 | 55bfafc2465371e2b0e37deda5a955e47d202270 | refs/heads/master | 2021-01-10T23:16:27.224636 | 2016-10-03T17:52:03 | 2016-10-03T17:52:03 | 70,622,618 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on 27 Sep 2016
@author: Janion
'''
class CheckSignalEvent():
'''
Event which relates to a train needing to check a signal.
'''
CODE = "C"
def __init__(self, sectionName):
'''
Constructor
'''
self.entityName = sectionName
################################################################################
def getEntityName(self):
return self.entityName
################################################################################
@staticmethod
def isToHardware():
return False;
################################################################################
@staticmethod
def isEvent(message):
return message.index(CheckSignalEvent.CODE) != -1
| UTF-8 | Python | false | false | 819 | py | 17 | CheckSignalEvent.py | 16 | 0.373626 | 0.365079 | 0 | 35 | 21.285714 | 80 |
SLLittrell/GameRater-server | 1,133,871,392,209 | 719636f364a112f4890d7746305d3ed0af289d9e | 15ae6080ac1ba504ec2c19dd02edc6ac585e13cb | /raterprojectapi/views/game.py | 729dae9aecf94f0625a3d43e312e7cdacb2e9709 | []
| no_license | https://github.com/SLLittrell/GameRater-server | 1d824df4d662567a944c2697c43e1a890af6ad1d | dbd55ab096408b738ade7c3407ad71bdcbd7a541 | refs/heads/main | 2023-05-09T00:24:43.183349 | 2021-05-26T19:05:03 | 2021-05-26T19:05:03 | 364,363,110 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.exceptions import ValidationError
from rest_framework import status
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from raterprojectapi.models import Game, Player, Category, Review
class GameViewSet(ViewSet):
def create(self, request):
creator = Player.objects.get(user=request.auth.user)
game = Game()
game.title = request.data['title']
game.description = request.data['description']
game.release_year = request.data['releaseYear']
game.number_players = request.data['numberPlayers']
game.time_to_play = request.data['timeToPlay']
game.age = request.data['age']
game.creator = creator
try:
game.save()
categories = Category.objects.in_bulk(request.data['categories'])
game.categories.set(categories)
serializer = GameSerializer(game, context={'request': request})
return Response(serializer.data, status=status.HTTP_201_CREATED)
except ValidationError as ex:
return Response({"reason": ex.message}, status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
try:
game = Game.objects.get(pk=pk)
serializer = GameSerializer(game, context={'request': request})
return Response(serializer.data)
except Exception as ex:
return HttpResponseServerError(ex)
def update(self, request, pk=None):
game = Game()
creator = Player.objects.get(user=request.auth.user)
if creator is not game.creator:
return Response({}, status=status.HTTP_403_FORBIDDEN)
game.title = request.data['title']
game.description = request.data['description']
game.release_year = request.data['releaseYear']
game.number_players = request.data['numberPlayers']
game.time_to_play = request.data['timeToPlay']
game.age = request.data['age']
game.creator = creator
categories = Category.objects.in_bulk(request.data['categories'])
game.categories.set(categories)
game.save()
return Response({}, status=status.HTTP_204_NO_CONTENT)
def destroy(self, request, pk=None):
try:
game = Game.objects.get(pk=pk)
game.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
except Game.DoesNotExist as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def list(self, request):
games = Game.objects.all()
serializer = GameSerializer(
games, many=True, context={'request': request})
return Response(serializer.data)
class GameSerializer(serializers.ModelSerializer):
class Meta:
model = Game
fields = ('id', 'title', 'description', 'release_year', 'number_players', 'time_to_play', 'age', 'creator', 'categories', 'average_rating')
depth = 1
| UTF-8 | Python | false | false | 3,320 | py | 15 | game.py | 13 | 0.636747 | 0.629518 | 0 | 98 | 32.867347 | 147 |
darkoob12/POSTagger | 14,439,680,077,343 | d01a8bf21c7e5ae11c7e04d016e3c0554ab30544 | 08c7d5fadbfce44166f29d23fe8734e7e87c6094 | /program.py | c7097067b2e94a459228bce6238c3b5e846559cc | []
| no_license | https://github.com/darkoob12/POSTagger | 0ae08cf0fcd3ecbe6c1ad42e31a56b4c7158e933 | 8de77008f28661a1e7d600dae9ab2e859da1d424 | refs/heads/master | 2021-01-16T17:36:53.901293 | 2017-08-11T08:10:53 | 2017-08-11T08:10:53 | 100,008,605 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from POSTagger import *
import time
import random
def read_data(path):
""" str -> list
loads data from a csv file
removes redundant info
:param path:
:return: list of sentences
"""
data = []
with open(path, 'r') as f:
sentence = []
for line in f:
a = line.split(' ')
if len(a) < 3: # end of sentence
data.append(sentence)
sentence.insert(0, ['', '##'])
if sentence[-1][0] != '.':
sentence.append(['.', '.'])
sentence = []
else:
sentence.append([a[0].strip(), a[1].strip()])
return data
def concat_sentences(corpus):
seq = ['']
for sent in corpus:
words = [p[0] for p in sent]
seq.extend(words[1:-1])
seq.append('.')
return seq
def get_words(data):
ret = []
for s in data:
ret.append([p[0] for p in s])
return ret
def read_tags(path):
t = []
with open(path, 'r') as f:
for line in f:
tokens = line.strip().split(',')
t.append(tokens[0])
return t
def check_eol(data):
dot_count = 0
anomaly_count = 0
for sentence in data:
if sentence[-1][0] == '.':
dot_count += 1
else:
anomaly_count += 1
print("{0} out of {1} has '.' as their terminal state.".format(dot_count, len(data)))
def concat_chunks(record):
""" list -> str
convert words of a sentence to its values
:param record: list of words and their pos tag
:return: a sentence
"""
words = [x[0] for x in record]
return ' '.join(words)
def test_model(mod, data):
total = 0 # total number of words
correct = 0 # correctly tagged
s = np.zeros(len(data))
i = 0
for sentence in data:
ws = []
ts = []
for chunk in sentence:
ws.append(chunk[0])
ts.append(chunk[1])
predicted_tags = mod.decode(ws)
c = np.sum(np.array(ts[1:]) == np.array(predicted_tags[1:]))
correct += c
l = len(sentence) - 1
total += l
s[i] = c / l
print("{0} : [length => {1}, corrects => {2}] | Accuracy => {3:.2%}".format(i, l, c, s[i]))
i += 1
word_acc = correct / total
print("per word accuracy on test data is : {:.2%}".format(word_acc))
sentence_acc = np.mean(s)
print("average sentence accuracy on test data is : {:.2%}".format(sentence_acc))
return word_acc, sentence_acc
def experiment_p1():
train = read_data('train.txt')
test = read_data('test.txt')
dic = TextDictionary()
dic.add(train)
dic.add(test)
model = HMM(dic.get_tags(), dic.get_words())
tic = time.time()
model.estimate(train) # estimate the probabilities
toc = time.time()
print('estimation time: {0:.2f}'.format(toc - tic))
model.save()
test_model(model, test)
def experiment_p2():
train = read_data('train.txt')
test = read_data('test.txt')
dic = TextDictionary()
dic.add(train)
dic.add(test)
model = HMM(dic.get_tags(), dic.get_words())
model.rand_init()
random.shuffle(train)
model.estimate(train[:-10])
tic = time.time()
model.train(concat_sentences(train[-10:]), max_iter=50)
toc = time.time()
print('learning time: {0:.2f}'.format(toc - tic))
model.save()
test_model(model, test)
if __name__ == '__main__':
experiment_p2()
print('hello, world!')
| UTF-8 | Python | false | false | 3,498 | py | 3 | program.py | 2 | 0.534019 | 0.520011 | 0 | 139 | 24.165468 | 99 |
Ashyaa/aoc2020 | 6,657,199,332,578 | fdeda4671d22fb88224d67b201b1c1c7918d9a36 | e03c4147acf427ef26b73ad2c26e7a0fcb5f8de4 | /2021/day20/solution.py | 40e4764a267f69d5662e8871adaafa61be9ac517 | []
| no_license | https://github.com/Ashyaa/aoc2020 | 22b5051d3e77155b93e54df553c852d60f3f4767 | 844548cd99050f5227bd17ef13b88286cdce60a4 | refs/heads/main | 2023-02-21T04:30:05.618050 | 2022-12-25T09:21:41 | 2022-12-25T09:21:41 | 318,306,558 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import contextlib
from pathlib import Path
from typing import *
import numpy as np
from AoC.util import show
CWD = Path(__file__).parent
def read_input(filename: str = "input.txt") -> Tuple[str, np.ndarray]:
input_file = CWD.joinpath(filename)
with open(input_file, "r") as reader:
s = reader.readline().strip().replace('.', '0').replace('#', '1')
reader.readline()
res = []
for l in reader.readlines():
ll = l.strip().replace('.', '0').replace('#', '1')
res.append(np.array([c for c in ll], dtype=str))
res = np.array(res)
return s, res
def step(s: str, arr: np.ndarray, i: int) -> np.ndarray:
inverted = s[0] == '1' and s[-1] == '0'
cons = i%2 if inverted else 0
arr = np.pad(arr, ((2, 2), (2, 2)), 'constant', constant_values=cons)
if inverted and not(i%2):
res = np.ones(arr.shape, dtype=int).astype(str)
else:
res = np.zeros(arr.shape, dtype=int).astype(str)
for i in range(1, arr.shape[0]-1):
for j in range(1, arr.shape[1]-1):
idx = int("".join(arr[i-1:i+2, j-1:j+2].flatten()), 2)
res[i,j] = s[idx]
return res
@show
def first(s: str, inp: np.ndarray, steps: int) -> None:
arr = inp.copy()
for i in range(steps):
arr = step(s, arr, i)
return arr.astype(int).sum()
def test_example() -> None:
with contextlib.redirect_stdout(None):
s, inp = read_input("example.txt")
assert first(s, inp, 2) == 35
assert first(s, inp, 50) == 3351
if __name__ == "__main__":
test_example()
s, inp = read_input()
first(s, inp, 2) # 5065
first(s, inp, 50) # 14790
| UTF-8 | Python | false | false | 1,708 | py | 126 | solution.py | 123 | 0.556206 | 0.528103 | 0 | 60 | 27.466667 | 73 |
YANH216/myweb | 16,475,494,591,582 | 84d9db9b1020fc3b752906969d549354267a6703 | 95913618293ab052c7f75fe0411adae8379c4c3a | /SitesApp/migrations/0003_auto_20180605_1307.py | 9bac1c31b74b0342ca7c195acde009b4d94d1477 | []
| no_license | https://github.com/YANH216/myweb | 6553f9028c8e23fe57c9988fb94e2c88f5b44886 | c24600a99e0e6c3c1266251c2f6f18caed17f83c | refs/heads/master | 2023-06-27T03:18:13.718437 | 2021-07-29T09:19:28 | 2021-07-29T09:19:28 | 390,667,214 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-05 05:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('SitesApp', '0002_auto_20180605_1128'),
]
operations = [
migrations.AlterField(
model_name='candidate',
name='cEmail',
field=models.CharField(blank=True, default=None, max_length=20, null=True),
),
migrations.AlterField(
model_name='user',
name='uEmail',
field=models.CharField(blank=True, default=None, max_length=20, null=True),
),
]
| UTF-8 | Python | false | false | 680 | py | 45 | 0003_auto_20180605_1307.py | 28 | 0.591176 | 0.536765 | 0 | 25 | 26.2 | 87 |
larsbak/fletch | 8,624,294,346,937 | ce4922248d60fbd850cf516c699aa8d31fd3100b | 4fffc216d36563b5db4a5ef230d95b6bda3be0e4 | /src/vm/vm.gyp | 6262c463f1339822da528907eb156cf599a81a51 | [
"BSD-3-Clause"
]
| permissive | https://github.com/larsbak/fletch | e18ec6cdfe306f75553638433109a2549b589c7a | 6ca18fb7c55f052ef5537c650025811434b9a323 | refs/heads/master | 2018-05-19T05:37:25.789701 | 2015-01-29T11:19:50 | 2015-01-29T11:19:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (c) 2015, the Fletch project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE.md file.
{
'target_defaults': {
'include_dirs': [
'../../',
],
'actions': [
{
# TODO(ahe): Move to .gypi file that is included by the other .gyp
# files.
'action_name': 'lint_>(_target_name)',
'inputs': [
'../../third_party/cpplint/cpplint.py',
'>@(_sources)',
],
'outputs': [ '>(INTERMEDIATE_DIR)/lint_>(_target_name).log' ],
'action': [
"bash", "-c",
"python >(_inputs) && "
"LANG=POSIX date '+Lint checked on %+' > <(_outputs)",
],
},
]
},
'targets': [
{
'target_name': 'fletch_vm_base',
'type': 'static_library',
'dependencies': [
'../shared/shared.gyp:fletch_shared',
'../double_conversion.gyp:double_conversion',
],
'sources': [
# TODO(ahe): Add header (.h) files.
'assembler_x86.cc',
'assembler_x86_macos.cc',
'assembler_x64.cc',
'assembler_x64_macos.cc',
'event_handler.cc',
'event_handler_macos.cc',
'ffi.cc',
'fletch.cc',
'fletch_api_impl.cc',
'heap.cc',
'interpreter.cc',
'intrinsics.cc',
'lookup_cache.cc',
'natives.cc',
'object.cc',
'object_list.cc',
'object_map.cc',
'object_memory.cc',
'platform_posix.cc',
'port.cc',
'process.cc',
'program.cc',
'scheduler.cc',
'service_api_impl.cc',
'session.cc',
'snapshot.cc',
'stack_walker.cc',
'thread_pool.cc',
'thread_posix.cc',
'weak_pointer.cc',
],
},
{
'target_name': 'fletch_vm',
'type': 'static_library',
'dependencies': [
'fletch_vm_base',
'../shared/shared.gyp:fletch_shared',
'../double_conversion.gyp:double_conversion',
],
'sources': [
'<(INTERMEDIATE_DIR)/generated.S',
],
'actions': [
{
'action_name': 'generate_generated_S',
'inputs': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)'
'fletch_vm_generator'
'<(EXECUTABLE_SUFFIX)',
],
'outputs': [
'<(INTERMEDIATE_DIR)/generated.S',
],
'action': [
# TODO(ahe): Change generator to accept command line argument for
# output file. Using file redirection may not work well on Windows.
'bash', '-c', '<(_inputs) > <(_outputs)',
],
},
],
},
{
'target_name': 'fletch_vm_generator',
'type': 'executable',
'dependencies': [
'fletch_vm_base',
'../shared/shared.gyp:fletch_shared',
'../double_conversion.gyp:double_conversion',
],
'sources': [
# TODO(ahe): Add header (.h) files.
'generator.cc',
],
},
{
'target_name': 'fletch',
'type': 'executable',
'dependencies': [
'fletch_vm',
],
'sources': [
# TODO(ahe): Add header (.h) files.
'main.cc',
],
},
{
'target_name': 'vm_run_tests',
'type': 'executable',
'dependencies': [
'fletch_vm',
],
'defines': [
'TESTING',
# TODO(ahe): Remove this when GYP is the default.
'GYP',
],
'sources': [
# TODO(ahe): Add header (.h) files.
'foreign_ports_test.cc',
'object_map_test.cc',
'object_memory_test.cc',
'object_test.cc',
'platform_test.cc',
'../shared/test_main.cc',
],
},
],
}
| UTF-8 | Python | false | false | 3,846 | gyp | 58 | vm.gyp | 26 | 0.471659 | 0.468539 | 0 | 147 | 25.163265 | 79 |
LuisHVieira/valorant_games | 11,269,994,186,642 | 1fbde5dfabbc5a903b62e336d4dcf1a817572e06 | d054c44279a959f03fc76147f4dde75b20dd5f6a | /datasValorant.py | 0710f807c0d508a259d9b0c3fbbadd6e800b4c9f | []
| no_license | https://github.com/LuisHVieira/valorant_games | 03f5991083a3d0394f1acd072be3cfb13d634172 | 8e8af388a607ff4c76aede1c7d52b7aed549097e | refs/heads/master | 2023-08-15T03:23:25.527484 | 2021-09-30T23:54:14 | 2021-09-30T23:54:14 | 411,813,213 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
import datetime
import embeds as e
import discord
def get_games_today():
#Get current date and convert from string
today = datetime.datetime.now()
year = str(today.year)
month = str(today.month) if today.month >= 10 else '0' + str(today.month)
day = str(today.day) if today.day >= 10 else '0' + str(today.day)
#String complete and only date formartBR
dateBr = day + '/' + month + '/' + year
stringRangeToday = year + '-' + month + '-' + day + 'T'
#Request
linkRequest = 'https://api.pandascore.co/valorant/matches/upcoming?&range[begin_at]=' + stringRangeToday + '00:00:00Z,' + stringRangeToday + '23:59:59Z&token=CZ2gKXBwL8LYIKbj_VzKC8cjujXZcVF-vlwPf8P3qC-GcHnSoRQ'
#rTest = 'https://api.pandascore.co/valorant/matches/upcoming?&range[begin_at]=2021-09-13T00:00:00Z,2021-09-13T23:59:59Z&token=CZ2gKXBwL8LYIKbj_VzKC8cjujXZcVF-vlwPf8P3qC-GcHnSoRQ'
r = requests.get(linkRequest)
responseJson = r.json()
#list and count
a = []
count = 0
count2 = 0
for i in responseJson:
idC = count
dateTime = i['begin_at']
serieName = i['serie']['name']
thumb = i['league']['image_url']
idSerie = i['serie']['id']
hour = int(dateTime[11:13]) - 3
minSec = dateTime[13:16]
dateTimeFormated = dateBr + ' - ' + str(hour) + minSec
datas = dict(id=idC, serieName=serieName, dateTime=dateTimeFormated, thumb=thumb, serieID=idSerie)
for j in i['opponents']:
teamName = j['opponent']['name']
idTeam = j['opponent']['id']
if count % 2 == 0:
teams = dict(teamName0=teamName, id0=idTeam)
else:
teams = dict(teamName1=teamName, id1=idTeam)
datas.update(teams)
count += 1
count2 += 1
a.append(datas)
return a
| UTF-8 | Python | false | false | 1,701 | py | 7 | datasValorant.py | 3 | 0.673721 | 0.628454 | 0 | 65 | 25.153846 | 211 |
chaitualuru/algorithms | 10,196,252,397,575 | aa80d07c26f450f42b4189a9755446298101607c | 19241f750f9bef7f2d8529bb6d3096eea46d83b5 | /eip/sudoku_checker.py | 095fbd59f128f3ccad973e90e379edb20854d63e | []
| no_license | https://github.com/chaitualuru/algorithms | ad372146dc9a286963131a59889ee5c83445db17 | af58921e23f9c97650f87bfadbdeac51f433a249 | refs/heads/master | 2022-07-12T03:58:25.129841 | 2015-08-31T06:00:33 | 2015-08-31T06:00:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Sudoku Checker"""
def sudoku_checker(A):
for i in range(len(A)):
if not valid_line(A[i]):
return False
unzipped_A = list(zip(*A))
for j in range(len(unzipped_A)):
if not valid_line(unzipped_A[j]):
return False
for k in range(0, len(A), 3):
for l in range(0, len(A), 3):
grid = []
for x in A[k:k+3]:
for y in x[l:l+3]:
grid.append(y)
if not valid_line(grid):
return False
return True
def valid_line(line):
return (len(line) == 9 and sum(line) == sum(set(line)))
def main():
"""Entry point of the program."""
# valid
if sudoku_checker([
[5,3,0,0,7,0,0,0,0],
[6,0,0,1,9,5,0,0,0],
[0,9,8,0,0,0,0,6,0],
[8,0,0,0,6,0,0,0,3],
[4,0,0,8,0,3,0,0,1],
[7,0,0,0,2,0,0,0,6],
[0,6,0,0,0,0,2,8,0],
[0,0,0,4,1,9,0,0,5],
[0,0,0,0,8,0,0,7,9]]):
print "Valid"
else:
print "Not Valid"
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 875 | py | 57 | sudoku_checker.py | 57 | 0.544 | 0.443429 | 0 | 45 | 18.444444 | 56 |
moskalikbogdan/CDV | 5,257,039,992,144 | 1241c1a272387bd5034d0044af2b2351952cd4b6 | a9d4ba71e1c847e0f4a7f26d76ee1fb6c08f8c76 | /Python/Warsztaty/Week02/W02.Z04.2.py | ddfae2e158abb6e57bcba39f1739d0a711780b19 | []
| no_license | https://github.com/moskalikbogdan/CDV | 569c257b779e14c42fe9e1ff19cfd105bb624afb | 90d14a597e66f9e5902ecf2b103baf737ed0e4ea | refs/heads/master | 2020-12-15T01:42:18.842169 | 2020-07-13T00:40:20 | 2020-07-13T00:40:20 | 234,949,055 | 0 | 1 | null | false | 2020-02-09T08:30:12 | 2020-01-19T18:55:48 | 2020-01-21T15:38:26 | 2020-02-09T08:29:32 | 17 | 0 | 0 | 1 | Python | false | false | """
@author: Bogdan Moskalik,
DataScience,
Niestacjonarne,
Grupa 2
"""
import numpy as np
print('''Policzę srednią i odchylenie standardowe z listy którą wprowadzisz
Z ilu elementów ma się składać lista?:
''')
a = int(input())
print('''Wprowadź pojedyńczo elementy twojej listy:''')
x = []
for i in range(0,a):
x.append(int(input('Wprowadź element %i:'%(i+1))))
print ()
print('Średnia twojej listy to:')
print(np.mean(x))
print('Odchylenie standardowe twojej listy to:')
print(round(np.std(x),2)) | UTF-8 | Python | false | false | 531 | py | 43 | W02.Z04.2.py | 36 | 0.684008 | 0.676301 | 0 | 25 | 19.8 | 75 |
m-nosrati/springboard-course | 7,610,682,086,991 | be8e030dbfe6707ba2b450fc7a4d679a8f959f68 | ff986f9509dbdbe843c223f630a82e80e9dbfebc | /machine-learning/naive_bayes/src/naive_bayes.py | 125630359e284d4f2b44167a2543df0d39046f5d | []
| no_license | https://github.com/m-nosrati/springboard-course | 65ce414411525ac49c3bd36e0da00164af75b153 | bd7da238f32bcf6d5716984dab6e887c90ac94ad | refs/heads/master | 2022-02-27T05:55:31.642308 | 2019-08-13T02:06:51 | 2019-08-13T02:06:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #%% [markdown]
# # Basic Text Classification with Naive Bayes submitted by Ahrim Han (2019/3/25)
# ***
# In this mini-project, you'll learn the basics of text analysis using a subset of movie reviews from the Rotten Tomatoes database. You'll also use a fundamental technique in Bayesian inference, called Naive Bayes. This mini-project is based on [Lab 10 of Harvard's CS109](https://github.com/cs109/2015lab10) class. Please feel free to go to the original lab for additional exercises and solutions.
#%%
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from six.moves import range
# Setup Pandas
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
# Setup Seaborn
sns.set_style("whitegrid")
sns.set_context("poster")
#%% [markdown]
# # Table of Contents
#
# * [Rotten Tomatoes Dataset](#Rotten-Tomatoes-Dataset)
# * "fresh" or "rotton": The critics upload their reviews to the movie page on the website, and need to mark their review "fresh" if it's generally favorable or "rotten" otherwise. It is necessary for the critic to do so as some reviews are qualitative and do not grant a numeric score, making it impossible for the system to be automatic.
# * Movies and TV shows are Certified Fresh with a steady Tomatometer of 75% or higher after a set amount of reviews (80 for wide-release movies, 40 for limited-release movies, 20 for TV shows), including 5 reviews from Top Critics.
# * [Explore](#Explore)
# * [The Vector Space Model and a Search Engine](#The-Vector-Space-Model-and-a-Search-Engine)
# * [In Code](#In-Code)
# * [Naive Bayes](#Naive-Bayes)
# * [Multinomial Naive Bayes and Other Likelihood Functions](#Multinomial-Naive-Bayes-and-Other-Likelihood-Functions)
# * [Picking Hyperparameters for Naive Bayes and Text Maintenance](#Picking-Hyperparameters-for-Naive-Bayes-and-Text-Maintenance)
# * [Interpretation](#Interpretation)
#
#%% [markdown]
# ## Rotten Tomatoes Dataset
#%%
critics = pd.read_csv('./critics.csv')
#let's drop rows with missing quotes
critics = critics[~critics.quote.isnull()]
critics.head()
#%% [markdown]
# ### Explore
#%%
n_reviews = len(critics)
n_movies = critics.rtid.unique().size
n_critics = critics.critic.unique().size
print("Number of reviews: {:d}".format(n_reviews))
print("Number of critics: {:d}".format(n_critics))
print("Number of movies: {:d}".format(n_movies))
#%%
critics.fresh.value_counts()
#%%
df = critics.copy()
df['fresh'] = df.fresh == 'fresh'
grp = df.groupby('critic')
counts = grp.critic.count() # number of reviews by each critic
means = grp.fresh.mean() # average freshness for each critic
means[counts > 100].hist(bins=10, edgecolor='w', lw=1)
plt.xlabel("Average Rating per critic")
plt.ylabel("Number of Critics")
plt.yticks([0, 2, 4, 6, 8, 10]);
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set I</h3>
# <br/>
# <b>Exercise:</b> Look at the histogram above. Tell a story about the average ratings per critic. What shape does the distribution look like? What is interesting about the distribution? What might explain these interesting things?
# </div>
#%% [markdown]
# The distribution looks roughly normal, with a strange gap around 0.55. Apart from that gap, the distribution looks close to normal. There appear to be two distinct groups of raters: harsh raters and generous raters.
#%% [markdown]
# ## The Vector Space Model and a Search Engine
#%% [markdown]
# All the diagrams here are snipped from [*Introduction to Information Retrieval* by Manning et. al.]( http://nlp.stanford.edu/IR-book/) which is a great resource on text processing. For additional information on text mining and natural language processing, see [*Foundations of Statistical Natural Language Processing* by Manning and Schutze](http://nlp.stanford.edu/fsnlp/).
#
# Also check out Python packages [`nltk`](http://www.nltk.org/), [`spaCy`](https://spacy.io/), [`pattern`](http://www.clips.ua.ac.be/pattern), and their associated resources. Also see [`word2vec`](https://en.wikipedia.org/wiki/Word2vec).
#
# Let us define the vector derived from document $d$ by $\bar V(d)$. What does this mean? Each document is treated as a vector containing information about the words contained in it. Each vector has the same length and each entry "slot" in the vector contains some kind of data about the words that appear in the document such as presence/absence (1/0), count (an integer) or some other statistic. Each vector has the same length because each document shared the same vocabulary across the full collection of documents -- this collection is called a *corpus*.
#
# To define the vocabulary, we take a union of all words we have seen in all documents. We then just associate an array index with them. So "hello" may be at index 5 and "world" at index 99.
#
# Suppose we have the following corpus:
#
# `A Fox one day spied a beautiful bunch of ripe grapes hanging from a vine trained along the branches of a tree. The grapes seemed ready to burst with juice, and the Fox's mouth watered as he gazed longingly at them.`
#
# Suppose we treat each sentence as a document $d$. The vocabulary (often called the *lexicon*) is the following:
#
# $V = \left\{\right.$ `a, along, and, as, at, beautiful, branches, bunch, burst, day, fox, fox's, from, gazed, grapes, hanging, he, juice, longingly, mouth, of, one, ready, ripe, seemed, spied, the, them, to, trained, tree, vine, watered, with`$\left.\right\}$
#
# Then the document
#
# `A Fox one day spied a beautiful bunch of ripe grapes hanging from a vine trained along the branches of a tree`
#
# may be represented as the following sparse vector of word counts:
#
# $$\bar V(d) = \left( 4,1,0,0,0,1,1,1,0,1,1,0,1,0,1,1,0,0,0,0,2,1,0,1,0,0,1,0,0,1,1,1,0,0 \right)$$
#
# or more succinctly as
#
# `[(0, 4), (1, 1), (5, 1), (6, 1), (7, 1), (9, 1), (10, 1), (12, 1), (14, 1), (15, 1), (20, 2), (21, 1), (23, 1),`
# `(26, 1), (29,1), (30, 1), (31, 1)]`
#
# along with a dictionary
#
# ``
# {
# 0: a, 1: along, 5: beautiful, 6: branches, 7: bunch, 9: day, 10: fox, 12: from, 14: grapes, 15: hanging, 19: mouth, 20: of, 21: one, 23: ripe, 24: seemed, 25: spied, 26: the, 29:trained, 30: tree, 31: vine,
# }
# ``
#
# Then, a set of documents becomes, in the usual `sklearn` style, a sparse matrix with rows being sparse arrays representing documents and columns representing the features/words in the vocabulary.
#
# Notice that this representation loses the relative ordering of the terms in the document. That is "cat ate rat" and "rat ate cat" are the same. Thus, this representation is also known as the Bag-Of-Words representation.
#
# Here is another example, from the book quoted above, although the matrix is transposed here so that documents are columns:
#
# 
#
# Such a matrix is also called a Term-Document Matrix. Here, the terms being indexed could be stemmed before indexing; for instance, `jealous` and `jealousy` after stemming are the same feature. One could also make use of other "Natural Language Processing" transformations in constructing the vocabulary. We could use Lemmatization, which reduces words to lemmas: work, working, worked would all reduce to work. We could remove "stopwords" from our vocabulary, such as common words like "the". We could look for particular parts of speech, such as adjectives. This is often done in Sentiment Analysis. And so on. It all depends on our application.
#
# From the book:
# >The standard way of quantifying the similarity between two documents $d_1$ and $d_2$ is to compute the cosine similarity of their vector representations $\bar V(d_1)$ and $\bar V(d_2)$:
#
# $$S_{12} = \frac{\bar V(d_1) \cdot \bar V(d_2)}{|\bar V(d_1)| \times |\bar V(d_2)|}$$
#
# 
#
#
# >There is a far more compelling reason to represent documents as vectors: we can also view a query as a vector. Consider the query q = jealous gossip. This query turns into the unit vector $\bar V(q)$ = (0, 0.707, 0.707) on the three coordinates below.
#
# 
#
# >The key idea now: to assign to each document d a score equal to the dot product:
#
# $$\bar V(q) \cdot \bar V(d)$$
#
# Then we can use this simple Vector Model as a Search engine.
#%% [markdown]
# ### In Code
#%%
from sklearn.feature_extraction.text import CountVectorizer
text = ['Hop on pop', 'Hop off pop', 'Hop Hop hop']
print("Original text is\n{}".format('\n'.join(text)))
vectorizer = CountVectorizer(min_df=0)
# call `fit` to build the vocabulary
vectorizer.fit(text)
# call `transform` to convert text to a bag of words
x = vectorizer.transform(text)
# CountVectorizer uses a sparse array to save memory, but it's easier in this assignment to
# convert back to a "normal" numpy array
x = x.toarray()
print("")
print("Transformed text vector is \n{}".format(x))
# `get_feature_names` tracks which word is associated with each column of the transformed x
print("")
print("Words for each feature:")
print(vectorizer.get_feature_names())
# Notice that the bag of words treatment doesn't preserve information about the *order* of words,
# just their frequency
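#%% [markdown]
# As a small illustration of the query-scoring idea above (this cell is an added sketch, not part of the original lab): score each toy "document" against the query `pop` with cosine similarity.
#%%
query = vectorizer.transform(['pop']).toarray()[0]
for doc, vec in zip(text, x):
    # cosine similarity between the document vector and the query vector
    cos = np.dot(vec, query) / (np.linalg.norm(vec) * np.linalg.norm(query))
    print("{:15s} -> cosine similarity {:.2f}".format(doc, cos))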
#%% [markdown]
# [Coordinate Format (COO)](https://scipy-lectures.org/advanced/scipy_sparse/coo_matrix.html)
#%%
def make_xy(critics, vectorizer=None):
#Your code here
if vectorizer is None:
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(critics.quote)
X = X.tocsc() # some versions of sklearn return COO format
y = (critics.fresh == 'fresh').values.astype(np.int)
return X, y
X, y = make_xy(critics)
#%% [markdown]
# ## Naive Bayes
#%% [markdown]
# From Bayes' Theorem, we have that
#
# $$P(c \vert f) = \frac{P(c \cap f)}{P(f)}$$
#
# where $c$ represents a *class* or category, and $f$ represents a feature vector, such as $\bar V(d)$ as above. **We are computing the probability that a document (or whatever we are classifying) belongs to category *c* given the features in the document.** $P(f)$ is really just a normalization constant, so the literature usually writes Bayes' Theorem in context of Naive Bayes as
#
# $$P(c \vert f) \propto P(f \vert c) P(c) $$
#
# $P(c)$ is called the *prior* and is simply the probability of seeing class $c$. But what is $P(f \vert c)$? This is the probability that we see feature set $f$ given that this document is actually in class $c$. This is called the *likelihood* and comes from the data. One of the major assumptions of the Naive Bayes model is that the features are *conditionally independent* given the class. While the presence of a particular discriminative word may uniquely identify the document as being part of class $c$ and thus violate general feature independence, conditional independence means that the presence of that term is independent of all the other words that appear *within that class*. This is a very important distinction. Recall that if two events are independent, then:
#
# $$P(A \cap B) = P(A) \cdot P(B)$$
#
# Thus, conditional independence implies
#
# $$P(f \vert c) = \prod_i P(f_i | c) $$
#
# where $f_i$ is an individual feature (a word in this example).
#
# To make a classification, we then choose the class $c$ such that $P(c \vert f)$ is maximal.
#
# There is a small caveat when computing these probabilities. For [floating point underflow](http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html) we change the product into a sum by going into log space. This is called the LogSumExp trick. So:
#
# $$\log P(f \vert c) = \sum_i \log P(f_i \vert c) $$
#
# There is another caveat. What if we see a term that didn't exist in the training data? This means that $P(f_i \vert c) = 0$ for that term, and thus $P(f \vert c) = \prod_i P(f_i | c) = 0$, which doesn't help us at all. Instead of using zeros, we add a small negligible value called $\alpha$ to each count. This is called Laplace Smoothing.
#
# $$P(f_i \vert c) = \frac{N_{ic}+\alpha}{N_c + \alpha N_i}$$
#
# where $N_{ic}$ is the number of times feature $i$ was seen in class $c$, $N_c$ is the number of times class $c$ was seen and $N_i$ is the number of times feature $i$ was seen globally. $\alpha$ is sometimes called a regularization parameter.
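#%% [markdown]
# A tiny numeric illustration of the smoothing formula above (the counts are hypothetical, not taken from the data): an unseen word ($N_{ic}=0$) still receives a small nonzero probability.
#%%
N_ic, N_c, N_i, alpha = 0, 100, 10, 1.0
p_smoothed = (N_ic + alpha) / (N_c + alpha * N_i)
print("Smoothed P(f_i | c) = {:.4f}".format(p_smoothed))  # 1/110, roughly 0.0091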
#%% [markdown]
# ### Multinomial Naive Bayes and Other Likelihood Functions
#
# Since we are modeling word counts, we are using variation of Naive Bayes called Multinomial Naive Bayes. This is because the likelihood function actually takes the form of the multinomial distribution.
#
# $$P(f \vert c) = \frac{\left( \sum_i f_i \right)!}{\prod_i f_i!} \prod_{f_i} P(f_i \vert c)^{f_i} \propto \prod_{i} P(f_i \vert c)$$
#
# where the nasty term out front is absorbed as a normalization constant such that probabilities sum to 1.
#
# There are many other variations of Naive Bayes, all which depend on what type of value $f_i$ takes. If $f_i$ is continuous, we may be able to use *Gaussian Naive Bayes*. First compute the mean and variance for each class $c$. Then the likelihood, $P(f \vert c)$ is given as follows
#
# $$P(f_i = v \vert c) = \frac{1}{\sqrt{2\pi \sigma^2_c}} e^{- \frac{\left( v - \mu_c \right)^2}{2 \sigma^2_c}}$$
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set II</h3>
#
# <p><b>Exercise:</b> Implement a simple Naive Bayes classifier:</p>
#
# <ol>
# <li> split the data set into a training and test set
# <li> Use `scikit-learn`'s `MultinomialNB()` classifier with default parameters.
# <li> train the classifier over the training set and test on the test set
# <li> print the accuracy scores for both the training and the test sets
# </ol>
#
# What do you notice? Is this a good classifier? If not, why not?
# </div>
#%%
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
def xy_train_test(critics, vectorizer=None):
    # build the bag-of-words matrix and split it into train and test sets
    X, y = make_xy(critics, vectorizer)
    return train_test_split(X, y)
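#%% [markdown]
# A minimal sketch of one possible answer to Exercise II (an added example, not the author's original solution): it relies on the `xy_train_test` helper above and scikit-learn's default `MultinomialNB` parameters.
#%%
xtr, xte, ytr, yte = xy_train_test(critics)
nb_clf = MultinomialNB().fit(xtr, ytr)
print("Training accuracy: {:.3f}".format(nb_clf.score(xtr, ytr)))
print("Test accuracy: {:.3f}".format(nb_clf.score(xte, yte)))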
#%% [markdown]
# ### Picking Hyperparameters for Naive Bayes and Text Maintenance
#%% [markdown]
# We need to know what value to use for $\alpha$, and we also need to know which words to include in the vocabulary. As mentioned earlier, some words are obvious stopwords. Other words appear so infrequently that they serve as noise, and other words in addition to stopwords appear so frequently that they may also serve as noise.
#%% [markdown]
# First, let's find an appropriate value for `min_df` for the `CountVectorizer`. `min_df` can be either an integer or a float/decimal. If it is an integer, `min_df` represents the minimum number of documents a word must appear in for it to be included in the vocabulary. If it is a float, it represents the minimum *percentage* of documents a word must appear in to be included in the vocabulary. From the documentation:
#%% [markdown]
# >min_df: When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None.
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set III</h3>
#
# <p><b>Exercise:</b> Construct the cumulative distribution of document frequencies (df). The $x$-axis is a document count $x_i$ and the $y$-axis is the percentage of words that appear less than $x_i$ times. For example, at $x=5$, plot a point representing the percentage or number of words that appear in 5 or fewer documents.</p>
#
# <p><b>Exercise:</b> Look for the point at which the curve begins climbing steeply. This may be a good value for `min_df`. If we were interested in also picking `max_df`, we would likely pick the value where the curve starts to plateau. What value did you choose?</p>
# </div>
#%%
# Your turn.
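#%% [markdown]
# One possible sketch (an added example, not the author's answer): compute each word's document frequency from the count matrix and plot the cumulative fraction of words that appear in at most $x$ documents.
#%%
X_df, _ = make_xy(critics, CountVectorizer())
doc_freq = np.asarray((X_df > 0).sum(axis=0)).ravel()  # number of documents containing each word
counts_sorted = np.sort(doc_freq)
cum_fraction = np.arange(1, len(counts_sorted) + 1) / len(counts_sorted)
plt.plot(counts_sorted, cum_fraction)
plt.xscale('log')
plt.xlabel("Document count")
plt.ylabel("Fraction of words appearing in at most x documents")
plt.show()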
#%% [markdown]
# The parameter $\alpha$ is chosen to be a small value that simply avoids having zeros in the probability computations. This value can sometimes be chosen arbitrarily with domain expertise, but we will use K-fold cross validation. In K-fold cross-validation, we divide the data into $K$ non-overlapping parts. We train on $K-1$ of the folds and test on the remaining fold. We then iterate, so that each fold serves as the test fold exactly once. The function `cv_score` performs the K-fold cross-validation algorithm for us, but we need to pass a function that measures the performance of the algorithm on each fold.
#%%
from sklearn.model_selection import KFold
def cv_score(clf, X, y, scorefunc):
result = 0.
nfold = 5
for train, test in KFold(nfold).split(X): # split data into train/test groups, 5 times
clf.fit(X[train], y[train]) # fit the classifier, passed is as clf.
result += scorefunc(clf, X[test], y[test]) # evaluate score function on held-out data
return result / nfold # average
#%% [markdown]
# We use the log-likelihood as the score here in `scorefunc`. The higher the log-likelihood, the better. Indeed, what we do in `cv_score` above is to implement the cross-validation part of `GridSearchCV`.
#
# The custom scoring function `scorefunc` allows us to use different metrics depending on the decision risk we care about (precision, accuracy, profit etc.) directly on the validation set. You will often find people using `roc_auc`, precision, recall, or `F1-score` as the scoring function.
#%%
def log_likelihood(clf, x, y):
prob = clf.predict_log_proba(x)
rotten = y == 0
fresh = ~rotten
return prob[rotten, 0].sum() + prob[fresh, 1].sum()
#%% [markdown]
# We'll cross-validate over the regularization parameter $\alpha$.
#%% [markdown]
# Let's set up the train and test masks first, and then we can run the cross-validation procedure.
#%%
from sklearn.model_selection import train_test_split
itrain, itest = train_test_split(range(critics.shape[0]), train_size=0.7)
mask = np.zeros(critics.shape[0], dtype=np.bool)
mask[itrain] = True
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set IV</h3>
#
# <p><b>Exercise:</b> What does using the function `log_likelihood` as the score mean? What are we trying to optimize for?</p>
#
# <p><b>Exercise:</b> Without writing any code, what do you think would happen if you choose a value of $\alpha$ that is too high?</p>
#
# <p><b>Exercise:</b> Using the skeleton code below, find the best values of the parameter `alpha`, and use the value of `min_df` you chose in the previous exercise set. Use the `cv_score` function above with the `log_likelihood` function for scoring.</p>
# </div>
#%%
from sklearn.naive_bayes import MultinomialNB
#the grid of parameters to search over
alphas = [.1, 1, 5, 10, 50]
best_min_df = None # YOUR TURN: put your value of min_df here.
#Find the best value for alpha and min_df, and the best classifier
best_alpha = None
maxscore=-np.inf
for alpha in alphas:
vectorizer = CountVectorizer(min_df=best_min_df)
Xthis, ythis = make_xy(critics, vectorizer)
Xtrainthis = Xthis[mask]
ytrainthis = ythis[mask]
# your turn
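    # A hedged sketch of one way to finish the search (an added example, not the author's
    # answer); it assumes the cv_score and log_likelihood helpers defined above.
    clf = MultinomialNB(alpha=alpha)
    score = cv_score(clf, Xtrainthis, ytrainthis, log_likelihood)
    if score > maxscore:
        maxscore = score
        best_alpha = alpha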
#%%
print("alpha: {}".format(best_alpha))
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set V: Working with the Best Parameters</h3>
#
# <p><b>Exercise:</b> Using the best value of `alpha` you just found, calculate the accuracy on the training and test sets. Is this classifier better? Why (not)?</p>
#
# </div>
#%%
vectorizer = CountVectorizer(min_df=best_min_df)
X, y = make_xy(critics, vectorizer)
xtrain=X[mask]
ytrain=y[mask]
xtest=X[~mask]
ytest=y[~mask]
clf = MultinomialNB(alpha=best_alpha).fit(xtrain, ytrain)
#your turn. Print the accuracy on the test and training dataset
training_accuracy = clf.score(xtrain, ytrain)
test_accuracy = clf.score(xtest, ytest)
print("Accuracy on training data: {:2f}".format(training_accuracy))
print("Accuracy on test data: {:2f}".format(test_accuracy))
#%%
from sklearn.metrics import confusion_matrix
print(confusion_matrix(ytest, clf.predict(xtest)))
#%% [markdown]
# ## Interpretation
#%% [markdown]
# ### What are the strongly predictive features?
#
# We use a neat trick to identify strongly predictive features (i.e. words).
#
# * first, create a data set such that each row has exactly one feature. This is represented by the identity matrix.
# * use the trained classifier to make predictions on this matrix
# * sort the rows by predicted probabilities, and pick the top and bottom $K$ rows
#%%
words = np.array(vectorizer.get_feature_names())
x = np.eye(xtest.shape[1])
probs = clf.predict_log_proba(x)[:, 0]
ind = np.argsort(probs)
good_words = words[ind[:10]]
bad_words = words[ind[-10:]]
good_prob = probs[ind[:10]]
bad_prob = probs[ind[-10:]]
print("Good words\t P(fresh | word)")
for w, p in zip(good_words, good_prob):
print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p)))
print("Bad words\t P(fresh | word)")
for w, p in zip(bad_words, bad_prob):
print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p)))
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set VI</h3>
#
# <p><b>Exercise:</b> Why does this method work? What does the probability for each row in the identity matrix represent</p>
#
# </div>
#%% [markdown]
# The above exercise is an example of *feature selection*. There are many other feature selection methods. A list of feature selection methods available in `sklearn` is [here](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_selection). The most common feature selection technique for text mining is the chi-squared $\left( \chi^2 \right)$ [method](http://nlp.stanford.edu/IR-book/html/htmledition/feature-selectionchi2-feature-selection-1.html).
#%% [markdown]
# ### Prediction Errors
#
# We can see mis-predictions as well.
#%%
x, y = make_xy(critics, vectorizer)
prob = clf.predict_proba(x)[:, 0]
predict = clf.predict(x)
bad_rotten = np.argsort(prob[y == 0])[:5]
bad_fresh = np.argsort(prob[y == 1])[-5:]
print("Mis-predicted Rotten quotes")
print('---------------------------')
for row in bad_rotten:
print(critics[y == 0].quote.iloc[row])
print("")
print("Mis-predicted Fresh quotes")
print('--------------------------')
for row in bad_fresh:
print(critics[y == 1].quote.iloc[row])
print("")
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set VII: Predicting the Freshness for a New Review</h3>
# <br/>
# <div>
# <b>Exercise:</b>
# <ul>
# <li> Using your best trained classifier, predict the freshness of the following sentence: *'This movie is not remarkable, touching, or superb in any way'*
# <li> Is the result what you'd expect? Why (not)?
# </ul>
# </div>
# </div>
#%%
#your turn
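#%% [markdown]
# A minimal sketch (an added example): it assumes the `clf` and `vectorizer` objects trained above are still in scope.
#%%
sentence = 'This movie is not remarkable, touching, or superb in any way'
x_new = vectorizer.transform([sentence])
print("P(fresh) = {:.3f}".format(clf.predict_proba(x_new)[0, 1]))
print("Predicted class:", "fresh" if clf.predict(x_new)[0] == 1 else "rotten")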
#%% [markdown]
# ### Aside: TF-IDF Weighting for Term Importance
#
# TF-IDF stands for
#
# `Term-Frequency X Inverse Document Frequency`.
#
# In the standard `CountVectorizer` model above, we used just the term frequency in a document of words in our vocabulary. In TF-IDF, we weight this term frequency by the inverse of its popularity in all documents. For example, if the word "movie" showed up in all the documents, it would not have much predictive value. It could actually be considered a stopword. By weighing its counts by 1 divided by its overall frequency, we downweight it. We can then use these TF-IDF weighted features as inputs to any classifier. **TF-IDF is essentially a measure of term importance, and of how discriminative a word is in a corpus.** There are a variety of nuances involved in computing TF-IDF, mainly involving where to add the smoothing term to avoid division by 0, or log of 0 errors. The formula for TF-IDF in `scikit-learn` differs from that of most textbooks:
#
# $$\mbox{TF-IDF}(t, d) = \mbox{TF}(t, d)\times \mbox{IDF}(t) = n_{td} \log{\left( \frac{\vert D \vert}{\vert d : t \in d \vert} + 1 \right)}$$
#
# where $n_{td}$ is the number of times term $t$ occurs in document $d$, $\vert D \vert$ is the number of documents, and $\vert d : t \in d \vert$ is the number of documents that contain $t$
#%%
# http://scikit-learn.org/dev/modules/feature_extraction.html#text-feature-extraction
# http://scikit-learn.org/dev/modules/classes.html#text-feature-extraction-ref
from sklearn.feature_extraction.text import TfidfVectorizer
tfidfvectorizer = TfidfVectorizer(min_df=1, stop_words='english')
Xtfidf=tfidfvectorizer.fit_transform(critics.quote)
#%% [markdown]
# <div class="span5 alert alert-info">
# <h3>Exercise Set VIII: Enrichment <b>(Optional)</b></h3>
#
# <p>
# There are several additional things we could try. Try some of these as exercises:
# <ol>
# <li> Build a Naive Bayes model where the features are n-grams instead of words. N-grams are phrases containing n words next to each other: a bigram contains 2 words, a trigram contains 3 words, and 6-gram contains 6 words. This is useful because "not good" and "so good" mean very different things. On the other hand, as n increases, the model does not scale well since the feature set becomes more sparse.
# <li> Try a model besides Naive Bayes, one that would allow for interactions between words -- for example, a Random Forest classifier.
# <li> Try adding supplemental features -- information about genre, director, cast, etc.
# <li> Use word2vec or [Latent Dirichlet Allocation](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation) to group words into topics and use those topics for prediction.
# <li> Use TF-IDF weighting instead of word counts.
# </ol>
# </p>
#
# <b>Exercise:</b> Try at least one of these ideas to improve the model (or any other ideas of your own). Implement here and report on the result.
# </div>
#%%
# Your turn
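#%% [markdown]
# A hedged sketch of the first enrichment idea (unigrams + bigrams). The vectorizer settings and the default `alpha` below are assumptions rather than tuned values.
#%%
bigram_vectorizer = CountVectorizer(ngram_range=(1, 2), min_df=1)
Xb, yb = make_xy(critics, bigram_vectorizer)
clf_bigram = MultinomialNB().fit(Xb[mask], yb[mask])
print("Bigram model train accuracy: {:.3f}".format(clf_bigram.score(Xb[mask], yb[mask])))
print("Bigram model test accuracy: {:.3f}".format(clf_bigram.score(Xb[~mask], yb[~mask])))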
| UTF-8 | Python | false | false | 25,840 | py | 19 | naive_bayes.py | 5 | 0.713235 | 0.70209 | 0 | 508 | 49.862205 | 857 |
noltron000/CS-1-2_tweet-generator | 5,609,227,289,814 | 77f8235f455e91f8d95a896cd589b58597305a5d | 0c0712ff1c7a06bf75dcc9fbfdac8d6d3b8df876 | /misc/prefactored.py | 10f81c92eacee63b7e195570f07a08ce4cff19d1 | []
| no_license | https://github.com/noltron000/CS-1-2_tweet-generator | fde70ce31188e2143cda06824292218d0adf527e | 11c1afcf81632aa453818e2320937b9854891fa7 | refs/heads/master | 2020-04-05T09:12:06.165258 | 2019-04-14T20:26:16 | 2019-04-14T20:26:16 | 154,562,627 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
app = Flask(__name__)
import urllib.request # imports internet textfile reader
import random # psuedo-random number generator module
import sys # module allows program to access terminal parameters
import re # the powerful "regular expression" for decompartimentalizing strings
def dictionary_main():
# opens primary dictionary on the machine.
'''
This shouldn't vary too much machine to machine, but it may.
It is the primary dictionary on your machine, so it holds many words.
It isn't originally in list format.
'''
file = open("/usr/share/dict/words", "r")
text = file.read()
file.close()
return text
def publication_random():
book = round((random.random()*49544)+1)
link = f"http://www.gutenberg.org/cache/epub/{book}/pg{book}.txt"
with urllib.request.urlopen(link) as response:
text = response.read()
print("200 SUCCESS")
	text = text.decode("utf-8", errors="ignore")
	text = text.replace("\r\n", " ")
return text
def publication_example():
# opens primary dictionary on the machine.
'''
It is the primary dictionary on your machine, so it holds many words.
It isn't originally in list format.
'''
file = open("./example.txt", "r")
text = file.read()
file.close()
return text
def user_number(feed):
print(feed)
while True:
try:
number = int(input("enter integer:\n>> "))
assert isinstance(number, int) # throws an error if its not an integer
assert number != None
assert number != ""
return number
except:
print("INVALID INPUT.\nplease try again.\n")
def user_string(feed):
print(feed)
while True:
try:
string = str(input("enter string:\n>> "))
assert isinstance(string, str) # throws an error if its not an string
return string
except:
print("INVALID INPUT.\nplease try again.\n")
def listify_data(input_data):
# takes data and returns its list version.
'''
if its already a list, nothing happens.
if its a string, it uses REGULAR EXPRESSIONS to transform it into a list.
if its an unexpected data type, it throws an assertion error
'''
input_data = str(input_data)
if (type(input_data) is str):
input_data = re.findall(r"[\w']+", input_data)
# I still must configure the thing to remove /n /r /xc3
assert type(input_data) is list
return input_data
def undupli_list(input_list):
# removes duplicates from a list.
output_list = []
for word in input_list:
if word not in output_list:
output_list += [word]
return output_list
def randify_list(input_list, iterations):
# randomly shuffles a list.
'''
picks a random digit, min 0 max current length of input list
pops current digit into output list
the `iterations` variable determines the number of words to output
'''
output_list = []
while len(input_list) > 0 and (len(output_list) < iterations or iterations == 0):
digit_rand = random.randint(0, len(input_list) - 1)
input_rand = input_list.pop(digit_rand)
output_list += [input_rand]
return output_list
def textify_list(input_list):
# turns a list into a space-deliminated string.
'''
output_text is the final return product.
each item is added with a space to output_text.
the final output_text string is returned
'''
output_text = ''
for input_item in input_list:
if output_text != '':
output_text += ' '
output_text += input_item
input_list = output_text
return input_list
def textify_dict(input_dict):
output_text = ''
for w in sorted(input_dict, key=input_dict.get, reverse=False):
if input_dict[w] > 0:
output_text += str(input_dict[w]) + ' ' + str(w) + "\n"
return output_text
def lowerfy_list(input_list):
# takes a list of strings and runs lowercase on each item.
'''
output_text is the final return product.
each item is added with a space to output_text.
the final output_text string is returned
'''
output_list = []
for string in input_list:
string = string.lower()
output_list += [string]
return output_list
def reverse_text(old_word):
# simply reverses a text.
new_word = ""
i = 1
for _ in old_word:
new_word += old_word[len(old_word)-i]
i+=1
return new_word
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'''
Now setting up specific functions. These will each return complete jobs.
These functions will use the previously declared helpers to make code readable.
'''
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def rearrange(iterations, input_data = dictionary_main()):
data = input_data
data = listify_data(data)
data = lowerfy_list(data)
# data = undupli_list(data) ## This function takes too long!
data = randify_list(data, iterations)
data = textify_list(data)
return(data)
### ANAGRAMS
def anagrams (input_word, input_data = dictionary_main()):
data = input_data
data = listify_data(data)
data = lowerfy_list(data)
new_data = []
i_word = input_word
for v_word in data:
if verify_anagram(i_word,v_word):
new_data += [v_word]
new_data = textify_list(new_data)
return new_data
def verify_anagram(input_word, verify_word):
input_list = list(input_word)
verify_list = list(verify_word)
input_list.sort()
verify_list.sort()
if input_list == verify_list:
return True
else:
return False
### PALINDROMES
def palindromes(input_data = dictionary_main()):
data = input_data
data = listify_data(data)
data = lowerfy_list(data)
new_data = []
for word in data:
if verify_palindrome(word):
new_data += [word]
new_data = undupli_list(new_data)
new_data = textify_list(new_data)
return new_data
def verify_palindrome(input_word):
i = 1
while (i <= len(input_word)/2):
if input_word[i-1] != input_word[-i]:
return False
i += 1
return True
# HISTOGRAM
def histogram(input_data = dictionary_main()):
data = listify_data(input_data)
data = lowerfy_list(data)
# data = undupli_list(data) ## This removes duplicates. If this is correct, there will be no outputs when this is uncommented.
hist = {}
for word in data:
if word in hist:
hist[word] += 1
else:
hist[word] = 1
return hist
def histogram_arrays(input_data = dictionary_main()):
	# builds a histogram as a list of [word, count] pairs instead of a dict.
	data = listify_data(input_data)
	data = lowerfy_list(data)
	hist = []
	for word in data:
		found = False
		for item in hist:
			if word == item[0]:
				item[1] += 1
				found = True
				break
		if not found:
			hist.append([word, 1])
	return hist
def display_dict(input_data = dictionary_main()):
hist = histogram(input_data)
output = textify_dict(hist)
return output
def display_weight(input_data):
hist = histogram(input_data)
hist = calculate_weight(hist) # testing calculate weight
string = prettify(hist)
return string
def prettify(input_data):
string = ''
for item in input_data:
string += item
string += ': '
string += str(input_data[item] * 1000)
string += '‰\n'
return string
# WEIGHTED FUNCTION
def calculate_weight(input_dict):
weight_dict = {}
dict_total = 0
for word in input_dict:
dict_total += input_dict[word]
for word in input_dict:
weight_dict[word] = input_dict[word] / dict_total
return weight_dict
def random_choice(weight_dict):
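	# walks the cumulative distribution until the random draw falls inside a word's probability mass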
cumulative = 0
rand_select = random.random()
for word in weight_dict:
if (cumulative <= rand_select < weight_dict[word] + cumulative):
print("SELECTED: " + word)
return word
cumulative += weight_dict[word]
print(rand_select)
# for word in weight_dict:
# if weight_dict
### STARTER KIT
# MUST DO RANDOM WEIGHTING
@app.route('/')
def hello_world():
return display_weight(publication_random())
if __name__ == '__main__':
# print("\nIt looks like you used a parameter when you ran this file.\nI'm going to assume that this is a file, and I'm going to run")
entry = user_number("\nI generate several words.\nHow many should I create?")
print("\nRandom Words:\n" + rearrange(entry))
entry = user_string("\nI make anagrams, so I need a word.\nWhat base word should I use today?")
print("\nAnagrams:\n" + anagrams(entry))
print("\nI do palindromes too, but I\ndon't need any inputs for that!\nI'll go ahead and get started.")
print("\nPalindromes:\n" + palindromes())
print("\nI'm going to spit out a large histogram of words, \nsorted by the PERMILLE (‰) of the text that it is used in.")
input("press enter to continue: ")
my_dict = publication_random()
print("\nDisplay Weight:\n" + display_weight(my_dict)) | UTF-8 | Python | false | false | 8,304 | py | 12 | prefactored.py | 10 | 0.678675 | 0.674337 | 0 | 305 | 26.216393 | 135 |
State-Representation/code | 3,298,534,887,485 | 6ddbedf7fa2f86e63527bc4ab206c8b8bcc1c3c8 | 566aabb186fc6950959240202b7db596a64fec4a | /algorithms/AE_Algorithm.py | 5d0cbaa14c70dbe0500e008b7633326d344c3378 | []
| no_license | https://github.com/State-Representation/code | 3393c34a831c209f9d89793543b3a56e4d1fb18a | 88c8e80a491db56204e3b9211ae10aa87d661178 | refs/heads/main | 2023-07-24T22:52:03.918718 | 2021-09-07T15:14:55 | 2021-09-07T15:14:55 | 399,847,904 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# State Representation Learning with Task-Irrelevant Factors of Variation in Robotics
# Anonymous Authors 2021
import numpy as np
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
from scipy.stats import norm
import sys
sys.path.insert(0,'..')
import importlib
import algorithms.EarlyStopping as ES
import torch.utils.data as data
# ---
# ====================== Training functions ====================== #
# ---
class AE_Algorithm():
def __init__(self, opt):
# Save the whole config
self.opt = opt
# Training parameters
self.batch_size = opt['batch_size']
self.epochs = opt['epochs']
self.current_epoch = None
self.loss_fn = opt['loss_fn']
self.snapshot = opt['snapshot']
self.console_print = opt['console_print']
self.lr_schedule = opt['lr_schedule']
self.init_lr_schedule = opt['lr_schedule']
self.model = None
self.vae_optimiser = None
# Beta scheduling
self.beta = opt['beta_min']
self.beta_range = opt['beta_max'] - opt['beta_min'] + 1
self.beta_steps = opt['beta_steps'] - 1
self.beta_idx = 0
# Gamma scheduling
self.gamma_warmup = opt['gamma_warmup']
self.gamma = 0 if self.gamma_warmup > 0 else opt['gamma_min']
self.gamma_min = opt['gamma_min']
self.gamma_idx = 0
self.gamma_update_step = (opt['gamma_max'] - opt['gamma_min']) / opt['gamma_steps']
self.gamma_update_epoch_step = (self.epochs - self.gamma_warmup - 1) / opt['gamma_steps']
# Action loss parameters
self.min_dist = opt['min_dist_samples']
self.weight_dist_loss = opt['weight_dist_loss']
self.distance_type = opt['distance_type'] if 'distance_type' in opt.keys() else '2'
self.batch_dist_dict = {}
self.epoch_dist_dict = {}
self.min_epochs = opt['min_epochs'] if 'min_epochs' in opt.keys() else 499
self.max_epochs = opt['max_epochs'] if 'max_epochs' in opt.keys() else 499
# Other parameters
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.opt['device'] = self.device
print(' *- Chosen device: ', self.device)
torch.manual_seed(opt['random_seed'])
np.random.seed(opt['random_seed'])
        print(' *- Chosen random seed: ', opt['random_seed'])
if self.device == 'cuda': torch.cuda.manual_seed(opt['random_seed'])
self.save_path = self.opt['exp_dir'] + '/' + self.opt['filename']
self.model_path = self.save_path + '_model.pt'
def count_parameters(self):
"""
Counts the total number of trainable parameters in the model.
"""
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def plot_grid(self, images, n=5,name="dec"):
"""
Plots an nxn grid of images of size digit_size. Used to monitor the
reconstruction of decoded images.
"""
digit_size = int(np.sqrt(self.opt['input_dim']/self.opt['input_channels']))
filename = self.save_path +name + '_checkpointRecon_{0}'.format(self.current_epoch)
figure = np.zeros((digit_size * n, digit_size * n, self.opt['input_channels']))
# Construct grid of latent variable values
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
# decode for each square in the grid
counter = 0
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
digit = images[counter].permute(1,2,0).detach().cpu().numpy()
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
counter += 1
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='bone')
plt.savefig(filename)
plt.clf()
plt.close()
def latent_mean_dist(self, mean1, mean2, action,
distance_type='1'):
"""
Computed the average d distance between the action and no action pairs
in the given batch.
"""
sample1 = mean1
sample2 = mean2
dist = torch.norm(sample1 - sample2, p=float(self.distance_type), dim=1) # Batch size
dist=dist**2
# Distances between pairs with an action
dist_action = torch.mul(dist, action) # Batch size
dist_action = dist_action[dist_action.nonzero()] # num_action, 1
dist_action_mean = torch.mean(dist_action)
dist_action_std = torch.std(dist_action)
# Distances between pairs without an action
dist_no_action = torch.mul(dist, (1-action))
dist_no_action = dist_no_action[dist_no_action.nonzero()]
dist_no_action_mean = torch.mean(dist_no_action)
dist_no_action_std = torch.std(dist_no_action)
# Compute the action loss
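        # Pairs without an action are pulled together (their distance is penalised directly),
        # while pairs with an action are pushed at least min_dist apart via a hinge term.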
zeros = torch.zeros(dist.size()).to(self.device)
batch_dist = (1 - action) * dist + action * torch.max(zeros, self.min_dist - dist)
dist_loss = torch.mean(batch_dist)
# Weight the action loss
batch_loss = self.weight_dist_loss*self.gamma * batch_dist
avg_batch_loss = torch.mean(batch_loss)
# Save the result in order to compute average epoch distance
# If training, save to training distances
if self.model.training:
self.epoch_action_dist.append(dist_action.cpu().detach().numpy())
self.epoch_noaction_dist.append(dist_no_action.cpu().detach().numpy())
# If evaluation, save to test distances
else:
self.test_action_dist.append(dist_action.cpu().detach().numpy())
self.test_noaction_dist.append(dist_no_action.cpu().detach().numpy())
return (avg_batch_loss, dist_loss, dist_action_mean, dist_action_std,
dist_no_action_mean, dist_no_action_std)
def compute_loss(self, x, dec_mu, enc_mu):
"""
Computes the usual VAE loss on the training batch given the criterion.
"""
# Reconstruction loss
loss = torch.nn.MSELoss(reduction = 'sum')
batch_rec=loss(x,dec_mu)
return batch_rec
def format_loss(self, losses_list):
"""Rounds the loss and returns an np array for logging."""
reformatted = list(map(lambda x: round(x.item(), 2), losses_list))
reformatted.append(int(self.current_epoch))
return np.array(reformatted)
def init_model(self):
"""Initialises the VAE model."""
vae = importlib.import_module("architectures.{0}".format(self.opt['model']))
print(' *- Imported module: ', vae)
try:
class_ = getattr(vae, self.opt['model'])
instance = class_(self.opt).to(self.device)
return instance
except:
raise NotImplementedError(
'Model {0} not recognized'.format(self.opt['model']))
def init_optimiser(self):
"""Initialises the optimiser."""
print(self.model.parameters())
if self.opt['optim_type'] == 'Adam':
print(' *- Initialised Adam optimiser.')
vae_optim = optim.Adam(self.model.parameters(), lr=self.lr)
return vae_optim
else:
raise NotImplementedError(
'Optimiser {0} not recognized'.format(self.opt['optim_type']))
def update_learning_rate(self, optimiser):
"""Annealing schedule for the learning rate."""
if self.current_epoch == self.lr_update_epoch:
for param_group in optimiser.param_groups:
self.lr = self.new_lr
param_group['lr'] = self.lr
print(' *- Learning rate updated - new value:', self.lr)
try:
self.lr_update_epoch, self.new_lr = self.lr_schedule.pop(0)
except:
print(' *- Reached the end of the update schedule.')
                print(' *- Remaining lr schedule:', self.lr_schedule)
def update_gamma(self):
"""Annealing schedule for the distance term."""
epoch_to_update = self.gamma_idx * self.gamma_update_epoch_step + self.gamma_warmup
if (self.current_epoch + 1) > epoch_to_update:
self.gamma = self.gamma_min + self.gamma_idx * self.gamma_update_step
self.gamma_idx += 1
print (' *- Gamma updated - new value:', self.gamma)
def train(self, train_dataset, test_dataset, num_workers=0, chpnt_path=''):
"""Trains a model with given hyperparameters."""
dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=self.batch_size, shuffle=True,
num_workers=num_workers, drop_last=True)#, sampler=train_sampler)
n_data = len(train_dataset)
assert(train_dataset.dataset_name == test_dataset.dataset_name)
print(('\nPrinting model specifications...\n' +
' *- Path to the model: {0}\n' +
' *- Training dataset: {1}\n' +
' *- Number of training samples: {2}\n' +
' *- Number of epochs: {3}\n' +
' *- Loss criterion: {4}\n' +
' *- Batch size: {5}\n'
).format(self.model_path, train_dataset.dataset_name, n_data,
self.epochs, self.loss_fn, self.batch_size))
if chpnt_path:
# Pick up the last epochs specs
self.load_checkpoint(chpnt_path)
else:
# Initialise the model
self.model = self.init_model()
self.start_epoch, self.lr = self.lr_schedule.pop(0)
try:
self.lr_update_epoch, self.new_lr = self.lr_schedule.pop(0)
except:
self.lr_update_epoch, self.new_lr = self.start_epoch - 1, self.lr
self.vae_optimiser = self.init_optimiser()
self.valid_losses = []
self.epoch_losses = []
# To track the average epoch action loss
self.epoch_action_dist_list = []
self.epoch_noaction_dist_list = []
self.test_action_dist_list = []
self.test_noaction_dist_list = []
print((' *- Learning rate: {0}\n' +
' *- Next lr update at {1} to the value {2}\n' +
' *- Remaining lr schedule: {3}'
).format(self.lr, self.lr_update_epoch, self.new_lr,
self.lr_schedule))
es = ES.EarlyStopping(patience=300)
num_parameters = self.count_parameters()
self.opt['num_parameters'] = num_parameters
print(' *- Model parameter/training samples: {0}'.format(
num_parameters/len(train_dataset)))
print(' *- Model parameters: {0}'.format(num_parameters))
for name, param in self.model.named_parameters():
if param.requires_grad:
spacing = 1
print('{0:>2}{1}\n\t of dimension {2}'.format('', name, spacing),
list(param.shape))
print('\nStarting to train the model...\n' )
for self.current_epoch in range(self.start_epoch, self.epochs):
# Restart the epoch distances
self.epoch_action_dist = []
self.epoch_noaction_dist = []
# Update hyperparameters
self.model.train()
#self.update_beta()
self.update_gamma()
self.update_learning_rate(self.vae_optimiser)
epoch_loss = np.zeros(11)
for batch_idx, (img1, img2, action) in enumerate(dataloader):
img1 = img1.to(self.device)
img2 = img2.to(self.device)
action = action.to(self.device)
# VAE loss on img1
dec_mean1, enc_mean1 = self.model(img1)
loss1= self.compute_loss(
img1, dec_mean1, enc_mean1)
# VAE loss on img2
dec_mean2, enc_mean2 = self.model(img2)
loss2 = self.compute_loss(
img2, dec_mean2, enc_mean2)
# Average VAE loss for the pair of image batches
loss = (loss1 + loss2) / 2
# Action loss between the latent samples
(w_dist_loss, pure_dist_loss,
dist_action_mean, dist_action_std,
dist_no_action_mean, dist_no_action_std) = self.latent_mean_dist(
enc_mean1, enc_mean2, action)
# Optimise the VAE for the complete loss
the_loss = loss + w_dist_loss
self.vae_optimiser.zero_grad()
the_loss.backward()
self.vae_optimiser.step()
valid_loss = np.zeros(10)
# Check that the at least 350 epochs are done
if (es.step(valid_loss[0]) and self.current_epoch > self.min_epochs) \
or (self.current_epoch > self.min_epochs and self.epoch_gap > 0) \
or self.current_epoch > self.max_epochs:
self.plot_dist_hists(
self.epoch_action_dist, self.epoch_noaction_dist, 'training')
break
# Update the checkpoint only if no early stopping was done
self.save_checkpoint(epoch_loss[0])
# Print current loss values every epoch
if (self.current_epoch + 1) % self.console_print == 0:
print('Epoch {0}:'.format(self.current_epoch))
print(' Train loss: {0:.3f} recon loss: {1:.3f} KL loss: {2:.3f} dist: {3:.3f}'.format(
epoch_loss[0], epoch_loss[1], epoch_loss[2], epoch_loss[4]))
print(' Valid loss: {0:.3f} recon loss: {1:.3f} KL loss: {2:.3f} dist: {3:.3f}'.format(
valid_loss[0], valid_loss[1], valid_loss[2], valid_loss[4]))
print(' Beta: {0:.6e}'.format(self.beta))
print(' Gamma: {0:.6e}'.format(self.gamma))
print(' LR: {0:.6e}'.format(self.lr))
print(' MD: {0:.6e}'.format(self.min_dist))
#print(' Gap: {0:.6e}\n'.format(self.epoch_gap))
# Print validation results when specified
if (self.current_epoch + 1) % self.snapshot == 0:
# Plot reconstructions
self.plot_grid(dec_mean1)
self.plot_grid(img1, name="input")
self.model.eval()
print('Training completed.')
#self.plot_model_loss()
self.model.eval()
        # Measure other d distances at the end of training for comparison
print('Calculating other distances...')
original_distance_type = self.distance_type
all_distance_types = ['1', '2', 'inf']
self.batch_dist_dict = {}
self.epoch_dist_dict = {}
for dist_type in all_distance_types:
self.distance_type = dist_type
self.batch_dist_dict[dist_type] = {}
self.epoch_dist_dict[dist_type] = {}
print(' *- Distance type set to ', self.distance_type)
after_training_train = self.compute_test_loss(train_dataset)
self.batch_dist_dict[dist_type]['train'] = list(map(lambda x: round(x, 3),
after_training_train))
#self.epoch_dist_dict[dist_type]['train_action'] = round(self.test_action_dist / self.test_action_pairs, 2)
self.epoch_dist_dict[dist_type]['train_noaction'] = round(self.test_noaction_dist / self.test_noaction_pairs, 2)
after_training_test = self.compute_test_loss(test_dataset)
self.batch_dist_dict[dist_type]['test'] = list(map(lambda x: round(x, 3),
after_training_test))
#self.epoch_dist_dict[dist_type]['test_action'] = round(self.test_action_dist / self.test_action_pairs, 2)
self.epoch_dist_dict[dist_type]['test_noaction'] = round(self.test_noaction_dist / self.test_noaction_pairs, 2)
self.distance_type = original_distance_type
# Save the model
torch.save(self.model.state_dict(), self.model_path)
def save_checkpoint(self, epoch_ml, keep=False):
"""
Saves a checkpoint during the training.
"""
if keep:
path = self.save_path + '_checkpoint{0}.pth'.format(self.current_epoch)
checkpoint_type = 'epoch'
else:
path = self.save_path + '_lastCheckpoint.pth'
checkpoint_type = 'last'
training_dict = {
'last_epoch': self.current_epoch,
'model_state_dict': self.model.state_dict(),
'vae_optimiser_state_dict': self.vae_optimiser.state_dict(),
'last_epoch_loss': epoch_ml,
'valid_losses': self.valid_losses,
'epoch_losses': self.epoch_losses,
'epoch_action_dist_list': self.epoch_action_dist_list,
'epoch_noaction_dist_list': self.epoch_noaction_dist_list,
'test_action_dist_list': self.test_action_dist_list,
'test_noaction_dist_list': self.test_noaction_dist_list,
'beta': self.beta,
'beta_range': self.beta_range,
'beta_steps': self.beta_steps,
'beta_idx': self.beta_idx,
'gamma_warmup': self.gamma_warmup,
'gamma': self.gamma,
'gamma_min': self.gamma_min,
'gamma_idx': self.gamma_idx,
'gamma_update_step': self.gamma_update_step,
'gamma_update_epoch_step': self.gamma_update_epoch_step,
'snapshot': self.snapshot,
'console_print': self.console_print,
'current_lr': self.lr,
'lr_update_epoch': self.lr_update_epoch,
'new_lr': self.new_lr,
'lr_schedule': self.lr_schedule
}
torch.save({**training_dict, **self.opt}, path)
print(' *- Saved {1} checkpoint {0}.'.format(self.current_epoch, checkpoint_type))
def load_checkpoint(self, path, eval=False):
"""
Loads a checkpoint and initialises the models to continue training.
"""
checkpoint = torch.load(path, map_location=self.device)
self.model = self.init_model()
self.model.load_state_dict(checkpoint['model_state_dict'])
self.lr = checkpoint['current_lr']
self.lr_update_epoch = checkpoint['lr_update_epoch']
self.new_lr = checkpoint['new_lr']
self.lr_schedule = checkpoint['lr_schedule']
self.vae_optimiser= self.init_optimiser()
self.vae_optimiser.load_state_dict(checkpoint['vae_optimiser_state_dict'])
self.start_epoch = checkpoint['last_epoch'] + 1
self.snapshot = checkpoint['snapshot']
self.valid_losses = checkpoint['valid_losses']
self.epoch_losses = checkpoint['epoch_losses']
if 'epoch_action_dist_list' in checkpoint.keys():
self.epoch_action_dist_list = checkpoint['epoch_action_dist_list']
self.epoch_noaction_dist_list = checkpoint['epoch_noaction_dist_list']
self.test_action_dist_list = checkpoint['test_action_dist_list']
self.test_noaction_dist_list = checkpoint['test_noaction_dist_list']
self.beta = checkpoint['beta']
self.beta_range = checkpoint['beta_range']
self.beta_steps = checkpoint['beta_steps']
self.beta_idx = checkpoint['beta_idx']
self.gamma_warmup = checkpoint['gamma_warmup']
self.gamma = checkpoint['gamma']
self.gamma_min = checkpoint['gamma_min']
self.gamma_idx = checkpoint['gamma_idx']
self.gamma_update_step = checkpoint['gamma_update_step']
self.gamma_update_epoch_step = checkpoint['gamma_update_epoch_step']
self.snapshot = checkpoint['snapshot']
self.console_print = checkpoint['console_print']
print(('\nCheckpoint loaded.\n' +
' *- Last epoch {0} with loss {1}.\n'
).format(checkpoint['last_epoch'],
checkpoint['last_epoch_loss']))
print(' *- Current lr {0}, next update on epoch {1} to the value {2}'.format(
self.lr, self.lr_update_epoch, self.new_lr)
)
if eval == False:
self.model.train()
else:
self.model.eval()
| UTF-8 | Python | false | false | 20,883 | py | 17 | AE_Algorithm.py | 15 | 0.557918 | 0.549011 | 0 | 501 | 40.676647 | 124 |
StevenClontz/mathematics-of-data | 1,288,490,210,696 | 5e8b15b9788d0904d6c638fa2c4d9e302d246203 | e9cf8a5b25f6e22d2b13b4e557f0fb5adb16d7ce | /source/files/measurements-spread-deviations.py | 083dec31359cdda2ec9066c94a76d8eb5a92c301 | [
"MIT"
]
| permissive | https://github.com/StevenClontz/mathematics-of-data | fe0a910fcd4b99c0bc8dff43c20f8085e5fdade9 | dd35724cafd4f7581ade5358828332f7e9fcb6b3 | refs/heads/main | 2023-04-04T11:05:00.228022 | 2022-06-15T21:05:38 | 2022-06-15T21:05:38 | 267,642,578 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | data_one = [] # fill in values from first dataset
data_two = [] # fill in this too
# if means are different, data was entered incorrectly
from statistics import mean
assert mean(data_one) == mean(data_two), "Means should be the same"
print("The mean for each dataset is FIXME") # print out the mean of the datasets
deviations_one = [
abs( value - mean(data_one) ) # measure distance of value from the mean
for value in data_one # do this for each value of data
]
print(deviations_one)
deviations_two = [] # TODO
print(deviations_two) # prints [5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 1, 1, 3, 4, 6] | UTF-8 | Python | false | false | 598 | py | 562 | measurements-spread-deviations.py | 24 | 0.688963 | 0.66388 | 0 | 15 | 38.933333 | 80 |
wayabi/bvh2nn_data__q2e | 12,524,124,669,126 | bf52fba0b6f5a13fac06a1a9604d3594e1cae6be | ee8f2d1a738528c8ae300a50b0861e0fedebf551 | /pandas3Dscatter.py | e62b08b28d53f2bde1562b855750f769f4da2341 | []
| no_license | https://github.com/wayabi/bvh2nn_data__q2e | 1a612b9ef4897c6d9d55a28887661d43c71b76fb | 2ec84e4d71166b19a19f2a6c806718fd2452d658 | refs/heads/master | 2020-03-22T19:02:19.190245 | 2018-07-11T00:10:17 | 2018-07-11T00:10:17 | 140,499,741 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
df = pd.read_csv('~/a', parse_dates=True)
#print(df.head())
#df['H-L'] = df.High - df.Low
#df['100MA'] = pd.rolling_mean(df['Close'], 100)
threedee = plt.figure().gca(projection='3d')
threedee.scatter(df['x'], df['y'], df['z'])
threedee.set_xlabel('x')
threedee.set_ylabel('y')
threedee.set_zlabel('z')
plt.show()
| UTF-8 | Python | false | false | 437 | py | 9 | pandas3Dscatter.py | 8 | 0.688787 | 0.668192 | 0 | 16 | 26.3125 | 48 |
mesonbuild/meson | 15,195,594,324,002 | 8c855c54f7660368ed011739de7e60f3e7ff6054 | 85ccd32aa73eecf274a937f1fc3b6f4d484b77da | /test cases/common/262 generator chain/stage2.py | 7f82592a1efe5f47c09cdff8a731e74087c2d87d | [
"Apache-2.0"
]
| permissive | https://github.com/mesonbuild/meson | 48321cf4235dfcc0194fed90ff43a57367592bf7 | cf5adf0c646474f0259d123fad60ca5ed38ec891 | refs/heads/master | 2023-09-01T05:58:50.807952 | 2023-03-17T20:27:37 | 2023-08-31T11:52:41 | 19,784,232 | 5,122 | 1,848 | Apache-2.0 | false | 2023-09-14T15:47:23 | 2014-05-14T15:08:16 | 2023-09-14T09:59:12 | 2023-09-14T15:47:22 | 40,706 | 4,875 | 1,436 | 2,172 | Python | false | false | #!/usr/bin/env python3
import sys
from pathlib import Path
assert(Path(sys.argv[1]).read_text() == 'stage2\n')
Path(sys.argv[2]).write_text('int main(void){}\n')
| UTF-8 | Python | false | false | 163 | py | 3,350 | stage2.py | 683 | 0.687117 | 0.662577 | 0 | 6 | 26.166667 | 51 |
ulucsahin/Thesis | 16,879,221,485,506 | c1450d0d58662a515250e9b45b338041218f17c4 | 00e256c739659169972fca251a3611d137c913a4 | /annotation_manager.py | d0fd267cd8abb24265a4e0e222a1cd8ede71ca4f | []
| no_license | https://github.com/ulucsahin/Thesis | 3a5378a0f5b5e526c199dae7c4d72ef3330bd573 | 0f9b85b3ac7284174a011e619fdd6f6e3971360c | refs/heads/master | 2023-03-14T20:58:43.282707 | 2021-03-14T12:41:41 | 2021-03-14T12:41:41 | 243,502,559 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import randint
# Annotation indexes
bald = 4
bangs = 5
black_hair = 8
blond_hair = 9
brown_hair = 11
gray_hair = 17
receding_hairline = 28
straight_hair = 32
wavy_hair = 33
# gender and beard
goatee = 16
male = 20
mustache = 22
no_beard = 24
sideburns = 34
# face
chubby = 13
high_cheekbones = 19
oval_face = 25
rosy_cheeks = 29
double_chin = 14
# nose
big_nose = 7
pointy_nose = 27
# mouth
mouth_open = 21
smiling = 31
# eye area
arched_eyebrows = 1
bags_under_eyes = 3
bushy_eyebrows = 12
narrow_eyes = 23
# attractive and makeup + young
attractive = 2
make_up = 18
young = 39
# skin
pale = 26
# not added: 6,15,30,35,36,37,38
def get_introduction_info(annotations):
intro = ""
skin = ""
age = ""
gender = ""
if annotations[39] == "1\n":
age = " young"
else:
rand = randint(0, 1)
ages = ["n old", " middle aged"]
age = ages[rand]
if annotations[male] == "1":
gender = "male"
else:
gender = "female"
if annotations[pale] == "1":
rand = randint(0, 2)
skin1 = " with pale skin and"
skin2 = " who has a pale skin and"
skin3 = " that has pale skin"
skins = [skin1, skin2, skin3]
skin = skins[rand]
intro = f"A{age} {gender} {skin}"
return intro
def get_gender_auxiliary(annotations):
if annotations[male] == "1":
return "he"
else:
return "she"
def get_gender_auxiliary2(annotations):
if annotations[male] == "1":
return "his"
else:
return "her"
def get_hair(annotations):
hair_color = get_hair_color(annotations)
hair_type = get_hair_type(annotations)
gender = "woman"
if annotations[20] == "1":
gender = "man"
result = f" with {hair_color} {hair_type} hair "
if annotations[receding_hairline] == "1":
rand = randint(0, 1)
x = [" and a receding hairline", f" , the {gender} has a receding hairline"]
result += x[rand]
if annotations[bangs] == "1":
result += " with bangs"
if annotations[20] == "1" and annotations[sideburns] == "1":
result += " and sideburns"
return result + " ."
def get_hair_color(annotations):
if annotations[bald] == "1":
return "balding"
elif annotations[black_hair] == "1":
return "black"
elif annotations[blond_hair] == "1":
return "blond"
elif annotations[brown_hair] == "1":
return "brown"
elif annotations[gray_hair] == "1":
return "gray"
else:
return ""
def get_hair_type(annotations):
if annotations[straight_hair] == "1":
return "straight"
elif annotations[wavy_hair] == "1":
return "wavy"
else:
return ""
def get_face(annotations):
face = ""
face_middle = "a face with "
face_end = ""
if annotations[chubby] == "1":
face = " chubby"
face_middle = " face with"
face_end = " face"
if annotations[oval_face] == "1":
face += " oval"
face_middle = " face with"
face_end = " face"
# Almost all of them has this so I removed it. Bad annotations
# if annotations[high_cheekbones] == "1":
# face += f" {face_middle} high cheekbones"
# face_middle = "and"
# face_end = ""
if annotations[rosy_cheeks] == "1":
face += f" {face_middle} rosy cheeks "
face_middle = " and"
face_end = ""
if annotations[double_chin] == "1":
face += f" {face_middle} double chin "
face_end = ""
if face != "":
face = face + face_end + " ."
return face
def get_nose(annotations):
nose = ""
nose_middle = ""
if annotations[pointy_nose] == "1":
nose = " pointy"
nose_middle = " and"
if annotations[big_nose] == "1":
nose += f"{nose_middle} big"
if nose != "":
nose += " nose"
return nose
def get_beard(annotations):
beard = ""
if annotations[goatee] == "1":
beard = " a goatee"
if annotations[mustache] == "1":
beard = " a mustache"
if annotations[no_beard] == "1":
beard = " no beard"
return beard
def get_eye_information(annotations, he_she, his_her):
bags = False
narrow = False
eyes = ""
eyes_middle = ""
eyes_end = ""
bushy = ""
if annotations[narrow_eyes] == "1":
eyes = f" {he_she} has narrow eyes"
narrow = True
if annotations[bags_under_eyes] == "1":
if not narrow:
eyes = f" {he_she} has bags under {his_her} eyes"
else:
eyes += f" with bags under {his_her} eyes"
bags = True
if annotations[bushy_eyebrows] == "1":
bushy = " , bushy"
if annotations[arched_eyebrows] == "1":
if bags:
eyes += f" and {he_she} has arched {bushy} eyebrows"
else:
eyes = f" {he_she} has arched {bushy} eyebrows"
return eyes + " ."
def get_makeup(annotations):
makeup_ = ""
young_ = ""
gender_ = " woman"
attractive_ = ""
is_none = True
if annotations[young] == "1":
_ = " young"
if annotations[20] == "1":
gender_ = " man"
if annotations[attractive] == "1":
attractive_ = " attractive"
is_none = False
if annotations[make_up] == "1":
makeup_ = f" The {young_} {attractive_} {gender_} is wearing heavy make up ."
is_none = False
else:
makeup_ = f" The {young_} {gender_} is {attractive_} ."
if is_none:
return ""
else:
return makeup_
def get_mouth_area(annotations, he_she):
mouth_middle = ""
gender = " woman"
is_none = True
    mouth_is_open = False
if annotations[20] == "1":
gender = " man"
mouth = f" The {gender}"
if annotations[mouth_open] == "1":
mouth += f" has a slightly open mouth"
mouth_middle = " and"
is_none = False
        mouth_is_open = True
if annotations[smiling] == "1":
        if not mouth_is_open:
he_she = ""
mouth += f" {mouth_middle} {he_she} is smiling ."
is_none = False
else:
x = randint(0,9)
# add serious with 10% chance because annotations are bad and not all not smiling people are serious looking
if x==1:
            if not mouth_is_open:
he_she = ""
mouth += f" {mouth_middle} {he_she} looks serious ."
is_none = False
if is_none:
return ""
else:
return mouth
| UTF-8 | Python | false | false | 6,550 | py | 15 | annotation_manager.py | 12 | 0.540153 | 0.519847 | 0 | 308 | 20.266234 | 116 |
pardusnimr/adelscrapper | 2,723,009,266,699 | 7585f0f8451324cdbc6807bb71226fcd7d5f6a5f | fcb5b41c20330ae17093d5449f209d113b2a1775 | /wed2.py | 51e5d2ab8e7649f4ced88765488a0c60c6ed03ba | []
| no_license | https://github.com/pardusnimr/adelscrapper | 0c520650881b2dd05d1cd79265065dd708a769bd | 83c66ba82cf6c058cd4e6febc33a70012fb39dc4 | refs/heads/master | 2021-01-10T05:25:07.229791 | 2015-12-09T20:33:30 | 2015-12-09T20:33:30 | 47,716,780 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import lxml.html
import pymongo
client = pymongo.MongoClient("mongodb://nimr:d7hero@ds041871.mongolab.com:41871/adel")
db = client['adel']
f = open('allrabea2.html')
ff = f.read()
doc = lxml.html.document_fromstring(ff)
region = ''
city = ''
district = ''
scheme = ''
land = ''
deal = ''
deal_price = ''
land_area = ''
price_per_sqr_meter = ''
records = db.records
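# Each <tr> encodes one level of the hierarchy: the padding-left of its first cell
# tells us whether the row is a region, city, district, scheme, land or deal record.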
for row in doc.xpath("//tr"):
chiled = row.getchildren()
if chiled[0].attrib['style'] == 'padding-left: 5px':
region = chiled[0].text
elif chiled[0].attrib['style'] == 'padding-left: 17px':
city = chiled[0].text
elif chiled[0].attrib['style'] == 'padding-left: 29px':
district = chiled[0].text
elif chiled[0].attrib['style'] == 'padding-left: 41px':
scheme = chiled[0].text
elif chiled[0].attrib['style'] == 'padding-left: 53px':
land = chiled[0].text
elif chiled[0].attrib['style'] == 'padding-left: 65px':
deal = chiled[0].text
deal_price = chiled[1].text
land_area = chiled[2].text
price_per_sqr_meter = chiled[3].text
record = {"region" : region,
"city" : city,
"distric" : district,
"scheme" : scheme,
"land" : land,
"deal" : deal,
"deal_price" : deal_price,
"land_area" : land_area,
"price_per_sqr_meter" : price_per_sqr_meter}
records.insert(record)
| UTF-8 | Python | false | false | 1,526 | py | 78 | wed2.py | 5 | 0.543906 | 0.517693 | 0 | 56 | 26.089286 | 86 |
cho-jae-seong/Algorithm | 798,863,933,104 | 99dbc7e94cb5a714ff1c19d5d9bc3e240e4b05d1 | 6feda26060cfc9f79a98acdedc8ef4bd82bf0587 | /BOJ17140.py | 288582525c2c43ac445712f9f9b4d16524acda4d | []
| no_license | https://github.com/cho-jae-seong/Algorithm | 1ff188ec1dbba370af6cfbf0186728ab536930b6 | 436dadb8be5c308c276562de53a7f4bf25d03bfa | refs/heads/master | 2023-07-04T04:27:33.741983 | 2021-08-10T10:12:04 | 2021-08-10T10:12:04 | 260,405,007 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from collections import Counter
from functools import reduce
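# BOJ 17140: the R operation sorts each row's non-zero values by (count, value),
# keeps at most 50 (value, count) pairs, flattens them back into the row and pads all
# rows with zeros to equal length; the C operation is done by transposing, applying R,
# and transposing back.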
def R(array):
mx=0
for i in range(len(array)):
X=Counter(array[i])
del X[0]
X=list(X.items())
X.sort(key=lambda x: (x[1],x[0]))
if len(X)>50:
X=X[:50]
array[i]=reduce(lambda x, y: list(x)+list(y), X[1:], list(X[0]))
mx=max(mx,len(array[i]))
for i in range(len(array)):
if len(array[i])<mx:
array[i].extend([0]*(mx-len(array[i])))
r,c,k=map(int, sys.stdin.readline().split())
r,c=r-1,c-1
a=[list(map(int, sys.stdin.readline().split()))for _ in range(3)]
if r<len(a) and c<len(a[0]) and a[r][c]==k:
print(0)
exit(0)
time=0
while True:
row_len=len(a)
col_len=len(a[0])
if row_len>=col_len:
R(a)
else:
a=list(map(list, zip(*a)))
R(a)
a=list(map(list, zip(*a)))
time+=1
if time>100:
print(-1)
exit(0)
if r<len(a) and c<len(a[0]) and a[r][c]==k:
print(time)
exit(0)
| UTF-8 | Python | false | false | 1,086 | py | 123 | BOJ17140.py | 123 | 0.486188 | 0.461326 | 0 | 43 | 23.255814 | 72 |
fattredd/fattredd.github.io | 16,621,523,470,920 | 0b2ad160523e9b476f4f943353278d81f4b705fb | f1fed01b75e8968a1bf1ab194a12b2c3ad7ccb95 | /py/textOffset.py | 9f36744b36ed7d5cb9ad4d9953f0838d932cb2da | []
| no_license | https://github.com/fattredd/fattredd.github.io | c0553f938df005b822b3640cdc56d6c0bb453c8c | d45da47ae3a2f5f40848b89f47b97e6127996d48 | refs/heads/master | 2021-01-19T21:28:11.700195 | 2019-05-13T14:11:28 | 2019-05-13T14:11:28 | 21,010,252 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import msvcrt as m
import time
alpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def wait():
key = m.getch()
def offsetStr(string, num=1):
out = '' #23
for x in string:
if x in alpha:
newNum = ord(x)-65+num
if newNum > 25:
newNum = newNum%26
elif newNum < 0:
newNum += 26
out += chr(newNum+65)
else:
out += x
return out
def fileR(name='gf.txt'):
f = open(name,'r')
out = f.read().split('\n')
f.close()
return out
def fileW(textL, name='gf2.txt'):
f = open(name, 'w+')
f.write('\n'.join(textL))
f.close()
def tryRange(string, num, inum=0):
for i in range(inum,num):
print i, offsetStr(string, i)
wait()
def doAll(L,off):
out = []
for x in L:
out.append(offsetStr(x,off))
fileW(out) | UTF-8 | Python | false | false | 721 | py | 71 | textOffset.py | 60 | 0.613037 | 0.590846 | 0 | 42 | 16.190476 | 36 |
raulastu/Algorithm-Competitions-EWS | 19,224,273,620,446 | 9e21c959008307071eb15529af013301085bc6d9 | fc4897c00ad5a6a641e12e4affe374d686ae7714 | /coderperu/Round13/D.py | c2565b3875920ac9f34256e178908044246de2fa | []
| no_license | https://github.com/raulastu/Algorithm-Competitions-EWS | b651c0af684b7e4a1afc7cfabdc6fd9d1dc8e45d | 07801fe774778dc5c81682f6a893f97a79ed4022 | refs/heads/master | 2020-04-10T12:18:50.923422 | 2015-08-31T01:45:15 | 2015-08-31T01:45:15 | 7,485,600 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.stdin = open("D.in", "r")
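# Fast modular exponentiation by repeated squaring (helper; not used by the solution below)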
def expo(a, b, MOD):
if (b==1):
return a%MOD;
if (b==2):
return (a*a)%MOD;
if (b%2==0):
return expo(expo(a%MOD,b/2,MOD),2,MOD)%MOD
else:
return a*expo(expo(a%MOD,(b-1)/2,MOD),2,MOD)%MOD;
def solve():
A = raw_input().split(" ")
a = int(A[0]);
b = int(A[1]);
k = int(A[2]);
r = pow(a,b)
# print(r)
if(r<10000 and k >= 4):
print(-1)
return
if(r<1000 and k >= 3):
print(-1)
return
if(r<100 and k >= 2):
print(-1)
return
if(r<10 and k >= 1):
print(-1)
return
s=str(r%10000)
if(k>=len(s)):
print(0)
else:
print(s[len(s)-1-k])
N = int(raw_input())
while(N>0):
    solve()
N -= 1 | UTF-8 | Python | false | false | 1,276 | py | 324 | D.py | 286 | 0.418495 | 0.359718 | 0 | 69 | 17.507246 | 58 |
eqiihuu/EE512 | 9,577,777,106,043 | 1b09bd7c3831b1f01408ec4f9ce8124e76bd73f2 | a8f778fed3e42cad4554c57ad607c19d10fbc697 | /EE512-graphical-model-master/test.py | a0e42ae50f71d4dfe58d3c29c225fd88b799a41a | []
| no_license | https://github.com/eqiihuu/EE512 | 75b38842979aaae2b90e218a57eaa0c97a7b7183 | 0ce92f60f6e2a7e212ced7fc30bf988d81faf223 | refs/heads/master | 2021-08-28T03:34:42.352695 | 2017-12-05T11:45:11 | 2017-12-05T11:45:11 | 113,174,681 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import exact_inference as EI
from read_data import read_uai
#from node_clique import Node, Clique, Graph, Junction_tree_cloud
graph = read_uai("../data/test.uai")
for clique in graph.cliques:
print("-------------------")
for node in clique.nodes:
print(node.index)
EI.min_fill(graph)
for node in graph.nodes:
print(node.get_neighbor_index())
max_cliques = EI.MSC(graph)
for clique in max_cliques:
print("------------------")
for node in clique:
print(node.index)
clouds = EI.generate_junction_tree(max_cliques)
EI.rotate_axis(max_cliques[0], graph.cliques[9])
| UTF-8 | Python | false | false | 602 | py | 5 | test.py | 4 | 0.652824 | 0.649502 | 0 | 24 | 24.083333 | 65 |
beverast/LS-DS-Unit-CS | 4,947,802,351,867 | 9ae6cda83b6790e6395ad57070494ffb32de8775 | 623fa21c3e24373d3e90f7f40a2cef983a32a6b5 | /Whiteboard-Lectures/skip_list.py | 8df9b745930cc074f51c86e20fb22f18b3678bd1 | []
| no_license | https://github.com/beverast/LS-DS-Unit-CS | 4c138ba427c87e9ef753512362d8814acc6aba16 | e0fb759da5ffa49aa5a04e267bb150d00db119e2 | refs/heads/master | 2023-05-29T22:47:10.151353 | 2020-05-16T19:00:58 | 2020-05-16T19:00:58 | 264,509,235 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Skip list: probabilistic data structure based on the Linked list
# Description of a skip list: https://brilliant.org/wiki/skip-lists/
class SkipNode(object):
def __init__(self, elem=None):
self.elem = elem
# 'Square' representation (not list, station-like representation)
self.prev = None
self.next = None
self.below = None
self.above = None
class SkipList(object):
def __init__(self, max_height):
# Should have pointers to every single station in first node
self.max_height = max_height
self.head = [SkipNode() for i in range(max_height)]
if len(self.head) > 1:
# Connect all nodes, loop through all nodes in head
for i in range(max_height):
# For the ith node, connect it's 'below' to the i-1 node
if i == 0:
self.head[i].above = self.head[i + 1]
elif i == len(self.head) - 1:
self.head[i].below = self.head[i - 1]
else:
self.head[i].above = self.head[i + 1]
self.head[i].below = self.head[i - 1]
def search(self, target):
'''
Starts at the top node of the head
If the next node along this line > target:
Move down a layer, check next node along line again
If the next node along line <= target:
Move right to next node
Stop criteria: target is found
'''
        # Start at the topmost sentinel node of the head
        current = self.head[-1]
        while True:
            # Go right while the next node's element is still <= target
            while current.next and current.next.elem <= target:
                current = current.next
            # Drop down a layer; stop once the bottom layer is reached
            if current.below is None:
                break
            current = current.below
        return current
def insert(self, val):
# Search the skip list for the largest value < val
# Assuming the value doesn't exist in the list
# Create a new node with the val and connect it to the found node
# Add layers to this new node by flipping coins to determine # of layers
# Connect layers to nodes in the same layers
pass
| UTF-8 | Python | false | false | 2,195 | py | 20 | skip_list.py | 17 | 0.560364 | 0.556264 | 0 | 61 | 34.983607 | 80 |
abhusnurmath/rando | 15,470,472,227,060 | 32000b9ec633b8ae10abff2854ce83813fb4b0ac | a6df3ca677cb301ae67220d1a2bec37f5045d270 | /cit590Examples/fancySort.py | 501c66aca4a36a1659d160958626deaf33bbe827 | []
| no_license | https://github.com/abhusnurmath/rando | 53c6fdf5ef8ecbe8440c09acb79636db7f0bbd42 | 84260a35399d983ed0154a5e004cadd970d2be6a | refs/heads/master | 2020-05-19T11:07:09.372441 | 2015-02-21T01:35:25 | 2015-02-21T01:35:25 | 2,403,652 | 7 | 10 | null | false | 2015-01-25T02:02:12 | 2011-09-17T05:34:24 | 2015-01-24T02:43:10 | 2015-01-24T02:43:10 | 651 | 14 | 14 | 1 | Java | null | null | def compareBasedOnLastComponent(x,y):
if x[-1] < y[-1]: return -1
elif x[-1] == y[-1]: return 0
return 1
data = [['arv','Penn', 33], ['fed', 'West Point', 45],
['Ben', 'Cornell', 25], ['Steven', 'UVA', 24]]
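# Example usage (Python 2 cmp-style sort; this call is illustrative, not part of the original file):
# data.sort(compareBasedOnLastComponent)   # orders the records by their last component (age)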
| UTF-8 | Python | false | false | 233 | py | 153 | fancySort.py | 153 | 0.502146 | 0.437768 | 0 | 7 | 31.571429 | 54 |
santiago-dc/MyLearningPath | 18,030,272,713,282 | 7f502f1af7de2e13253bdc93805f3e428b45e9dd | acc7d1568e286afeae2841b218761943ae9e21ba | /skin_cancer/.ipynb_checkpoints/CNN_dataset2-checkpoint.py | cca594931a63a4d3befe3df9b594070cc259510c | []
| no_license | https://github.com/santiago-dc/MyLearningPath | b0b826397b185855492fdbf793cc26232dfb06d8 | 414809a1770cc58d66c132bafe4eb2c75e83e99d | refs/heads/master | 2021-09-11T12:05:17.503457 | 2021-09-07T17:03:26 | 2021-09-07T17:03:26 | 241,696,158 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
from PIL import Image
import os
import glob
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
import tensorflow as tf
np.random.seed(5)
benign_train_folder = 'data/melanoma/DermMel/train/NotMelanoma'
malignant_train_folder = 'data/melanoma/DermMel/train/Melanoma'
benign_test_folder = 'data/melanoma/DermMel/test/NotMelanoma'
malignant_test_folder = 'data/melanoma/DermMel/test/Melanoma'
def read(folder_path):
data_path = os.path.join(folder_path,'*jpg')
folder = glob.glob(data_path)
matrix = []
for f in folder:
img = np.asarray(Image.open(f).convert("RGB"))
matrix.append(img)
matrix = np.asarray(matrix)
return matrix
#Create data
X_benign_train = read(benign_train_folder)
X_malignant_train = read(malignant_train_folder)
X_benign_test = read(benign_test_folder)
X_malignant_test = read(malignant_test_folder)
#Create labels
Y_benign_train = np.zeros(X_benign_train.shape[0])
Y_malignant_train = np.ones(X_malignant_train.shape[0])
Y_benign_test = np.zeros(X_benign_test.shape[0])
Y_malignant_test = np.ones(X_malignant_test.shape[0])
#Merge and Shuffle data
X_train = np.concatenate((X_benign_train, X_malignant_train), axis = 0)
Y_train = np.concatenate((Y_benign_train, Y_malignant_train), axis = 0)
# s = np.arange(X_train.shape[0])
# np.random.shuffle(s)
# X_train = X_train[s]
# Y_train = Y_train[s]
# train_dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
# train_dataset = train_dataset.shuffle(buffer_size=6000).batch(10)
X_test = np.concatenate((X_benign_test, X_malignant_test), axis = 0)
Y_test = np.concatenate((Y_benign_test, Y_malignant_test), axis = 0)
# s = np.arange(X_test.shape[0])
# np.random.shuffle(s)
# X_test = X_test[s]
# Y_test = Y_test[s]
# test_dataset = tf.data.Dataset.from_tensor_slices((X_test, Y_test))
# test_dataset = test_dataset.batch(10)
# X =np.concatenate((X_train, X_test), axis = 0)
# Y =np.concatenate((Y_train, Y_test), axis = 0)
# s = np.arange(X.shape[0])
# np.random.shuffle(s)
# X = X[s]
# Y = Y[s]
# X_train = X[0:3500]#2637
# X_test = X[3500:]
# Y_train = Y[0:3500]
# Y_test = Y[3500:]
# Display first 15 images of moles, and how they are classified
# w=40
# h=30
# fig=plt.figure(figsize=(12, 8))
# columns = 5
# rows = 3
# for i in range(1, columns*rows +1):
# ax = fig.add_subplot(rows, columns, i)
# if Y_train[i] == 0:
# ax.title.set_text('Benign')
# else:
# ax.title.set_text('Malignant')
# plt.imshow(X_train[i],interpolation='nearest')
# plt.show()
#Turn labels into one hot encoding (we'll see whether this is really needed or whether I leave it)
Y_train = to_categorical(Y_train, num_classes= 2)
Y_test = to_categorical(Y_test, num_classes= 2)
# Normalization
# X_train = X_train/255.
# X_test = X_test/255.
#Model
from keras.models import Sequential
from keras import regularizers
from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, AveragePooling2D, BatchNormalization, Dropout
model = Sequential()
model.add(BatchNormalization())
model.add(Conv2D(3, kernel_size=10 ,padding='same', strides=2, activation='relu', input_shape=(450,600,3)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='same', data_format=None))
#model.add(BatchNormalization())
#model.add(Dropout(0.25))
# model.add(Conv2D(3, kernel_size=3,padding='same', strides=2, activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='same', data_format=None))
#model.add(BatchNormalization())
#model.add(Dropout(0.25))
# model.add(Flatten())
# #model.add(Dropout(0.25))
# model.add(Dense(450, activation='relu',kernel_regularizer=regularizers.l2(0.01)))
# #model.add(Dropout(0.25))
# model.add(Dense(450, activation='relu',kernel_regularizer=regularizers.l2(0.01)))
# # model.add(Dropout(0.25))
# model.add(Dense(200, activation='relu',kernel_regularizer=regularizers.l2(0.01)))
# #model.add(Dropout(0.25))
# model.add(Dense(100, activation='relu',kernel_regularizer=regularizers.l2(0.01)))
# #model.add(Dropout(0.25))
# model.add(Dense(30, activation='relu',kernel_regularizer=regularizers.l2(0.01)))
# model.add(Dense(2, activation='softmax'))
model.add(Flatten())
# model.add(Dense(450, activation='relu'))
# model.add(Dropout(0.25))
model.add(Dense(450, activation='relu'))
#model.add(Dropout(0.25))
model.add(Dense(200, activation='relu'))
#model.add(Dropout(0.25))
model.add(Dense(100, activation='relu'))
#model.add(Dropout(0.25))
model.add(Dense(30, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), batch_size=10, epochs=10)
test_loss, test_acc = model.evaluate(X_test, Y_test, batch_size=10)
print('Test Acc: ', test_acc)
print(':)')
| UTF-8 | Python | false | false | 4,839 | py | 46 | CNN_dataset2-checkpoint.py | 6 | 0.704691 | 0.66708 | 0 | 145 | 32.372414 | 108 |
yamini94/imdb | 6,975,026,891,233 | 793e3b215c83227c4228f998ce76ffb8a07721c4 | f7ea2b73e87508ae9a145d79752d0251c362c6ab | /movie/documents.py | c9780389bc7a1494611c42797a2e9cd99ae1e506 | []
| no_license | https://github.com/yamini94/imdb | ec730d4a04c266e71ca8d9e01edffe99506daef4 | 66e341675f5c1976d9f44378be179751e9169d98 | refs/heads/master | 2022-12-10T10:17:11.509859 | 2020-08-28T03:11:20 | 2020-08-28T03:11:20 | 290,918,417 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django_elasticsearch_dsl import Document
from django_elasticsearch_dsl.registries import registry
from .models import Imdb,Genre
from elasticsearch_dsl import Index
from django_elasticsearch_dsl import Document, fields
# @registry.register_document
imdb_index = Index('imdb')
@registry.register_document
@imdb_index.document
class ImdbDocument(Document):
genre = fields.NestedField(properties={
'name': fields.TextField()
})
class Django:
model = Imdb
related_models = [Genre]
fields = [
'id',
'name',
'popularity',
'director',
'imdb_score',
'created_date',
'modified_date'
]
def get_instances_from_related(self, related_instance):
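        # When a related Genre instance changes, re-index every Imdb document that references it.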
return related_instance.imdb_set.all()
# class Index:
# # Index name
# name = 'imdb'
# settings = {'number_of_shards': 1,
# 'number_of_replicas': 0}
# class Django:
# model = Imdb # django model name
# # Model fields to display
# fields = [
# 'name',
# 'popularity',
# 'director',
# 'imdb_score',
# 'genre',
# 'created_date',
# 'modified_date'
# ]
| UTF-8 | Python | false | false | 1,148 | py | 15 | documents.py | 13 | 0.621951 | 0.620209 | 0 | 51 | 21.470588 | 56 |
ChinaChenp/Knowledge | 4,956,392,282,476 | 7adb242787d6d0c807ff3953c1a387a2b2def07f | 5e3fb85de7caa7e9529114e97a992f1f88067d6c | /interview/interview_python/jianzhioffer2/16.py | 02935955e68e34bcc942e9fdd00ea01bbb4d38d5 | []
| no_license | https://github.com/ChinaChenp/Knowledge | ad3f0b290fadf18b1a4be11246041efa81736df7 | 64bb5286856ad5c01e682fc5e93477c6aa7299b9 | refs/heads/master | 2021-06-05T03:13:32.071326 | 2019-12-25T10:45:51 | 2019-12-25T10:45:51 | 45,509,734 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 数的N次方
def Power(base, exp):
    # 0 raised to a negative power is undefined: taking the reciprocal of 0**n would fail
if base >= -1e-9 and base <= 1e-9 and exp < 0:
return 0.0
    # For a negative exponent, compute the positive power first and take the reciprocal
abs_exp = abs(exp)
    # Compute the power
re = 1.0
for _ in range(0, abs_exp):
re *= base
    # Negative exponent: return the reciprocal
if exp < 0:
return 1.0 / re
return re
print(Power(2, 3))
print(Power(1, 2))
print(Power(2, -3))
print(Power(-2, -3))
print(Power(0, -3))
| UTF-8 | Python | false | false | 522 | py | 368 | 16.py | 300 | 0.518605 | 0.460465 | 0 | 26 | 15.538462 | 50 |
tomaxh/todo | 10,161,892,646,172 | 2a3a550063bb199e8a875e22dcb7ec4a8277ba4c | be00691fd152502981c6a221d4636db0a0b93e7e | /backend/src/articles/admin.py | b563cb1b766d3b4bde410aa1420b0957a2f97342 | []
| no_license | https://github.com/tomaxh/todo | 8ca34e9200e114753a671ea391c3d7ee0dfdb71a | 2c04aa272be6c3cd56bcd7edd8be287a01c31c12 | refs/heads/master | 2020-05-18T11:05:47.890825 | 2019-05-15T22:09:57 | 2019-05-15T22:09:57 | 184,369,447 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Todo
# Register your models here.
admin.site.register(Todo) | UTF-8 | Python | false | false | 111 | py | 16 | admin.py | 12 | 0.801802 | 0.801802 | 0 | 5 | 21.4 | 32 |
codingEnzo/YunfangWork | 171,798,741,664 | bd47edb4791d1e62727386f35abfe4ff2434c2d0 | d7ade211dd818f9653c5a1de6edba343dff1514c | /developer_data_push/start_task.py | 288489272d9ec9b801cbf338d851d32e8e420703 | []
| no_license | https://github.com/codingEnzo/YunfangWork | ee6382feedd663d1906ff54f41988df814395444 | da6c3b622ea10e28802bcdead01d422a90784cb5 | refs/heads/master | 2017-09-10T02:45:10.013828 | 2017-08-25T03:36:44 | 2017-08-25T03:36:44 | 81,432,516 | 0 | 1 | null | false | 2017-03-23T12:29:25 | 2017-02-09T09:22:26 | 2017-02-09T09:49:47 | 2017-03-23T12:29:25 | 458 | 0 | 1 | 0 | HTML | null | null | #coding=utf-8
from __future__ import absolute_import
import sqlalchemy
import __builtin__
from db_orm import data_handle_instance,db_instance
from celery import Celery
from settings import *
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
from task_env import app
def _init():
celery = Celery(broker="redis://192.168.6.34/2",backend="redis://192.168.6.34/3")
celery.conf.broker_transport_options = {'visibility_timeout': 240000}
celery.conf.worker_prefetch_multiplier = 1
try:
_mysql_format = "mysql+pymysql://{db_user_name}:{db_passwd}@{db_address}:{db_port}/{db_name}?connect_timeout=1800&charset=utf8"
_mysql_source_path = _mysql_format.format(db_user_name=db_user_name,
db_passwd=db_passwd,
db_address=db_source,
db_port=db_port,
db_name=db_source_name)
source_engine = create_engine(_mysql_source_path,echo=False)
source_conn = source_engine.connect()
source_metadata = MetaData(source_engine)
source_Session = sessionmaker(bind=source_engine)
source_session = source_Session(bind=source_conn)
try:
source_Base = declarative_base(metadata=source_metadata)
source_Base.metadata.reflect(source_engine, only=[db_source_schema,])
schema_instance = source_Base.metadata.tables[db_source_schema]
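            # Dynamically build an ORM class mapped onto the reflected table, using TID as its primary key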
source_schema = __builtin__.type('source_schema',(source_Base,),{'__table__':schema_instance,'__mapper_args__':{'primary_key':[schema_instance.c.TID,]}})
return source_session,source_schema,celery
except Exception as e:
print Exception,":",e
if isinstance(e,sqlalchemy.exc.NoSuchTableError):
print "Cannot find the table."
return
else:
return
except Exception as e:
print Exception,":",e
return
@app.task(bind=True)
def begin_task(self):
source_session,source_schema,celery = _init()
source_max_id = _get_source_max_id()
source_min_id = _get_source_min_id()
_reset_table()
source_begin_id = source_min_id
while True:
if source_begin_id+db_source_step >= source_max_id:
celery.send_task("start_task.create_new_task",
args=[source_begin_id,source_max_id],queue="develop_task")
break
else:
celery.send_task("start_task.create_new_task",
args=[source_begin_id,source_begin_id+db_source_step],queue="develop_task")
source_begin_id += db_source_step
@app.task(bind=True)
def create_new_task(self,begin_num,end_num):
source_session,source_schema,celery = _init()
pdi = data_handle_instance.pandas_instance(dbUserName=db_user_name,
dbPasswd=db_passwd,
dbAddress=db_source,
dbPort=db_port,
schema_list=[db_source_schema,],
dbName=db_source_name)
query_sql_for_new = 'select * from %s where TID >= %s and TID< %s '%(db_source_schema,begin_num,end_num)
pdi.create_dataframe('df_for_new', query_sql_for_new.decode('utf-8'))
pdi.create_task_new(pdi.df_for_new)
def _reset_table(table_name="house_count"):
reset_db = db_instance.mysql_DB_instance(dbUserName='root',
dbPasswd='',
dbAddress='192.168.6.8',
dbName='develop_data')
reset_db.conn.execute('truncate {table}'.format(table=table_name))
def _get_source_max_id():
source_session,source_schema,celery = _init()
res = source_session.query(func.max(source_schema.TID).label('max_id')).all()[0].max_id
print "max id is : ",res
return res
def _get_source_min_id():
source_session,source_schema,celery = _init()
res = source_session.query(func.min(source_schema.TID).label('min_id')).all()[0].min_id
print "min id is : ",res
return res
| UTF-8 | Python | false | false | 4,317 | py | 233 | start_task.py | 209 | 0.576789 | 0.566597 | 0 | 103 | 40.912621 | 165 |
HPCD/room-number-ocr | 3,272,765,127,997 | 918b26caaf578aead2bee87ced834a314b678b92 | 039370bec9ba63a49bc48d3f7e11463dc10da032 | /testpost.py | 468fb715578abaea06f6067cbe13989d741e23b9 | []
| no_license | https://github.com/HPCD/room-number-ocr | 96660b72e2a83a773de198de25d41ea0393c6fca | d7548c52a83351f8b056ae43e8459a70bf32b962 | refs/heads/master | 2023-03-24T22:43:20.236234 | 2021-03-19T03:08:26 | 2021-03-19T03:08:26 | 349,283,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author:abner
@file:testpost.py
@ datetime:2020/8/26 16:45
@software: PyCharm
"""
import cv2
import base64
import requests
import unittest
from datetime import datetime
import json
import os
# Internal test address
in_url_ip = "http://10.74.150.75:32666"
# External access address for the test environment
# out_url_ip = "http://ai.gdii-yueyun.com:30067/"
# out_url_ip ="http://10.16.153.31:32488/"
out_url_ip = "http://ai.gdii-yueyun.com:30092"
docker_ip = "http://10.16.153.36:32088/"
ip = "http://10.200.117.108:32088/"
class DriverLicenseRequestUnitTest(unittest.TestCase):
def test_roomnum_ocr(self):
"""
        License plate number recognition test case
"""
car_img_path = 'D:\\abner\\project\\dataset\\room\\43.jpg'
img = cv2.imread(car_img_path)
img_encode = cv2.imencode('.jpg', img)[1]
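        # base64-encode the JPEG buffer; str() of the bytes gives "b'...'", so [2:-1] strips the b'' wrapper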
byte_image = str(base64.b64encode(img_encode))[2:-1]
# print(byte_image)
        # POST request
req = {"user_name": "yueyun", "device_id": "A1010", "image": byte_image}
print(req)
start_time = datetime.now()
in_url = docker_ip+"/ai/room/ocr"
in_url = "https://open-api.gdii-yueyun.com/ai/room/ocr"
print("dfff")
TOKEN = requests.post('https://open-api.gdii-yueyun.com/login',
json={"username": "AI", "password": "123456"},verify=True).json().get('data')
print("请求TOKEN!!")
headers = {"AG-X-Access-Token": TOKEN, "Content-Type": 'application/json'}
print(TOKEN)
res = requests.post(in_url,headers=headers, json=req)
result = json.loads(res.text)
print(result)
if __name__ == "__main__":
unittest.main()
| UTF-8 | Python | false | false | 1,737 | py | 22 | testpost.py | 14 | 0.591751 | 0.529588 | 0 | 69 | 23.246377 | 107 |
Ysnsn/python | 7,404,523,624,478 | 373ff21a836a36261f802c2e6c6e9419db7b8e7e | f89a96461b24dd448007ef221926dd83f5c54562 | /基本.py | 82af16231743c1b7f42a53d88404c9883d7e0bea | []
| no_license | https://github.com/Ysnsn/python | 4b34e96edbd21dfc255f3fcb0256cc23048dc994 | ccf6d4f3510d54724c7571ab22bd423d1e7c2d27 | refs/heads/master | 2023-01-04T13:28:24.013264 | 2020-08-04T02:59:47 | 2020-08-04T02:59:47 | 280,341,353 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ls=[7,4,5,2,3,5,6]
print("列表为",end="")
print(ls)
print("列表长度为{}".format(len(ls)))
print("列表中有{}个5".format(ls.count(5)))
print(ls[1:5:2])
new=eval(input("请输入一个新的元素:"))
ls.append(new)
ls[3:5]=[11,12,13]
print("列表为",end="")
print(ls)
print("列表中的最大值为:{}".format(max(ls)))
print("列表中的最小值为:{}".format(min(ls)))
n=eval(input("请输入要删除的元素符号:"))
del ls[n]
print("列表为",end="")
print(ls)
ls=sorted(ls,reverse=True)
print(ls)
ls=sorted(ls,reverse=False)
print(ls)
| UTF-8 | Python | false | false | 568 | py | 36 | 基本.py | 33 | 0.639738 | 0.59607 | 0 | 29 | 14.793103 | 37 |
damc/TextEditor | 13,520,557,082,282 | 3bed9bc228d353edbf82aad2eb18ac1655f4615f | bfdf5da40942676f17300f48ece3348cf50a67dd | /text_editor/__init__.py | f903ce9bdaf80bda6f5eb89ebc0b7188a416e065 | []
| no_license | https://github.com/damc/TextEditor | c81209163a09900e14ab33065cc03391a4cf3ab3 | 5801b09f490a2845f9ffcb67beb8fc2a39d19e94 | refs/heads/master | 2023-07-03T17:58:07.829734 | 2021-02-19T13:58:29 | 2021-02-19T13:58:29 | 395,630,579 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .text_editor import TextEditor | UTF-8 | Python | false | false | 35 | py | 6 | __init__.py | 5 | 0.857143 | 0.857143 | 0 | 1 | 35 | 35 |
OzupeSir/credit_card | 5,944,234,746,840 | 3ac2be34527b4b5a79671f5f93fcf151ce32b512 | 74ddde6c4d75e82761551190b3347bca38f9a90f | /credit_card/auto_bin.py | 9c4fe036c94e346563f99ed96a5a758b55034ea2 | []
| no_license | https://github.com/OzupeSir/credit_card | 551b5d1e84ce5ae9f7d89ac192a2024705f1bbcd | c7ff733629164d8050c286f3ba95cbfc031fe171 | refs/heads/master | 2020-07-26T20:56:23.394379 | 2019-09-25T12:08:56 | 2019-09-25T12:08:56 | 208,763,691 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # utf8
# Hsiaofei Tsien
import numpy as np
import pandas as pd
import scipy
import scipy.stats
class AutoBins:
def __init__(self, frame, y):
self._frame = frame.copy()
self._y = y
def _column_qcut(self, column,q):
        # Initial binning: first split the column into q quantile bins (20 by default)
_drop, bins = pd.qcut(self._frame[column], q=q, retbins=True, duplicates="drop")
        # Avoid boundary-value problems caused by the open/closed intervals
bins = list(bins)
bins.insert(0, -float("inf"))
bins.append(float("inf"))
        # Re-bin using the bin edges extended with -inf / +inf
self._frame[column+"_qcut"] = pd.cut(self._frame[column], bins=bins)
init_counts = list(self._frame[column+"_qcut"].value_counts().sort_index())
        # Check whether the first and last bins each hold more than 2% of the samples; if not, merge them into their neighbours (handles the empty bins created by adding inf)
if init_counts[0] < (len(self._frame)/50):
bins.pop(1)
if init_counts[-1] < (len(self._frame)/50):
bins.pop(-2)
        # Bin with these edges; the later steps merge them optimally
self._frame[column+"_qcut"] = pd.cut(self._frame[column], bins=bins)
        # Count the number of 0s and 1s in each bin
inf_init_bins = self._frame.groupby([column+"_qcut", self._y])[self._y].count().unstack()
        # Each num_bins entry is (lower edge, upper edge, count of 0s, count of 1s)
num_bins = [*zip(bins, bins[1:], inf_init_bins[0], inf_init_bins[1])]
return num_bins
def _merge_zero_bins(self,num_bins):
        # Make sure every bin contains both classes
while 0 in num_bins[0][2:]:
            # If a class count in the first bin is 0, merge it with the next bin
num_bins =self._merger_bins(num_bins,0)
idx=1
while idx<len(num_bins):
            # If a later bin has a zero class count, merge it into the previous bin;
            # num_bins gets shorter, so keep checking the current idx position
if 0 in num_bins[idx][2:]:
num_bins =self._merger_bins(num_bins, idx-1)
else:
                # If neither class count is 0, move on to the next idx
idx += 1
return num_bins
def _merger_bins(self, num_bins, x):
        # Merge the bins at index x and x+1 of num_bins
num_bins[x: x+2] = [(
num_bins[x][0],
num_bins[x+1][1],
num_bins[x][2]+num_bins[x+1][2],
num_bins[x][3]+num_bins[x+1][3]
)]
return num_bins
    # Function that computes the IV (information value)
def _get_iv(self, woe_df):
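        # IV = sum over bins of (share of 0s - share of 1s) * WOE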
rate = ((woe_df.count_0/woe_df.count_0.sum()) -
(woe_df.count_1/woe_df.count_1.sum()))
iv = np.sum(rate * woe_df.woe)
return iv
    # Function that computes the WOE (weight of evidence)
def _get_woe(self, num_bins):
        # Build the WOE table from the num_bins data
columns = ["min", "max", "count_0", "count_1"]
df = pd.DataFrame(num_bins, columns=columns)
df["total"] = df.count_0 + df.count_1
df["percentage"] = df.total / df.total.sum()
df["bad_rate"] = df.count_1 / df.total
df["woe"] = np.log(
(df.count_0 / df.count_0.sum()) /
(df.count_1 / df.count_1.sum())
)
return df
def _chi2_merge(self, num_bins):
p_values = []
        # Get the chi-square test p-value (or chi2 statistic) for every pair of adjacent bins
for i in range(len(num_bins)-1):
x1 = num_bins[i][2:]
x2 = num_bins[i+1][2:]
            # index 0 returns the chi2 statistic, index 1 returns the p-value.
pv = scipy.stats.chi2_contingency([x1, x2])[1]
# chi2 = scipy.stats.chi2_contingency([x1, x2])[0]
p_values.append(pv)
        # Merge the two adjacent bins with the largest p-value (i.e. the most similar ones)
idx = p_values.index(max(p_values))
num_bins = self._merger_bins(num_bins, idx)
return num_bins
def auto_bins(self, column, n=2, show_iv=True,q=20):
print(f"对 {column} 列进行分箱: ")
# 初始化分箱
num_bins = self._column_qcut(column,q)
        # Merge bins that do not contain both classes
num_bins = self._merge_zero_bins(num_bins)
        # Keep merging the most similar adjacent bins via _chi2_merge
while len(num_bins) > n:
num_bins = self._chi2_merge(num_bins)
woe_df = self._get_woe(num_bins)
iv = self._get_iv(woe_df)
if all(woe_df['percentage']>=(1/q)):
if show_iv:
print(f"分组个数: {len(num_bins):02d} \tiv值: {iv}")
print(f"woe情况:\n",woe_df)
return num_bins, woe_df,iv
# if __name__ == "__main__":
# model_data = pd.read_csv("ChiMergeData.csv", encoding="utf8")
# bins_data = AutoBins(model_data, "SeriousDlqin2yrs")
# bins_data.auto_bins("age", n=4)
| UTF-8 | Python | false | false | 4,951 | py | 2 | auto_bin.py | 1 | 0.52143 | 0.501066 | 0 | 131 | 31.236641 | 97 |
nayamama/asycio-programming | 18,038,862,644,295 | 09d3e2131b1f4a97e4b8ec8e54b48859755f2a8a | 1ab21f43ee50952693c06711df296ebf41ec9699 | /thread_programming/os_processes.py | dd6534155f6b05a2aaae41b1903c269c4ea8f53a | []
| no_license | https://github.com/nayamama/asycio-programming | 17cc478fda383a7d68212b1095e2cc79d789df03 | fdc52a7bab97c95e1b50018ab16f4fc99c263eed | refs/heads/master | 2020-05-27T06:27:07.759152 | 2019-05-25T04:46:54 | 2019-05-25T04:46:54 | 188,520,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from multiprocessing import Process
from syc_programming import io_bound, cpu_bound, timeit
@timeit()
def multiprocessed(n_thread, func, *args):
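    # Spawn n_thread separate OS processes running func(*args), start them all, then wait for each to finish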
pros = []
for _ in range(n_thread):
p = Process(target=func, args=args)
pros.append(p)
# start the process
for p in pros:
p.start()
# ensure all processes have finished
for p in pros:
p.join()
if __name__ == '__main__':
a = 7777
b = 200000
urls = [
"http://google.com",
"http://yahoo.com",
"http://linkedin.com",
"http://facebook.com"
]
multiprocessed(10, cpu_bound, a, b)
multiprocessed(10, io_bound, urls) | UTF-8 | Python | false | false | 669 | py | 7 | os_processes.py | 6 | 0.572496 | 0.55157 | 0 | 30 | 21.333333 | 55 |
mobilase/python | 12,678,743,494,967 | f4a671d94d5442c60f91271df937eb9f2e0476a4 | 9c6382dec3012f29019d44619233293fbfc2125c | /08/01.py | 69798c23e721d25dc36959a71baab1a9f22f4805 | []
| no_license | https://github.com/mobilase/python | 7a56dd0d5fc63503aa4bad34d7ec677c5685eed7 | 5690d924c3ef0d9f0df25d184b1752cdc03399ca | refs/heads/master | 2022-04-25T05:09:12.692031 | 2020-04-28T17:55:30 | 2020-04-28T17:55:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import threading
import time
import random
def worker(number):
sleep = random.randrange(1, 10)
time.sleep(sleep)
print("Worker {} slept {} seconds\n".format(number, sleep))
for i in range(8):
t = threading.Thread(target=worker, args=(i,))
t.start()
print("All Treads are in queries\n")
| UTF-8 | Python | false | false | 311 | py | 146 | 01.py | 97 | 0.66881 | 0.655949 | 0 | 14 | 21.071429 | 63 |
nj7/SecureVault | 4,990,752,008,265 | 36ae039c7bc9a5b1887a6221b2fffdfa4e959282 | 1abb3698e84c61dfe0d8bac3dfbd29d26e3f1036 | /secure.py | 876be179ae4417b6e3479407e1168e505ab116b8 | []
| no_license | https://github.com/nj7/SecureVault | 784f14d8d5bbe81cdfe629decbbf132776ee9a0d | d1d5c6c9567a97c0754eadfc7ea29c7f7ea43be9 | refs/heads/master | 2021-01-23T00:14:46.576516 | 2017-03-30T01:12:14 | 2017-03-30T01:12:14 | 85,706,448 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import shutil
import os
import batchExecute
def main():
"""taking user input of view a file or adding a file to vault"""
while True:
choice = takeInput()
if choice == 1:
hideFile()
elif choice == 2:
findFile()
else:
print "Please Select Correct Output"
break
def takeInput():
while (True):
choice = int(raw_input("Select an option \n1.Hide a file \n2.Unhide a file\n"))
if choice == 1 or choice == 2:
return choice
def hideFile():
while(True):
file_path=raw_input("Enter File Path\n")
file_name=raw_input("Enter File Name\n")
src_of_file = file_path+"\\"
src_of_file += file_name
print "The Source of File you want to Secure :: "+ src_of_file
if raw_input("Do you want to Continue (Y/N)") == 'Y' or 'y':
break
#secure vault source
vault = "nj$SeCureVaUlt@ThIsIsTeStIngPhAse4ME"
dierectory = "C:\Secure_nj_project"
#creating secure vault for first run
if not os.path.exists(dierectory):
os.makedirs(dierectory)
dierectory += "\\"
dierectory += vault
if not os.path.exists(dierectory):
os.makedirs(dierectory)
print "true"
# copy file file from location to secure vault
shutil.copy2(src_of_file, dierectory)
#remove file from location
os.remove(src_of_file)
"""Hiding vault having file"""
#creating batch file to hide vault
dierectory += "\\batch.bat"
batch_file = open(dierectory,"w+")
batch_file.write("C:\ncd C:\Secure_nj_project\nattrib +h +r +s "+vault+"\necho y|cacls "+vault+" /p everyone:n")
batch_file.close()
#executing Batch File
batchExecute.open_batch(dierectory)
"""Hiding Project Folder having vault"""
#hiding Vault folder
batch_file = open("C:\\System.bat","w+")
batch_file.write("C:\nattrib +h +r +s Secure_nj_project\necho y|cacls Secure_nj_project /p everyone:n")
batch_file.close()
#executing Batch File
batchExecute.open_batch("C:\\System.bat")
def findFile():
while(True):
file_name=raw_input("Enter File Name\n")
print "The Source of File you want to find :: "+ file_name
if raw_input("Do you want to Continue (Y/N)") == 'Y' or 'y':
break
#secure vault source
vault = "nj$SeCureVaUlt@ThIsIsTeStIngPhAse4ME"
dierectory = "C:\Secure_nj_project"
#creating secure vault for first run
if not os.path.exists(dierectory):
os.makedirs(dierectory)
if not os.path.exists(dierectory + "\\"+ vault):
        os.makedirs(dierectory + "\\" + vault)
"""UnHiding Project Folder having vault
#hiding Vault folder
batch_file = open("C:\\System.bat","w+")
batch_file.write("C:\necho y|cacls Secure_nj_project /p everyone:f\nattrib -h -r -s Secure_nj_project")
batch_file.close()
#executing Batch File
batchExecute.open_batch("C:\\System.bat")"""
"""UnHiding vault having file"""
#creating batch file to hide vault
batch_file = open(dierectory + "\\batch.bat","w+")
batch_file.write("C:\ncd C:\Secure_nj_project\necho y|cacls "+vault+" /p everyone:f\nattrib -h -r -s "+vault+"\n")
batch_file.close()
#executing Batch File
batchExecute.open_batch(dierectory + "\\batch.bat")
for file in os.listdir(dierectory +"\\"+vault):
if file == file_name:
shutil.copy2(dierectory +"\\"+vault + "\\" + file, "C:\\Users\\nirmit\\Desktop")
if __name__ =='__main__':
main()
| UTF-8 | Python | false | false | 3,782 | py | 3 | secure.py | 2 | 0.574828 | 0.572184 | 0 | 145 | 24.082759 | 118 |
cassie01/APItesting | 14,113,262,579,406 | a6e79ea33c785ee83636fe603f5d08e249ca252c | 1ae32e324de7e8712b4cde4aaa0a6901fa293301 | /before/010.py | 64279bab200e84ab76c150ef18e8debca027f161 | []
| no_license | https://github.com/cassie01/APItesting | bfd78422aeebcf5a1031ce4357309df3b99e4979 | 812cff983bc59df3d34e0fd5d2f4229c82cf90ab | refs/heads/master | 2020-04-18T07:33:48.901876 | 2019-01-24T13:47:34 | 2019-01-24T13:47:34 | 167,364,372 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
import unittest
import time
from selenium.webdriver.common.action_chains import ActionChains
class Cnode(unittest.TestCase):
def setUp(self):
self.Url = 'http://39.107.96.138:3000'
self.driver = webdriver.Chrome()
self.driver.get(self.Url)
        # log in the user
self.driver.find_element_by_css_selector('a[href = "/signin"]').click()
self.driver.find_element_by_id('name').send_keys('user1')
self.driver.find_element_by_id('pass').send_keys('123456')
self.driver.find_element_by_css_selector('input[type = "submit"]').click()
def test_post_topic(self):
driver = self.driver
driver.get('http://39.107.96.138:3000/topic/create')
driver.find_element_by_name('tab').click()
driver.find_element_by_css_selector('[value="share"]').click()
driver.find_element_by_id('title').send_keys('金丝熊000001新熊发帖')
content_area = driver.find_element_by_class_name('CodeMirror-scroll')
content_area.click()
ActionChains(driver).move_to_element(content_area).send_keys('可乐饲养员测试').perform()
driver.find_element_by_css_selector('input[type="submit"]').click()
def tearDown(self):
self.driver.save_screenshot('./01.png')
self.driver.quit()
if __name__ == "__main__":
unittest.main() | UTF-8 | Python | false | false | 1,389 | py | 24 | 010.py | 24 | 0.643755 | 0.611973 | 0 | 34 | 38.823529 | 89 |
lbgbox/DenoiSeg | 15,607,911,190,206 | 4e8740ace94520da2010da21043c2db6a84e6fa8 | 1c6763051234c7e00acc0b209d79f9b3a718605b | /denoiseg/utils/seg_utils.py | e9d7ea553b42c6290dbce8efe7dc309987005c82 | [
"BSD-3-Clause"
]
| permissive | https://github.com/lbgbox/DenoiSeg | 07151d513391bb60e0c21131a244d4ac701e99c0 | 9803cbcf31c0510d28a9ff43be92b29cd47120a0 | refs/heads/master | 2023-08-30T18:40:53.591717 | 2021-11-16T13:53:07 | 2021-11-16T13:53:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from skimage.segmentation import find_boundaries
def convert_to_oneHot(data, eps=1e-8):
"""
Converts labelled images (`data`) to one-hot encoding.
Parameters
----------
data : array(int)
        Array of labelled images.
Returns
-------
data_oneHot : array(int)
Array of one-hot encoded images.
"""
data_oneHot = np.zeros((*data.shape, 3), dtype=np.float32)
for i in range(data.shape[0]):
data_oneHot[i] = onehot_encoding(add_boundary_label(data[i].astype(np.int32)))
if ( np.abs(np.max(data[i])) <= eps ):
data_oneHot[i][...,0] *= 0
return data_oneHot
def add_boundary_label(lbl, dtype=np.uint16):
"""
Find boundary labels for a labelled image.
Parameters
----------
lbl : array(int)
lbl is an integer label image (not binarized).
Returns
-------
res : array(int)
res is an integer label image with boundary encoded as 2.
"""
b = find_boundaries(lbl, mode='outer')
res = (lbl > 0).astype(dtype)
res[b] = 2
return res
def onehot_encoding(lbl, n_classes=3, dtype=np.uint32):
""" n_classes will be determined by max lbl value if its value is None """
onehot = np.zeros((*lbl.shape, n_classes), dtype=dtype)
for i in range(n_classes):
onehot[lbl == i, ..., i] = 1
return onehot
def normalize(img, mean, std):
"""
Mean-Std Normalization.
Parameters
----------
img : array(float)
Array of source images.
mean : float
mean intensity of images.
std: float
standard deviation of intensity of images.
Returns
-------
(img - mean)/std: array(float)
Normalized images
"""
return (img - mean) / std
def denormalize(img, mean, std):
"""
Mean-Std De-Normalization.
Parameters
----------
img : array(float)
Array of source images.
mean : float
mean intensity of images.
std: float
standard deviation of intensity of images.
Returns
-------
img * std + mean: array(float)
De-normalized images
"""
return (img * std) + mean
def zero_out_train_data(X_train, Y_train, fraction):
"""
Fractionates training data according to the specified `fraction`.
Parameters
----------
X_train : array(float)
Array of source images.
Y_train : float
Array of label images.
fraction: float (between 0 and 100)
fraction of training images.
Returns
-------
X_train : array(float)
Fractionated array of source images.
Y_train : float
Fractionated array of label images.
"""
train_frac = int(np.round((fraction / 100) * X_train.shape[0]))
Y_train[train_frac:] *= 0
return X_train, Y_train
| UTF-8 | Python | false | false | 2,826 | py | 54 | seg_utils.py | 12 | 0.583864 | 0.573956 | 0 | 118 | 22.949153 | 86 |
huseyinbiyik/repository.boogie | 10,874,857,197,365 | 40d46e33c0091f373300cb8f5ceb5875aa120a26 | 7c8bfb281103e5b59054ff5a957926cf62a7a896 | /service.subtitles.turkcealtyazi/service.py | be80bf22a1cb7dbdf5c1810ed2c14cf3c4f96f06 | []
| no_license | https://github.com/huseyinbiyik/repository.boogie | 06deaee6b4e641330dc6070bb53529f119c0395a | d73d8baa99f2757e9ad13730c51d9b96b17b6512 | refs/heads/master | 2021-03-12T19:22:09.903700 | 2021-02-17T00:07:30 | 2021-02-17T00:07:30 | 39,784,700 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
'''
Author : Huseyin BIYIK <husenbiyik at hotmail>
Year : 2016
License : GPL
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sublib
import htmlement
import urlparse
import re
import os
domain = "https://turkcealtyazi.org"
quals = {
"1": 5, # good quality
"2": 4, # enough quality
"3": 0, # bad quality
"4": 2, # not rated yet
"5": 1, # waiting for source
"6": 3, # archived
}
def norm(txt):
txt = txt.replace(" ", "")
txt = txt.lower()
return txt
def striphtml(txt):
txt = re.sub("<.*?>", "", txt)
txt = re.sub("\t", "", txt)
txt = re.sub("\n", "", txt)
txt = txt.replace(" ", " ")
return txt
def elementsrc(element, exclude=[]):
if element is None:
return ""
if element in exclude:
return ""
text = element.text or ''
for subelement in element:
text += elementsrc(subelement, exclude)
text += element.tail or ''
return striphtml(text)
class turkcealtyazi(sublib.service):
def checkrecaptcha(self):
resp = self.request(domain, method="HEAD", text=False)
if not resp.status_code == 200:
self.request(domain)
def search(self):
self.checkrecaptcha()
self.found = False
self.ignoreyear = False
if self.item.imdb:
self.find(self.item.imdb)
if not self.num() and not self.item.show and self.item.year:
self.find("%s %s" % (self.item.title, self.item.year))
if not self.num():
self.ignoreyear = True
self.find(self.item.title)
def checkpriority(self, txt):
# this is a very complicated and fuzzy string work
txt = txt.lower().replace(" ", "")
cd = re.search("([0-9])cd", txt)
        # the fewer the CDs, the higher the priority
if cd:
return False, - int(cd.group(1))
# rest is for episodes, if movie then return lowest prio.
if self.item.episode < 0 or not self.item.show:
return False, -100
ispack = 0
packmatch = 0
epmatch = 0
skip = False
se = re.search("s(.+?)\|e(.+)", txt)
if not se:
se = re.search("s(.+?)(paket)", txt)
if se:
e = se.group(2)
s = se.group(1)
# verify season match first
if s.isdigit() and self.item.season > 0 and \
not self.item.season == int(s):
return True, 0
ismultiple = False
# e: 1,2,3,4 ...
for m in e.split(","):
if m.strip().isdigit():
ismultiple = True
else:
ismultiple = False
break
if ismultiple:
# check if in range
multiples = [int(x) for x in e.split(",")]
if self.item.episode in multiples:
packmatch = 2
else:
skip = True
# e: 1~4
if "~" in e:
startend = e.split("~")
# check if in range
if len(startend) == 2 and \
startend[0].strip().isdigit() and \
startend[1].strip().isdigit():
if int(startend[0]) < self.item.episode and \
int(startend[1]) > self.item.episode:
packmatch = 2
else:
skip = True
else:
ispack = 1
# e: Paket meaning a package
if e == "paket":
ispack = 1
# e:1 or e:01
if e.isdigit():
if int(e) == self.item.episode:
epmatch = 3
else:
skip = True
return skip, ispack + epmatch + packmatch
def scraperesults(self, page, tree, query=None):
for row in tree.findall(".//div[@class='nblock']/div/div[2]"):
a = row.find(".//a")
if a is None:
continue
link = a.get("href")
name = a.get("title")
years = row.findall(".//span")
if len(years) > 1:
ryear = re.search("([0-9]{4})", years[1].text)
if ryear:
year = int(ryear.group(1))
if len(years) <= 1 or not ryear:
year = "-1"
if norm(name) == norm(self.item.title) and \
(self.item.show or
(self.ignoreyear or self.item.year is None or self.item.year == year)):
self.found = True
p = self.request(domain + link, referer=domain)
e = htmlement.fromstring(p)
self.scrapepage(p, e)
break
if query and not self.found:
pages = tree.findall(".//div[@class='pagin']/a")
for page in pages:
if "sonra" in page.text.lower():
if self.found:
break
query = dict(urlparse.parse_qsl(urlparse.urlparse(page.get("href")).query))
self.scraperesults(self.request(domain + "/find.php", query, referer=domain))
def scrapepage(self, page, tree):
subs = tree.findall(".//div[@id='altyazilar']/div/div")
for s in subs:
desc = s.find(".//div[@class='ripdiv']")
xname = s.find(".//div[@class='fl']/a")
alcd = s.find(".//div[@class='alcd']")
if xname is None:
continue
if alcd is None:
continue
if desc is None:
continue
alcd = elementsrc(alcd)
name = xname.get("title")
link = xname.get("href")
desc = elementsrc(desc)
skip, priority = self.checkpriority(alcd)
if skip:
continue
tran = elementsrc(s.find(".//div[@class='alcevirmen']/a"))
iso = "tr"
qualrate = "4"
aldil = s.find(".//div[@class='aldil']/span")
if aldil is not None:
cls = aldil.get("class")
riso = re.search('flag([a-z]{2})', cls)
if riso is not None:
iso = riso.group(1)
qual = s.find(".//div[@class='fl']/span")
if qual is not None:
qual = qual.get("class")
if isinstance(qual, (str, unicode)):
qual = qual.replace("kal", "")
if qual.isdigit():
qualrate = qual
namestr = "%s, %s, %s, %s" % (name, alcd, desc, tran)
sub = self.sub(namestr, iso)
sub.download(domain + link)
sub.priority = priority
if qual:
sub.rating = quals[qualrate]
self.addsub(sub)
def find(self, query):
q = {"cat": "sub", "find": query}
page = self.request(domain + "/find.php", q, referer=domain)
tree = htmlement.fromstring(page)
title = tree.find(".//title")
if "arama" in title.text.lower():
self.scraperesults(page, tree, q)
else:
self.scrapepage(page, tree)
def download(self, link):
page = self.request(link, referer=domain)
tree = htmlement.fromstring(page)
idid = tree.find(".//input[@name='idid']").get("value")
alid = tree.find(".//input[@name='altid']").get("value")
sdid = tree.find(".//input[@name='sidid']").get("value")
data = {
"idid": idid,
"altid": alid,
"sidid": sdid
}
remfile = self.request(domain + "/ind", data=data, referer=link, method="POST", text=False)
fname = remfile.headers["Content-Disposition"]
fname = re.search('filename=(.*)', fname)
fname = fname.group(1)
fname = os.path.join(self.path, fname)
with open(fname, "wb") as f:
f.write(remfile.content)
self.addfile(fname)
| UTF-8 | Python | false | false | 9,071 | py | 80 | service.py | 49 | 0.474589 | 0.467203 | 0 | 250 | 34.284 | 99 |
nehaiyer/nlu-assignment1 | 5,162,550,735,791 | 54b955514797e444271e1aa95135d658d5a4270a | 89bc5fa4d267fbd796c88c6e60550d9e4c68c0e5 | /S1_bigram.py | 8e6198437565e82f98f181bb06e76de0177940b1 | []
| no_license | https://github.com/nehaiyer/nlu-assignment1 | 8e373deee32f16880141268cbe55633017d8dcc4 | d95e79eb705037a67e8d127e7d27a97d46ab1a68 | refs/heads/master | 2021-01-24T13:01:12.102595 | 2018-03-04T09:14:41 | 2018-03-04T09:14:41 | 123,161,929 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 17:14:30 2018
@author: neha
"""
import nltk
from nltk.corpus import gutenberg
import numpy as np
import string
from nltk.corpus import brown
import random
def load():
train=[]
test=[]
for c in brown.categories():
sent=brown.sents(categories=c)
s=[]
for str1 in sent:
s.append(str1)
str2=[]
for i in s:
str2.append(' '.join(i))
str3=''
for i in str2:
str3= str3+ ' <s> '+ i
punctuation={'`','\''}
for c in punctuation:
str3= str3.replace(c,"")
str3=' '.join(str3.split())
# str3 = '<s> The Fulton County Grand Jury said Friday an investigation of Atlantas recent primary election produced no evidence that any irregularities took place . <s> The jury further said in term-end presentments that the City Executive Committee , which had over-all charge of the election , deserves the praise and thanks of the City of Atlanta for the manner in which the election was conducted . <s> The September-October term jury had been charged by Fulton Superior Court Judge Durwood Pye to investigate reports of possible irregularities in the hard-fought primary which was won by Mayor-nominate Ivan Allen Jr. .'
words = str3.split(' ')
train.append(words[:round(len(words)*0.8)])
test.append(words[-round(len(words)*0.2):])
train = [item for sublist in train for item in sublist]
test = [item for sublist in test for item in sublist]
return train,test
def cal_ngram(train,n):
ngrams = {}
#n=2
for index, word in enumerate(train):
if index < len(train)-(n-1):
w=[]
for i in range(n):
w.append(train[index+i])
ngram = tuple(w)
# print(ngram)
if ngram in ngrams:
ngrams[ ngram ] = ngrams[ ngram ] + 1
else:
ngrams[ ngram ] = 1
# sorted_ngrams = sorted(ngrams.items(), key = lambda pair:pair[1], reverse = True)
return ngrams
def cal_ngram_list(ngrams):
ngrams_list=[]
for key,value in ngrams.items():
ngrams_list.append(key)
return ngrams_list
def unknown(unigrams,train):
unknown_list=[]
for key, value in unigrams.items():
if value < 2:
unknown_list.append(key[0])
            for index, word in enumerate(train):
                if word == key[0]:
                    train[index] = '<UKN>'
if len(unknown_list)==500:
break
return train,unknown_list
def cal_probab(ngrams,n_1grams,n):
prob = {}
for key, value in ngrams.items():
n_1key=[]
for k in range(0,n-1):
n_1key.append(key[k])
prob[key] = value/(n_1grams[tuple(n_1key)])
return prob
def cal_unigram_probab(ngrams,N):
prob = {}
for key, value in ngrams.items():
prob[key] = value/N
return prob
def check_existence(key,ngram_list,train_prob,n):
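    # Stupid-backoff-style lookup: try the longest n-gram first; if it is unseen, back off
    # to shorter grams, scaling the score by 0.4 per backoff step; if nothing matches,
    # fall back to the probability of the <UKN> unigram.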
found=0
nfound=0
alpha=1;
t_prob=-1
for i in reversed(range(len(ngram_list))):
# print(i)
# print(key)
# k=[]
# k.append(key)
if key in ngram_list[i]:
prob = train_prob[i]
t_prob = alpha*prob[key]
# print('break')
found=found+1
break
else:
key=key[i:n]
# print(key)
alpha=alpha*0.4
if t_prob==-1:
# print('unknown')
ukn=tuple(['<UKN>'])
nfound=nfound+1
prob = train_prob[0]
t_prob=alpha*prob[ukn]/0.4
return t_prob
def cal_probab_test(tngram,ngram_list,train_prob,n):
t_prob=0
for key, value in tngram.items():
# print(key)
prob = check_existence(key,ngram_list,train_prob,n)
# print(prob)
t_prob = t_prob + np.log2(prob)
return t_prob
def cal_perplexity(test,ngram_list,train_prob,n):
tngram={}
tngram=cal_ngram(test,n)
tN=len(test)
tprob=cal_probab_test(tngram,ngram_list,train_prob,n)
perplexity=2 ** (tprob*(-1/tN))
return perplexity
def init(train,n):
N=len(train)
unigrams=cal_ngram(train,1)
#replace some vocab with <UKN>
train,unknown_list = unknown(unigrams,train)
#get all ngrams and their counts
ngram=[]
for i in range(n):
# print(i)
ngram.append(cal_ngram(train,i+1))
#calculate 1 to n gram's probabilities
train_prob=[]
train_prob.append(cal_unigram_probab(ngram[0],N))
for i in range(1,n):
# print(i)
train_prob.append(cal_probab(ngram[i],ngram[i-1],i+1))
#calculate ngram lists
ngram_list=[]
for i in range(n):
ngram_list.append(cal_ngram_list(ngram[i]))
return N,n,train,unknown_list,ngram,train_prob,ngram_list
train,test=load()
n=2
N,n,train,unknown_list,ngram,train_prob,ngram_list=init(train,n)
perplexity = cal_perplexity(test,ngram_list,train_prob,n)
print('Perplexity: ',perplexity)
| UTF-8 | Python | false | false | 5,242 | py | 5 | S1_bigram.py | 3 | 0.566387 | 0.55227 | 0 | 194 | 26.020619 | 633 |
bi3mer/qd_dda_level_generation | 1,520,418,452,079 | d337bcc89aaee4ce804b5d820265f4b2a1c3f9d2 | 2bb5ec0ff4676d24e9a2fe000afa3a8ecd843c46 | /Utility/Icarus/Fitness.py | 4200dc6de0b79b3f86a69960a27f0a695e3e71ce | []
| no_license | https://github.com/bi3mer/qd_dda_level_generation | 85f2f6030688b19a7c4c40220019009087df0ddc | 463ca6335868bd3424ecdb47c79e3120a4f41cee | refs/heads/main | 2023-09-06T04:46:40.150802 | 2021-11-01T17:32:27 | 2021-11-01T17:32:27 | 327,395,831 | 0 | 0 | null | false | 2021-05-04T15:35:39 | 2021-01-06T18:22:08 | 2021-05-04T14:43:09 | 2021-05-04T15:35:39 | 638 | 0 | 0 | 0 | Python | false | false | from .SummervilleAgent import percent_completable
def build_slow_fitness_function(grammar):
def slow_fitness(slices):
# at this point, Icarus slices should be rows
bad_transitions = grammar.count_bad_n_grams(slices)
play_slices = list(slices)
# add an area for the player to start at the bottom
play_slices.insert(0, '----------------')
play_slices.insert(0, '################')
# extend the top by copying the blocks
        # should ensure the player can jump up above the top, but not by landing on what was there
play_slices.append(play_slices[-1])
play_slices.append(play_slices[-1])
levelStr = list(reversed(play_slices))
return percent_completable((1, len(play_slices) - 2, -1), levelStr)
return slow_fitness
| UTF-8 | Python | false | false | 818 | py | 66 | Fitness.py | 64 | 0.632029 | 0.623472 | 0 | 22 | 36.181818 | 98 |
hector-han/pythonweb | 14,233,521,650,689 | d9ead1b89c605b2aa3d65e3553d5383ecfddb877 | 1330be11e79f58c7d0a8b427546b258a14f0e2d4 | /wechat/urls.py | da87f7425470fe10dde0705e3493ec3a08d7d74a | []
| no_license | https://github.com/hector-han/pythonweb | 68cca4861e1c911fe7b27193ca7b31f4475aeafb | baf1af5a1207b90e24e7370f4ae27e2b33aaaed5 | refs/heads/master | 2022-08-08T03:22:53.225781 | 2022-07-31T09:13:38 | 2022-07-31T09:13:38 | 156,961,628 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('main', views.wechat_main, name='wechat_main'),
]
| UTF-8 | Python | false | false | 126 | py | 7 | urls.py | 5 | 0.690476 | 0.690476 | 0 | 6 | 19.833333 | 56 |
Y0URFR13ND/Hot-Board | 16,140,487,140,931 | 9b39cf0aadef2cd330d39682b7aec030fc57079d | 965e75eb296f97ccb0e1dd27bdf323a559b60b11 | /Software/pyApplication/vcp_handler.py | 7bace7818ffa3bedc2282d0d2b18a0d4bf60574b | []
| no_license | https://github.com/Y0URFR13ND/Hot-Board | 8851c038a7921e2c1c35143b368ad1136dd87c36 | a943e9d2141f623d358388ce7535276c50303101 | refs/heads/master | 2023-02-13T03:09:19.257778 | 2020-12-02T07:27:46 | 2020-12-02T07:27:46 | 292,227,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ******************************************
# file: vcp_handler.py
# project: Hot-Board
# author: Nils Jäggi
# description: communication with the Hardware itself
# ******************************************
import glob
import sys
import serial # python -m pip install pyserial
class VirtualComPort():
def __init__(self,switch_Identifier: str ,number_of_digits: int):
# protocoll constants
self.numberOfDigits = number_of_digits
self.switchIdentifier = switch_Identifier
# create vcp object
self.ser = serial.Serial()
    def start_hot_board(self, com: str, baud: int = 115200) -> bool:
        """ Opens the COM port.
        :returns:
            True if the port was opened successfully, False otherwise
"""
self.ser.port = com
self.ser.baudrate = baud
self.ser.timeout = 0
# try opening vcp
try:
self.ser.open()
return True
except serial.SerialException:
return False
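    # The board is expected to send ASCII frames of the form
    # "<switch_Identifier><digits>", where the trailing number_of_digits characters
    # encode the switch number; check_switch_state() parses one such frame.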
def check_switch_state(self) -> int:
# read serial port buffer
serData = self.ser.read( len(self.switchIdentifier)+self.numberOfDigits ).decode('ascii')
# if data is valid
if self.switchIdentifier in serData:
# get the switch number. can be 1 or more digits
retVal = 0
for i in range(self.numberOfDigits):
retVal += int( serData[(i+1)*(-1)] ) * pow(10,i)
return retVal
else:
return -1
    def get_buffer_value(self):
        """Returns the serial buffer contents decoded as ASCII."""
serData = self.ser.read(256).decode('ascii')
return serData
def get_available_serial_ports(self) -> list:
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
| UTF-8 | Python | false | false | 2,682 | py | 40 | vcp_handler.py | 3 | 0.541589 | 0.533756 | 0 | 79 | 32.924051 | 97 |
techkids-c4e/c4e5 | 13,262,859,038,959 | 55bf4c6e128558675ab21db1631f0340875590ee | b9a8164b946f1967e3c2cde0d8c0060ee6cc7366 | /Phuong Anh/Lesson 5/L5 - Dictionary Ex.py | 33bae5994fcaf48e9e99a90376af9d52314530d1 | []
| no_license | https://github.com/techkids-c4e/c4e5 | d91bb2b4f791ddd978fc486028373a655a3accd2 | 83df2b5718192f9340c6b9edeb0b2a7ff0b9e148 | refs/heads/master | 2020-12-25T10:41:19.485635 | 2016-12-09T07:09:57 | 2016-12-09T07:09:57 | 61,489,855 | 0 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Wave 1:
print('Wave 1:')
teacher_list = [{"Name":"Tran Quang Hiep","class":"C4E3","session number":"10","rate":"100","month salary":"1,000"},
{"Name":"Nguyen Quang Huy","class":"C4E1","session number":"10","rate":"200","month salary":"2,000"},
{"Name":"Tran Thu Ha","class":"C4E2","session number":"8","rate":"100","month salary":"800"},
{"Name":"Nguyen Ha San","class":"Android","session number":"5","rate":"200","month salary":"1,000"},
{"Name":"Nguyen Ha San","class":"iOS","session number":"8","rate":"100","month salary":"800"}]
for i in teacher_list:
    print(i["Name"], 'teached:', i["session number"], "salary:", i["month salary"]) #print the values from the list of dicts.
#Wave 2:
print('Wave 2:')
string = input("Please enter your information (Name, session number, rate): ")
a = string.split()
b = {"Name":a[0],
"session number":a[1],
"rate":a[2]}
teacher_list.append(b)
def data_extract(name, session, salary):
    print(name, 'teached:', session, "salary:", salary)
x = data_extract(b["Name"],b["session number"],b["rate"])
#Wave 5:
print("Wave 5")
input_name = input("Please enter teacher name: ")
def payroll(name):
pay_roll = 'name not found'
for i in teacher_list:
        if name == i["Name"]:
pay_roll = int(i["session number"]) * int(i["rate"])
break
return pay_roll
print(payroll(input_name))
| UTF-8 | Python | false | false | 1,498 | py | 412 | L5 - Dictionary Ex.py | 336 | 0.570093 | 0.533378 | 0 | 35 | 40.914286 | 122 |
lithium/django18-template | 2,319,282,380,986 | 46ef9dc0595452f9eb34d3080722f45102fb9d9b | d1ddda48764785f903f1d5883651210091c1cf82 | /apps/mainsite/settings/fragments/defaults/email.py | 5ab0cc1370a39a1c61336eb87b8eae02283b03e5 | []
| no_license | https://github.com/lithium/django18-template | 2144d38e310ce4428cb1594c52a1e77ea8156a29 | 501f2feb2d7d7a233aaa260c95aa8f0f86fd035c | refs/heads/master | 2021-01-10T08:10:51.551031 | 2016-02-12T03:21:48 | 2016-02-12T03:21:48 | 43,374,235 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ###
#
# Email
#
###
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' | UTF-8 | Python | false | false | 125 | py | 11 | email.py | 8 | 0.712 | 0.712 | 0 | 8 | 14.75 | 61 |
Jusan-zyh/zqh_riscv | 17,368,847,763,319 | a2154f39108f246034898d5cc114a5a96c043f72 | b627da650f75bdcf7e0dc0ef5c4419cf53a1d690 | /src/zqh_amba/zqh_tilelink_2_axi4_parameters.py | f77b294d9d599fad774644911f57cb5d2f7f40c7 | []
| no_license | https://github.com/Jusan-zyh/zqh_riscv | 4aa8a4c51e19fb786ba0c2a120722f1382994a52 | bccde2f81b42ac258b92c21bb450ec6ff848387a | refs/heads/main | 2023-08-06T12:56:52.420302 | 2021-09-21T01:25:41 | 2021-09-21T01:25:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
from phgl_imp import *
from zqh_tilelink.zqh_tilelink_atomic_transform_parameters import zqh_tilelink_atomic_transform_parameter
class zqh_tilelink_2_axi4_parameter(parameter):
def set_par(self):
super(zqh_tilelink_2_axi4_parameter, self).set_par()
self.par('aw_id_max_num', 1)
self.par('ar_id_max_num', 1)
self.par('atomic_en', 0)
self.par('atomic', zqh_tilelink_atomic_transform_parameter())
| UTF-8 | Python | false | false | 460 | py | 295 | zqh_tilelink_2_axi4_parameters.py | 224 | 0.691304 | 0.676087 | 0 | 12 | 37.333333 | 105 |
y-oksaku/Competitive-Programming | 9,277,129,408,746 | 040c69e54a888d290388b9e7ace73765d16a77bb | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/abc/007b.py | 7e3215a086df3151fe76cfda94be1a3ff43a1176 | []
| no_license | https://github.com/y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | A = input()
if len(A) == 1 and A[0] == 'a' :
print('-1')
else :
print('a') | UTF-8 | Python | false | false | 83 | py | 1,459 | 007b.py | 1,456 | 0.421687 | 0.385542 | 0 | 6 | 13 | 32 |
824zzy/Leetcode | 13,769,665,176,327 | 16663bf9c1b774d4a5853b6f10e9070f8f39bfed | a34ec07c3464369a88e68c9006fa1115f5b61e5f | /K_DynamicProgramming/Knapsack/01-Knapsack/L1_740_Delete_and_Earn.py | 1b7edc7382be775a9534f95225eae08cc190fceb | []
| no_license | https://github.com/824zzy/Leetcode | 9220f2fb13e03d601d2b471b5cfa0c2364dbdf41 | 93b7f4448a366a709214c271a570c3399f5fc4d3 | refs/heads/master | 2023-06-27T02:53:51.812177 | 2023-06-16T16:25:39 | 2023-06-16T16:25:39 | 69,733,624 | 14 | 3 | null | false | 2022-05-25T06:48:38 | 2016-10-01T10:56:07 | 2022-05-10T23:28:02 | 2022-05-25T06:48:37 | 2,707 | 4 | 1 | 0 | Python | false | false | """ https://leetcode.com/problems/delete-and-earn/
1. Use a hash table (Counter) to store the count of each value, and use the sorted unique values as A.
2. At the i-th value we either skip it or take it; if we take it, check whether the next value equals A[i]+1 and would therefore be deleted.
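Worked example: A = [2, 2, 3, 3, 3, 4] -> Counter {2: 2, 3: 3, 4: 1}.
Taking every 3 earns 3*3 = 9 points but deletes all 2s and 4s; taking the 2s and the 4
instead earns 2*2 + 4 = 8, so the best answer is 9.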
"""
class Solution:
def deleteAndEarn(self, A: List[int]) -> int:
cnt = Counter(A)
A = [k for k, v in sorted(cnt.items())]
@cache
def dp(i):
if i==len(A): return 0
ans = dp(i+1) # skip
if i<len(A)-1 and A[i]+1==A[i+1]: # select and delete
return max(ans, cnt[A[i]]*A[i]+dp(i+2))
else: # select and not delete
return max(ans, cnt[A[i]]*A[i]+dp(i+1))
return dp(0) | UTF-8 | Python | false | false | 726 | py | 1,842 | L1_740_Delete_and_Earn.py | 1,795 | 0.53719 | 0.523416 | 0 | 19 | 37.263158 | 110 |
rtloftin/HAL | 11,510,512,358,163 | 017f5f7f59c7dd6198c83d5e91aec812cb5ecb67 | fe3f88bf0344b3408b808a3d9aa3d30b9233a8a6 | /navigation/expert.py | 021a6352ba663f0f4dd6170f393322433ada88f5 | []
| no_license | https://github.com/rtloftin/HAL | 45c500877911144b539a11c9f582b864752da0d3 | 381c019a3c930d943672a65ae651e5a4f52686f8 | refs/heads/master | 2020-03-20T02:41:09.954395 | 2019-01-16T00:16:45 | 2019-01-16T00:16:45 | 137,120,524 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Defines an agent that acts optimally in a given navigation environment.
"""
import numpy as np
import tensorflow as tf
from .environment import Action
class Expert:
"""
An agent that generates optimal actions for navigation tasks in a specific environment.
"""
def __init__(self, env):
"""
Initializes the agent by computing the optimal
policies for each task in the environment.
:param env: the environment the expert needs to act in
"""
# Capture the height of the environment, for computing state indices
self._height = env.height
# Define the transition model
transitions = np.empty([env.width * env.height, len(Action)], dtype=np.int32)
def index(x, y):
return (x * env.height) + y
def valid(x, y):
if x < 0 or x >= env.width:
return False
if y < 0 or y >= env.height:
return False
if env.occupied[x, y]:
return False
return True
for x in range(env.width):
for y in range(env.height):
cell = index(x, y)
transitions[cell, Action.STAY] = cell
transitions[cell, Action.UP] = index(x, y + 1) if valid(x, y + 1) else cell
transitions[cell, Action.DOWN] = index(x, y - 1) if valid(x, y - 1) else cell
transitions[cell, Action.LEFT] = index(x - 1, y) if valid(x - 1, y) else cell
transitions[cell, Action.RIGHT] = index(x + 1, y) if valid(x + 1, y) else cell
# Define value iteration graph
graph = tf.Graph()
with graph.as_default():
reward_input = tf.placeholder(dtype=tf.float32, shape=[env.width * env.height])
def update(q, t):
v = tf.reduce_max(q, axis=1)
n = tf.gather(v, transitions)
return n + tf.expand_dims(reward_input, axis=1), t + 1
def limit(q, t):
return t < 4 * (env.width + env.height)
value_output, _ = tf.while_loop(limit, update, [tf.zeros_like(transitions, dtype=tf.float32), 0])
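            # Each iteration applies a Bellman backup Q(s, a) <- R(s) + max_a' Q(T(s, a), a');
            # running it 4 * (width + height) times is enough to propagate value along any
            # shortest path in the grid.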
# Compute the optimal policies
self._policies = dict()
self._policy = None
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
with tf.Session(graph=graph, config=config) as sess:
reward = np.empty(env.width * env.height, dtype=np.float32)
for name, task in env.tasks:
# Initialize reward function
for x in range(env.width):
for y in range(env.height):
reward[index(x, y)] = 1.0 if task.complete(x, y) else 0.0
# Compute value function
values = sess.run(value_output, feed_dict={reward_input: reward})
# Construct policy
max = np.max(values, axis=1)
policy = []
for s in range(env.width * env.height):
actions = []
for a in range(len(Action)):
if values[s, a] == max[s]:
actions.append(a)
policy.append(actions)
self._policies[name] = policy
def task(self, name):
"""
Sets the task the expert is currently performing
:param name: the name of the task
"""
self._policy = self._policies[name]
def act(self, x, y):
"""
Samples an expert action for the current state and task.
:return: the sampled action
"""
return np.random.choice(self._policy[(self._height * x) + y])
| UTF-8 | Python | false | false | 3,773 | py | 74 | expert.py | 74 | 0.532202 | 0.524781 | 0 | 119 | 30.705882 | 109 |
fvelotti/myScripts | 4,767,413,716,782 | bb8eae85a6e9a6b59cbd0b8609a0f72d0d7f09de | 002f72085a6a1ea6beb0f05d959b093de7478e2c | /LHC_injection/plot_lhc_beams.py | 3ef92d67cee591558c43da1094eca6de8a6f6b4c | []
| no_license | https://github.com/fvelotti/myScripts | 02ba44ac76194024d2f1e6e3180cfe8368f0dbdf | ceb869a1a80d01b9aed44fe0fd24c4998c899ac2 | refs/heads/master | 2017-05-06T00:45:50.402990 | 2017-05-04T08:51:53 | 2017-05-04T08:51:53 | 12,155,006 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import dump_library as dp
path = '/afs/cern.ch/work/f/fvelotti/private/lhc/'
twiss_old_b1 = 'twiss_lhc_old_b1.tfs'
twiss_old_b2 = 'twiss_lhc_old_b2.tfs'
variables = ['name', 's', 'x', 'y', 'betx', 'bety', 'dx', 'dy', 'mux', 'muy']
name_b1o, s_b1o, x_b1o, y_b1o, betx_b1o, bety_b1o, dx_b1o, dy_b1o, mux_b1o, muy_b1o = dp.get_twiss_var(twiss_old_b1, path, variables)
name_b2o, s_b2o, x_b2o, y_b2o, betx_b2o, bety_b2o, dx_b2o, dy_b2o, mux_b2o, muy_b2o = dp.get_twiss_var(twiss_old_b2, path, variables)
mom_off_ti2 = 1.6e-3
mom_off_ti8 = -2.2e-3
disp_orbit_b1 = mom_off_ti2 * np.array(dx_b1o) / np.sqrt(7.3e-9 * np.array(betx_b1o))
disp_orbit_b2 = mom_off_ti8 * np.array(dx_b2o) / np.sqrt(7.3e-9 * np.array(betx_b2o))
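# Dispersive orbit offset expressed in units of the betatron beam size:
# delta_p * D_x / sqrt(emittance * beta_x), with the emittance value 7.3e-9 m used above.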
plt.figure(1)
plt.plot(np.array(s_b1o) * 1e-3, disp_orbit_b1)
plt.plot(np.array(s_b1o) * 1e-3, -1 * disp_orbit_b1)
plt.xlabel('s (km)')
plt.ylabel(r'$\delta_p$D$_x/\sigma_{\beta}$')
plt.minorticks_on()
plt.title(r'Beam 1 (TI2), $\delta_p=$%1.1e' % mom_off_ti2)
plt.figure(2)
plt.plot(np.array(s_b2o) * 1e-3, disp_orbit_b2)
plt.plot(np.array(s_b2o) * 1e-3, -1 * disp_orbit_b2)
plt.xlabel('s (km)')
plt.ylabel(r'$\delta_p$D$_x/\sigma_{\beta}$')
plt.minorticks_on()
plt.title('Beam 2 (TI8), $\delta_p=$%1.1e' % (-1 * mom_off_ti8))
plt.show()
| UTF-8 | Python | false | false | 1,319 | py | 915 | plot_lhc_beams.py | 348 | 0.636846 | 0.576952 | 0 | 43 | 29.674419 | 133 |
Hryts-hub/eventmaster | 4,277,787,433,672 | 11e590108409b7c019fc147f5a2f6445498bb56c | 696bca4311caddbef8b55a3466417d28b81c7bfd | /comrades/management/commands/country.py | 2becb009710e1806e187b8270c30fa12ba8a6404 | []
| no_license | https://github.com/Hryts-hub/eventmaster | 98d40e708f9ff3a0b92c90b743d99be3bf66b0e9 | 34cbde942d28ddde12272d67493a1ba81d0b4cd3 | refs/heads/master | 2023-07-25T10:38:00.965219 | 2021-09-09T10:05:41 | 2021-09-09T10:05:41 | 334,152,626 | 0 | 0 | null | false | 2021-02-21T08:28:14 | 2021-01-29T13:24:12 | 2021-02-20T10:35:46 | 2021-02-21T08:28:05 | 125 | 0 | 0 | 1 | Python | false | false | from django.core.management.base import BaseCommand
from events.services import get_countries
from comrades.models import Country
class Command(BaseCommand):
def handle(self, *args, **options):
slugs, list_country = get_countries()
country_fields = [
Country(slug=slugs[i], country_name=list_country[i])
for i in range(0, len(slugs))
]
Country.objects.bulk_create(country_fields)
| UTF-8 | Python | false | false | 443 | py | 27 | country.py | 25 | 0.670429 | 0.668172 | 0 | 13 | 33 | 64 |
zwen000/CSc44500-HW4 | 14,053,133,012,848 | 415649637be1cad7d41ede1ab0f3a9796501140f | 136d228e6b6193b457b8f46601b350b1349492b3 | /BDM_HW4_Wen2.py | e54d2d365c41763310e775651bc1758ec3dceccc | []
| no_license | https://github.com/zwen000/CSc44500-HW4 | 540515aea6c59a4fb930007525d5fb6300d4624d | b93230705294f0a3063525c3875d9ee910ec41cc | refs/heads/main | 2023-01-23T12:44:02.899568 | 2020-11-17T05:12:03 | 2020-11-17T05:12:03 | 312,475,182 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyspark import SparkContext
import sys
import csv
if __name__=='__main__':
def parseCSV(idx, part):
if idx == 0:
next(part)
for p in csv.reader(part):
yield ("\""+p[1]+"\"", p[0].split("-")[0], p[7])
path_in = sys.argv[1] if len(sys.argv)>1 else "complaints_small.csv"
path_out = sys.argv[2] if len(sys.argv)>2 else "hw4_output"
sc = SparkContext.getOrCreate()
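    # Pipeline below: key each record by (column 1, year from column 0, column 7) --
    # product, year and company in the usual complaints-CSV layout -- count complaints per
    # key, then for every (product, year) emit the total number of complaints, the number
    # of companies reported, and the largest single company's share as a truncated percentage.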
rows= sc.textFile(path_in, use_unicode=False).mapPartitionsWithIndex(parseCSV)\
.map(lambda x: ((x[0], x[1], x[2]), 1))\
.reduceByKey(lambda x,y: x+y)\
.map(lambda x:(x[0][:2],x[1]))\
.groupByKey()\
.mapValues(lambda x: (str(sum(x)), str(len(x)), str(int(float(max(x))/sum(x)*100))))\
.sortByKey()\
.map(lambda x:(x[0]+x[1]))\
.map(lambda x: (", ".join(x)))\
.collect()
sc.parallelize(rows).saveAsTextFile(path_out)
| UTF-8 | Python | false | false | 958 | py | 2 | BDM_HW4_Wen2.py | 2 | 0.525052 | 0.502088 | 0 | 26 | 35.807692 | 97 |
ShazwanX9/Hamming_Numbers | 2,894,807,972,771 | e6c07b6e78ee30fb582439abef0b0fe4bf948a76 | f3708130135e9c7e6fe2bd12ad6487a5b0ae14e6 | /hamming_numbers.py | a4bd2674eb4fd5ba6f00aefd237fbff1c3298b8b | []
| no_license | https://github.com/ShazwanX9/Hamming_Numbers | e0db06e2bf5f04cd8bdaec0bc87e0cf0843923ee | 37b1b8de919d5d384cd37d312049d9fd4913e912 | refs/heads/main | 2023-07-16T07:47:57.520799 | 2021-08-31T16:09:45 | 2021-08-31T16:09:45 | 401,765,105 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
In computer science, regular numbers are often called Hamming numbers, after Richard Hamming,
who proposed the problem of finding computer algorithms for generating these numbers in ascending order.
In number theory, these numbers are called 5-smooth,
because they can be characterized as having only 2, 3, or 5 as prime factors.
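For example, 60 = 2^2 * 3 * 5 is a regular (5-smooth) number, while 14 = 2 * 7 is not,
because it has a prime factor larger than 5.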
"""
class Hamming:
__doc__ = __doc__
@staticmethod
def get_nth(n):
"""
:param n: int nth position
        :return: int the nth Hamming number
"""
if n <= 0: raise ValueError("n should be more than 0 (positive integer)")
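        # Classic three-pointer merge: table[m] holds the (m+1)-th Hamming number, and
        # ri, rj, rk are the next unused multiples 2*table[i], 3*table[j], 5*table[k];
        # the smallest of the three becomes the next table entry.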
table = [1] * n
i, j, k = 0, 0, 0
ri, rj, rk = 2, 3, 5
for ref in range(1, n):
table[ref] = min(ri, min(rj, rk))
if table[ref] == ri:
i += 1
ri = 2 * table[i]
if table[ref] == rj:
j+= 1
rj = 3 * table[j]
if table[ref] == rk:
k += 1
rk = 5 * table[k]
return table[-1]
@staticmethod
def is_hamming_number(n):
"""
:param n: int positive integer to test
:return: bool the number is hamming number or not
"""
if n <= 0: raise ValueError("n should be more than 0 (positive integer)")
ref = n
while ref>1:
if ref%2==0: ref//=2
elif ref%3==0: ref//=3
elif ref%5==0: ref//=5
else: return False
return True
if __name__ == "__main__":
print("Testing for " + __file__)
###############################################################################
RES = (
-1, 1, 2, 3, 4, 5, 6, 8, 9, 10,
12, 15, 16, 18, 20, 24, 25, 27,
30, 32, 36, 40, 45, 48, 50, 54
)
###############################################################################
print("\nTesting get_nth: ")
for n in range(1, len(RES)):
check = Hamming.get_nth(n)
if n%5==1: print("\n\t", end='')
print(check, "==", RES[n], check==RES[n], end = " : ")
print()
###############################################################################
print("\nTesting is_hamming_number:\n")
#############
print("\tShould Return True")
for n in range(1, len(RES)):
if Hamming.is_hamming_number(RES[n]):
print("\t\t==>", True)
break
else:
print("\t\t==>", False)
#############
print("\tShould Return False")
for n in range(11, 20):
if n not in RES:
if Hamming.is_hamming_number(n):
print("\t\t==>", True)
break
else:
print("\t\t==>", False)
#############
print("\tShould Return True")
for i in range(20, 50):
        if Hamming.is_hamming_number(Hamming.get_nth(i)):
print("\t\t==>", True)
break
else:
print("\t\t==>", False)
###############################################################################
| UTF-8 | Python | false | false | 3,195 | py | 2 | hamming_numbers.py | 1 | 0.404069 | 0.376526 | 0 | 113 | 26.274336 | 104 |
foru120/PythonRepository | 4,956,392,264,768 | 08a0ad4ec4cbb2f276e1179c4ea4d6f2484f328e | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/DeepLearningLearningFromTheFounderOfKeras/chapter4/chapter4_2.py | 169e6b99e6419949c52106331c23e305c0b8e016 | []
| no_license | https://github.com/foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #todo K-겹 교차 검증
#todo - Split the data into K folds of equal size; for each fold i, train the model on the remaining K - 1 folds and evaluate it on fold i.
#todo The final score is the average of the K scores obtained this way.
#todo This method helps when the model's performance varies a lot with the data split; like hold-out validation, it uses a separate validation set for model tuning.
import numpy as np
k = 4
data = []
test_data = data[int(len(data) * 0.7):]
data = data[:int(len(data) * 0.7)]
num_validation_samples = len(data) // k
class get_model:
def train(self, x):
pass
def evaluation(self, x):
pass
validation_scores = []
for fold in range(k):
validation_data = data[num_validation_samples * fold: num_validation_samples * (fold + 1)]
training_data = data[:num_validation_samples * fold] + data[num_validation_samples * (fold + 1):]
    model = get_model() # create a fresh, untrained model
model.train(training_data)
validation_score = model.evaluation(validation_data)
validation_scores.append(validation_score)
validation_score = np.average(validation_scores)
model = get_model()
model.train(data) # train the final model on all data except the test set
test_score = model.evaluation(test_data)
#todo Iterated K-fold cross-validation with shuffling
#todo - Use this method when relatively little data is available and you want to evaluate the model as accurately as possible.
#todo It helps a great deal in Kaggle competitions.
#todo It applies K-fold cross-validation several times, shuffling the data randomly each time before splitting it into K folds.
#todo The final score is the average of the scores obtained from every K-fold cross-validation run.
#todo In the end P x K models (P is the number of iterations) are trained and evaluated, so it is very expensive. | UTF-8 | Python | false | false | 2,027 | py | 577 | chapter4_2.py | 541 | 0.667154 | 0.661302 | 0 | 42 | 31.571429 | 101 |
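# Illustrative sketch (not part of the original notes): iterated K-fold validation with
# shuffling, reusing data, k, num_validation_samples and get_model defined above.
# The number of iterations `p` below is an arbitrary example value.
def iterated_k_fold_score(p=3):
    iteration_scores = []
    for _ in range(p):
        np.random.shuffle(data)  # reshuffle before splitting into K folds
        fold_scores = []
        for fold in range(k):
            validation_data = data[num_validation_samples * fold: num_validation_samples * (fold + 1)]
            training_data = data[:num_validation_samples * fold] + data[num_validation_samples * (fold + 1):]
            model = get_model()
            model.train(training_data)
            fold_scores.append(model.evaluation(validation_data))
        iteration_scores.append(np.average(fold_scores))
    return np.average(iteration_scores)  # average over all P x K runs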
fclintj/bash_setup | 14,242,111,559,692 | 9a845e0873fff89d1a618b25dfce20e5dcfb8eee | 9dce72b2a41433602b710e00470115ca364970f9 | /.autokey/unicode/rarrow.py | 371b0462ff89f79a12c967e7ea66fe8834688f28 | []
| no_license | https://github.com/fclintj/bash_setup | 7213c2a0a672f4a29d2aa1329df7bfcc902e7353 | 8e71a744302d604724bfab0e2bab843cd8867322 | refs/heads/master | 2021-01-02T08:38:00.518288 | 2018-09-28T06:10:54 | 2018-09-28T06:10:54 | 99,036,013 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # → U+2192
keyboard.send_keys("<ctrl>+<shift>+u+" + "2192")
keyboard.send_keys("<ctrl>"+" ")
| UTF-8 | Python | false | false | 95 | py | 60 | rarrow.py | 35 | 0.591398 | 0.505376 | 0 | 3 | 30 | 48 |
hw233/lolita_son | 8,830,452,781,327 | d48f995fbd283433642c3466a5da7443609e93ee | 8d6227b76789f4e64a8fda38c5b678114c81fd36 | /firefly/utils/interfaces.py | eb05a1d8ced6187ab77f3e7494104d7e391a7916 | [
"MIT"
]
| permissive | https://github.com/hw233/lolita_son | 2a2ddeb70bda323c6cf4c1ca1defa25c018c9801 | 8205dff0d423aaedfa7fca8790d1d6fe50213e6e | refs/heads/master | 2021-02-13T10:44:11.393406 | 2019-04-23T15:19:35 | 2019-04-23T15:19:35 | 244,689,286 | 0 | 1 | MIT | true | 2020-03-03T16:42:02 | 2020-03-03T16:42:02 | 2019-04-23T15:19:42 | 2019-04-23T15:19:40 | 17,647 | 0 | 0 | 0 | null | false | false | #coding:utf8
'''
Created on 2013-10-17
@author: lan (www.9miao.com)
'''
from __future__ import division, absolute_import
from zope.interface import Interface
class IDataPackProtoc(Interface):
    def getHeadlength():
        """Get the length of the data packet
"""
pass
def unpack():
        '''Unpack the data packet
'''
def pack():
        '''Pack the data packet
'''
| UTF-8 | Python | false | false | 427 | py | 248 | interfaces.py | 231 | 0.496222 | 0.471033 | 0 | 24 | 14.708333 | 48 |
Alex1820209/AprendiendoPython | 9,285,719,336,837 | 76ec0acebb5fd7b29f8a5d6e28390afb64da2e4b | d1c58b834a9d86b9447eb984213ffd56ec1cbdcb | /GuiaPython/compara.py | 0d9e737fa36c423d2ce7b8c38cc98daae347ec42 | []
| no_license | https://github.com/Alex1820209/AprendiendoPython | 201f4228220c09c4c3260ef2172d4dbd0a30e183 | 1b5642dcaa2caf13303d3ede48b94293f82a91fd | refs/heads/master | 2020-08-01T09:06:35.852829 | 2019-09-25T21:33:48 | 2019-09-25T21:33:48 | 210,944,026 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | numero1 =input("Numero 1: ")
numero2=input("Numero 2: ")
#display the values
salida="Numeros proporcionados: {} y {}. {}."
if (int(numero1)==int(numero2)):
    #this branch is taken if the captured values are equal
print(salida.format(numero1, numero2,"Los numeros son iguales"))
else:
    #an if inside another if is used (nested conditions)
    #reached when the numbers are not equal.
    if int(numero1)>int(numero2):
        #runs if the first value is greater than the second
print(salida.format(numero1, numero2, "el mayor es el primero"))
else:
        #otherwise the second one is greater
print(salida.format(numero1, numero2, "el mayor es el segundo"))
| UTF-8 | Python | false | false | 669 | py | 10 | compara.py | 9 | 0.660688 | 0.639761 | 0 | 16 | 39.6875 | 72 |
xingxingdegit/learnpython | 14,972,255,994,655 | ddd2afd21a88adeb95c0c0683ca7a1b14ffe7223 | 618389bc0bb267785af72ccc9e1cf0dcec874861 | /2.7/threadling/test2.py | b9113c626bd252f85134e12ea0c77dacbc2fd5b6 | []
| no_license | https://github.com/xingxingdegit/learnpython | 4479cb24d77af6b0bc50ee5cadd49a85848113e9 | 4a353363e5040c8a8c6a48cf3654583fbd1c54a4 | refs/heads/master | 2020-02-29T15:05:57.991568 | 2017-03-30T03:24:08 | 2017-03-30T03:24:08 | 65,520,341 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
import threading,time
i = 100
lock = threading.Lock()
def push(tid):
global i
check = True
while check:
# lock.acquire()
if i > 0:
i -= 1
print 'i is:',i,'the tid is:',tid
else:
print 'nothing','the tid is:',tid
check = False
# lock.release()
time.sleep(0.5)
for x in range(10):
new_thread = threading.Thread(target=push,args=(x,))
new_thread.start()
| UTF-8 | Python | false | false | 531 | py | 47 | test2.py | 43 | 0.495292 | 0.47646 | 0 | 26 | 18.769231 | 56 |
fmhr/procon | 4,973,572,174,651 | e967242d5223ad33cda44cca35aa6848694d7dff | a3c70d94274216451520905d83685cc3c6315473 | /atcoder/ABC/012/abc12c.py | 7b6ad8d55d0ecb3f14fcf5290e514c8e321ece72 | []
| no_license | https://github.com/fmhr/procon | b939827f03d8a5c0e2faada81ae14c11f861909d | e8bea807b9cc8b8781fc21c5e64ab20e4b453283 | refs/heads/master | 2019-08-27T10:53:57.594751 | 2019-01-28T09:05:06 | 2019-01-28T09:05:06 | 59,471,219 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def main():
n = 2025-int(input())
x = []
for i in range(1, 10, 1):
if n%i==0 and n//i<10:
print(i, 'x', n//i)
if __name__=='__main__':
main()
| UTF-8 | Python | false | false | 228 | py | 11,142 | abc12c.py | 777 | 0.421053 | 0.368421 | 0 | 14 | 15.285714 | 31 |
OMalenfantThuot/scripts_raman | 9,887,014,741,332 | 57d8788700f0047bb42d2c7f8e36075f2911a2ea | 5660cc796ba00f5090c7de6208c2e0a77c2328ba | /utils/functions/gramschmidt.py | a25bd32ad43e68be72b93b735fccbc39728a0748 | []
| no_license | https://github.com/OMalenfantThuot/scripts_raman | ac65fec1c5ac97a13b8c222e02241f02393f7709 | 2fd42a4b09d33fcf96da4b4d3340f67d21428b18 | refs/heads/master | 2023-07-19T20:11:10.457974 | 2023-07-18T18:15:26 | 2023-07-18T18:15:26 | 193,724,148 | 1 | 1 | null | false | 2023-08-28T20:00:38 | 2019-06-25T14:29:42 | 2021-12-14T20:08:26 | 2023-08-28T20:00:37 | 17,462 | 1 | 1 | 0 | Python | false | false | import numpy as np
def gramschmidt(basis, normalize=True):
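    # Classical Gram-Schmidt: subtract from each vector its projection onto every
    # previously orthogonalised vector; optionally normalise the result to unit length.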
basis = [np.array(vec, dtype=np.float64) for vec in basis]
nvectors = len(basis)
ndim = len(basis[0])
for vec in basis:
assert len(vec) == ndim
new_basis = []
for i in range(nvectors):
new_vec = basis[i].copy()
for vec in new_basis:
new_vec -= proj(basis[i], vec)
if normalize:
new_vec /= np.linalg.norm(new_vec)
new_basis.append(new_vec)
return np.array(new_basis)
def proj(v, u):
return np.dot(v, u) / np.dot(u,u) * u
| UTF-8 | Python | false | false | 583 | py | 71 | gramschmidt.py | 69 | 0.567753 | 0.562607 | 0 | 22 | 25.5 | 62 |
smutch/gpu_tests | 1,434,519,100,451 | ffd6abef4c171c66c2a2f5f4a895d51b1e7dd1e2 | c09bf27d535f52c1c613b843a54b4f8853e560b7 | /test.py | ca2e0068a4349dbe79b46eb7824f001ae65dad04 | []
| no_license | https://github.com/smutch/gpu_tests | 2f14c4b7ce969e04a2e4694503da513990311d8f | f9df91357d89310b1679e3a7c855e9b23ea32839 | refs/heads/master | 2020-04-21T07:02:07.926383 | 2019-02-06T09:36:22 | 2019-02-06T09:36:22 | 169,382,095 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.insert(0, "build/lib")
from mpi4py import MPI
import pygpu_tests
if MPI.COMM_WORLD.rank == 0:
print(f"Running test with {MPI.COMM_WORLD.size} ranks...")
pygpu_tests.init_cuda()
| UTF-8 | Python | false | false | 204 | py | 7 | test.py | 2 | 0.710784 | 0.696078 | 0 | 10 | 19.4 | 62 |
vedantshr/python-learning | 14,216,341,782,672 | 7019978851bf125c1db556b47650040e58f7900c | f5bf67a840af96182fafaf9528fac2b83fed62fa | /learning/armstrongno.py | 02a53dab85e7c4492624b8de5355bb4a89416355 | []
| no_license | https://github.com/vedantshr/python-learning | 4a6d4e2d230d8db4b4370a7ef9375bf4c4dc96c2 | 637efb29f2f13c47cd3d9b402735b15657392835 | refs/heads/main | 2023-06-28T04:24:25.215630 | 2021-07-28T15:13:11 | 2021-07-28T15:13:11 | 281,611,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def armstrong(ip):
n = 0
for i in range(len(ip)):
n = n + int(ip[i])**len(ip)
if n == int(ip):
return True
else:
return False
if __name__ == "__main__":
ip = input()
ip2 = input()
print (armstrong(ip))
print (armstrong(ip2)) | UTF-8 | Python | false | false | 283 | py | 136 | armstrongno.py | 86 | 0.480565 | 0.469965 | 0 | 15 | 17.933333 | 35 |
CiiMAV/python-cdr | 11,759,620,466,043 | 79c2a896e856771bb462f8aae1f643156213e53e | f65ebbcf5baf0cb649b28df191b38a75fbdb71fe | /test/sensor_accel_cdr.py | a81fafcad457a15c635066ce8d63a5a02ffa1b19 | []
| no_license | https://github.com/CiiMAV/python-cdr | c59bafd9b0ae2eaf5777f874cafe28e36b2be2f9 | 1cfa4ea30694b4e7173bdc0f63d2e8a2ce0dca22 | refs/heads/master | 2020-03-13T06:28:32.340915 | 2018-06-25T10:20:18 | 2018-06-25T10:20:18 | 131,005,536 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import ctypes
import sys
lib = ctypes.cdll.LoadLibrary('./sensor_accel_cdr.so')
class sensor_accel(object):
    """ctypes wrapper around the sensor_accel CDR deserialization library"""
def __init__(self):
lib.sensor_accel_new.restype = ctypes.c_void_p
lib.sensor_accel_deserialize.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int]
lib.sensor_accel_deserialize.restype = ctypes.c_void_p
lib.get_m_x.argtype = ctypes.c_void_p
lib.get_m_x.restype = ctypes.c_float
self.obj = lib.sensor_accel_new()
self.m_x = 0
def get_m_x(self):
self.m_x = lib.get_m_x(self.obj)
return self.m_x;
def deserialize(self,data,length):
lib.sensor_accel_deserialize(self.obj,data,length )
self.get_m_x()
| UTF-8 | Python | false | false | 694 | py | 18 | sensor_accel_cdr.py | 17 | 0.681556 | 0.680115 | 0 | 20 | 32.6 | 90 |
askemottelson/minigit | 1,520,418,425,058 | 0246f38a8434ff5b4800ba36b76be9228116b8b9 | 9965abf48dacdf15cf7c2b3b7fb7450dfcbdd2c1 | /gui/clonedialog.py | 1f29cb666219d8c17ddaeb322efed55a032974bd | []
| no_license | https://github.com/askemottelson/minigit | c72d5c98827fa57aed5639f1550d7181947553ba | 7468eead89f68d6a80cb0d5ad5dba0ce22cecb02 | refs/heads/master | 2016-08-04T04:29:48.543996 | 2014-12-07T14:37:31 | 2014-12-07T14:37:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PyQt4 import QtCore, QtGui
from clone import Ui_CloneDialog
class CloneDialog():
url = ""
path = ""
def __init__(self, parent):
self.parent = parent
def urlSelected(self):
root = str(QtGui.QFileDialog.getExistingDirectory(self.parent, "Select Directory"))
self.clonedialog_ui.path.setText(root)
def setup(self):
self.clonedialog = QtGui.QDialog(self.parent)
self.clonedialog_ui = Ui_CloneDialog()
self.clonedialog_ui.setupUi(self.clonedialog)
self.clonedialog_ui.browseButton.clicked.connect(self.urlSelected)
self.clonedialog.exec_()
self.url = self.clonedialog_ui.url.text()
self.path = self.clonedialog_ui.path.text()
def result(self):
return (self.url, self.path) | UTF-8 | Python | false | false | 744 | py | 13 | clonedialog.py | 8 | 0.700269 | 0.698925 | 0 | 32 | 22.28125 | 87 |
anitabaral/Exercism-exercises | 18,098,992,209,019 | 78310c21b47e2dc37e896f4e05cec0c133691294 | 10815fbddf1d85fbdf2655c1b41977496aa28a6f | /robot_name.py | 2dbc69ab107f223f6c2b27494c73bf00e36a9e5a | []
| no_license | https://github.com/anitabaral/Exercism-exercises | 983d6b6ce1c0ee8f0d70d5dbe8ab59e8e8b3e878 | 81034e5174ef6933e07bc595c9dd708265faeaaf | refs/heads/master | 2023-05-30T17:22:38.143483 | 2021-06-14T08:33:47 | 2021-06-14T08:33:47 | 363,824,732 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import string
class Robot:
def __init__(self):
random.seed()
self.reset()
def reset(self):
self.get_name()
def get_name(self):
text = []
for _ in range(2):
text.append(random.choice(string.ascii_uppercase))
for _ in range(3):
text.append(random.choice(string.digits))
self.name = "".join(text)
| UTF-8 | Python | false | false | 403 | py | 39 | robot_name.py | 39 | 0.543424 | 0.538462 | 0 | 18 | 21.388889 | 62 |
bibongbong/pythonCookBook | 16,630,113,411,239 | e90b83216d5bd040546fe3d9450c4855128bbce3 | 6d8f60ea657cdc4d82ae6f9272e3614a9ac6e525 | /learningStuff/getUrl_multiThread.py | ff30bf643f0a5e53e15b151c436afc088f3d1d74 | []
| no_license | https://github.com/bibongbong/pythonCookBook | 4d6dc3904a1afa1250304b66ec99a98c1a80649f | c92c75f2d5256de0bb2ec879a7d6484a22cc6bcd | refs/heads/master | 2021-06-10T01:37:26.178698 | 2021-06-01T13:52:18 | 2021-06-01T13:52:18 | 152,709,122 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ####################################################################
# File: getUrl.py
# Function: Learning to how to get the specific URL, and parse the HTML page to get the content you want
#
####################################################################
import urllib.request
import operator
from bs4 import BeautifulSoup
import asyncio
# testNum = [3,5,11,12,26,29,15]
#testNum = [11, 16, 19, 22, 25, 30, 8]
testNum = [8, 10, 17, 20, 27, 30, 1]
startUrl = 'http://kaijiang.zhcw.com/zhcw/inc/ssq/ssq_wqhg.jsp'
serverUrl = 'http://kaijiang.zhcw.com/'
isNeedProxy = True
def getWinNum(tr):
win_num = []
all_td = tr.find_all("td")
for em in all_td[2]:
if em.string.isdigit():
win_num.append(int(em.string))
return win_num
def getWinDate(tr):
all_td = tr.find_all("td")
winDate = all_td[0].string
return winDate
def getnextPageUrl(i):
return "http://kaijiang.zhcw.com/zhcw/inc/ssq/ssq_wqhg.jsp?pageNum="+str(i)
def getLastPageNum(all_tr):
nextPageTr = all_tr[-1]
all_strong = nextPageTr.find_all("strong")
nextPageItem = all_strong[5].a
return nextPageItem['href'][35:]
def getResponse(currentUrl):
print("current url is %s " % (currentUrl))
if isNeedProxy:
# set the http proxy
proxy_handler = urllib.request.ProxyHandler({"http": 'http://135.245.248.89:8000'})
opener = urllib.request.build_opener(proxy_handler)
urllib.request.install_opener(opener)
print("getResponse ",currentUrl)
# open the zhcw ShuangSeQiu page
response = urllib.request.urlopen(currentUrl)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
return soup
def loopAllWinNumInCurrentPage(all_tr,myNum):
#loop all winNum in current page
for tr in all_tr[2:-1]:
winNum = getWinNum(tr)
if winNum == myNum:
winDate = getWinDate(tr)
isFound = True
print("Found: %s" % winNum)
print(winDate)
raise StopIteration
def searchFirstPage(currentUrl, myNum):
soup = getResponse(currentUrl)
all_tr = soup.find_all("tr")
isFound = False
loopAllWinNumInCurrentPage(all_tr,myNum)
lastPageNum = getLastPageNum(all_tr)
return lastPageNum
async def searchPage(i, myNum):
    print(i)
    currentUrl = getnextPageUrl(i)
    # getResponse() uses blocking urllib calls, so run it in the default
    # thread-pool executor instead of blocking the event loop.
    loop = asyncio.get_event_loop()
    soup = await loop.run_in_executor(None, getResponse, currentUrl)
    all_tr = soup.find_all("tr")
    loopAllWinNumInCurrentPage(all_tr, myNum)
lastPage = searchFirstPage(startUrl, testNum)
print("lastPage ",lastPage)
loop = asyncio.get_event_loop()
tasks = [searchPage(i, testNum) for i in range(int(lastPage))]
loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
| UTF-8 | Python | false | false | 2,840 | py | 80 | getUrl_multiThread.py | 80 | 0.601408 | 0.57993 | 0 | 105 | 25.028571 | 104 |
Renjiewu/2019-3d- | 11,184,094,877,899 | 292100defb8ac84c6e43675a4170fb50e2e56556 | c249a7472d64b0bac788aa1d106958e94b888bfc | /rs1_1.py | c653a657a40f456e7996e48a6180bfe22d151111 | []
| no_license | https://github.com/Renjiewu/2019-3d- | 88fe444fd7795d75d1044d47999eb00dbfd75551 | a2e71e5ef1c934a3fc38117ac37bfec4f13939bc | refs/heads/master | 2020-09-20T06:41:02.264820 | 2019-11-27T10:28:13 | 2019-11-27T10:28:13 | 224,402,171 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 7 17:58:18 2019
@author: w3321
"""
import pyrealsense2 as rs
import numpy as np
import cv2
from . import rs1_cv as rs1
import math
import time
import os
from . import square_distance_angle as da
import pandas as pd
from .. import output2 as op
from .. import find
pc = rs.pointcloud()
# We want the points object to be persistent so we can display the last cloud when a frame drops
points = rs.points()
imgk=np.ones([550,550,3],np.uint8)*255
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth,640,480,rs.format.z16,30)
config.enable_stream(rs.stream.color,640,480,rs.format.bgr8,30)
# Start streaming
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
global vtx
def GetLineLength(p1,p2):
    '''Compute the edge length (Euclidean distance) between two 3D points'''
length = math.pow((p2[0]-p1[0]),2) + math.pow((p2[1]-p1[1]),2)+ math.pow((p2[2]-p1[2]),2)
length = math.sqrt(length)
return length
depth_pixel = [320, 240]
def draw_circle(event,x,y,flags,param):
global depth_pixel
if event == cv2.EVENT_MOUSEMOVE:
depth_pixel = [x, y]
def get_deep():
while True:
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
if not depth_frame or not color_frame:
continue
break
return depth_frame,color_frame
def get_xyz(p):
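    # p is an (x, y) pixel; the point cloud `vtx` is a flat, row-major array for a
    # 640x480 frame, so the vertex index of that pixel is y*640 + x.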
global vtx
i=p[1]*640+p[0]
p1=[np.float(vtx[i][0]),np.float(vtx[i][1]),np.float(vtx[i][2])]
return p1
def get_one(path1):
global vtx
#time.sleep(5)
start=time.time()
pipe_profile=pipeline.start(config)
depth_sensor = pipe_profile.get_device().first_depth_sensor()
depth_sensor.set_option(rs.option.visual_preset,1)
depth_sensor = pipe_profile.get_device().first_depth_sensor()
depth_sensor.set_option(rs.option.visual_preset,1)
sensor = pipe_profile.get_device().query_sensors()[1]
sensor.set_option(rs.option.enable_auto_exposure,1)
sensor.set_option(rs.option.backlight_compensation,0.000)
sensor.set_option(rs.option.brightness,0.000)
sensor.set_option(rs.option.contrast,50.000)
sensor.set_option(rs.option.gain,61.000)
sensor.set_option(rs.option.gamma,220.000)
sensor.set_option(rs.option.hue,0.000)
sensor.set_option(rs.option.saturation,68.000)
sensor.set_option(rs.option.sharpness,50.000)
cv2.namedWindow('depth_frame')
cv2.setMouseCallback('depth_frame',draw_circle)
while True:
depth_frame1,color_frame1=get_deep()
depth_frame2,color_frame2=get_deep()
depth_frame,color_frame=get_deep()
img_color = np.asanyarray(color_frame.get_data())
img_test =img_color.copy()
img_depth1 = np.asanyarray(depth_frame1.get_data())
img_depth2 = np.asanyarray(depth_frame2.get_data())
img_depth = np.asanyarray(depth_frame.get_data())
img_depthx=(img_depth1+img_depth2+img_depth)//3
depth_colormap1 = cv2.applyColorMap(cv2.convertScaleAbs(img_depth1, alpha=0.03), cv2.COLORMAP_BONE)
depth_colormap2 = cv2.applyColorMap(cv2.convertScaleAbs(img_depth2, alpha=0.03), cv2.COLORMAP_BONE)
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(img_depth, alpha=0.03), cv2.COLORMAP_BONE)
p,imgf=rs1.rs1_cv(depth_colormap1,depth_colormap2,depth_colormap)
# Intrinsics & Extrinsics
depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)
# Depth scale - units of the values inside a depth frame, i.e how to convert the value to units of 1 meter
depth_sensor = pipe_profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
# Map depth to color
depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, depth_scale)
color_point = rs.rs2_transform_point_to_point(depth_to_color_extrin, depth_point)
color_pixel = rs.rs2_project_point_to_pixel(color_intrin, color_point)
#print ('depth: ',color_point)
#print ('depth: ',color_pixel)
x1,y1=depth_pixel
pc.map_to(color_frame)
points = pc.calculate(depth_frame)
v, t = points.get_vertices(), points.get_texture_coordinates()
vtx = np.asanyarray(v).view(np.float32).reshape(-1, 3) # xyz
tex = np.asanyarray(t).view(np.float32).reshape(-1, 2) # uv
i = y1*640+x1
'''
i0=p[0][1]*640+p[0][0]
i1=p[1][1]*640+p[1][0]
i2=p[2][1]*640+p[2][0]
i3=p[3][1]*640+p[3][0]
        p0=[np.float(vtx[i0][0]),np.float(vtx[i0][1]),np.float(vtx[i0][2])]#bottom-left
        p1=[np.float(vtx[i1][0]),np.float(vtx[i1][1]),np.float(vtx[i1][2])]#bottom-right
        p2=[np.float(vtx[i2][0]),np.float(vtx[i2][1]),np.float(vtx[i2][2])]#top-left
        p3=[np.float(vtx[i3][0]),np.float(vtx[i3][1]),np.float(vtx[i3][2])]#top-right
'''
p0=get_xyz(p[0])
p1=get_xyz(p[1])
p2=get_xyz(p[2])
p3=get_xyz(p[3])
l0=GetLineLength(p0,p1)
l1=GetLineLength(p0,p2)
l2=GetLineLength(p1,p3)
if l0==0:
continue
r=550/l0#mm/xlength
        l1_1=int(r*l1)#left-edge length in pixels
        l2_1=int(r*l2)#right-edge length in pixels
#print(r,p0,p1,p2,p3)
pts1 = np.float32([[0,550],[550,550],[0,550-l1_1],[550,550-l2_1]])
pts2 = np.float32([[p[0][0],p[0][1]],[p[1][0],p[1][1]],[p[2][0],p[2][1]],[p[3][0],p[3][1]]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(imgk,M,(640,480))
dst=cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
imgn, cnt, h = cv2.findContours(dst, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if cnt:
cnt=cnt[0]
cv2.drawContours(img_color, [cnt], 0, (0,255,0), 1)
approx = cv2.approxPolyDP(cnt,8,True)
cv2.drawContours(img_color, [approx], 0, (0,0,255), 3)
a=np.asarray(approx)
a=a.reshape(-1,2)
#print(tuple(a[0]))
'''
cv2.circle(img_color,tuple(a[0]),5,(255,0,0),-1)
cv2.circle(img_color,tuple(a[1]),5,(255,0,0),-1)
cv2.circle(img_color,tuple(a[2]),5,(255,0,0),-1)
cv2.circle(img_color,tuple(a[3]),5,(255,0,0),-1)
'''
#print ('depth: ',[np.float(vtx[i][0]),np.float(vtx[i][1]),np.float(vtx[i][2])])
cv2.circle(img_color,(x1,y1), 8, [255,0,255], thickness=-1)
cv2.putText(img_color,"Dis:"+str(img_depth[y1,x1]), (40,40), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
cv2.putText(img_color,"X:"+str(np.float(vtx[i][0])), (80,80), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
cv2.putText(img_color,"Y:"+str(np.float(vtx[i][1])), (80,120), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
cv2.putText(img_color,"Z:"+str(np.float(vtx[i][2])), (80,160), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
cv2.imshow('depth_frame',img_color)
#cv2.imshow('3',depth_colormap)
cv2.imshow('4',imgf)
#cv2.imshow('5',dst)
key = cv2.waitKey(1)
end=time.time()
if (end-start)>5:
#data = pd.DataFrame(img_depth)
#data.to_csv('./1.csv',index = False)
#if key & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
pipeline.stop()
cv2.imwrite(path1+'/test_image/img_test.jpg',img_test)
return r,p0,p1,p2,p3,img_depth,img_test
def find_p(imgs):
m=find.find(imgs)
n=[]
b=0
for i in m:
if imgs[int(i[0,0]),int(i[0,1])] !=0:
#print([int(i[0,0]),int(i[0,1])])
n+=[int(i[0,0]),int(i[0,1])]
if n==[]:
b=1
n=np.asarray(n).reshape(-1,2)
return n,b
def fake_det(img,pm0):
p=0
if (img[pm0[1]+30,pm0[0]]!=0):
if abs(int(img[pm0[1]+30,pm0[0]])-int(img[pm0[1],pm0[0]]))<700:
if abs(int(img[pm0[1],pm0[0]])-int(img[pm0[1]+30,pm0[0]]))>350:
#print(abs(int(img[pm0[1],pm0[0]])-int(img[pm0[1]+30,pm0[0]])))
print('error')
p=1
return p
def out(list,p0,p1,p2,p3,img):
z=[['ZA001',0,0,0,0],
['ZA002',0,0,0,0],
['ZA003',0,0,0,0],
['ZA004',0,0,0,0],
['ZA005',0,0,0,0],
['ZB001',0,0,0,0],
['ZB002',0,0,0,0],
['ZB003',0,0,0,0],
['ZB004',0,0,0,0],
['ZB005',0,0,0,0],
['ZB006',0,0,0,0],
['ZB007',0,0,0,0],
['ZB008',0,0,0,0],
['ZB009',0,0,0,0],
['ZB010',0,0,0,0],
['ZC001',0,0,0,0],
['ZC002',0,0,0,0],
['ZC003',0,0,0,0],
['ZC004',0,0,0,0],
['ZC005',0,0,0,0],
['ZC006',0,0,0,0],
['ZC007',0,0,0,0],
['ZC008',0,0,0,0],
['ZC009',0,0,0,0],
['ZC010',0,0,0,0],
['ZC011',0,0,0,0],
['ZC012',0,0,0,0],
['ZC013',0,0,0,0],
['ZC014',0,0,0,0],
['ZC015',0,0,0,0],
['ZC016',0,0,0,0],
['ZC017',0,0,0,0],
['ZC018',0,0,0,0],
['ZC019',0,0,0,0],
['ZC020',0,0,0,0],
['ZC021',0,0,0,0],
['ZC022',0,0,0,0],
['ZC023',0,0,0,0],
['CA001',0,0,0,0],
['CA002',0,0,0,0],
['CA003',0,0,0,0],
['CA004',0,0,0,0],
['CD001',0,0,0,0],
['CD002',0,0,0,0],
['CD003',0,0,0,0],
['CD004',0,0,0,0],
['CD005',0,0,0,0],
['CD006',0,0,0,0]]
b=[['ZA001',1,0,15],
['ZA002',0,0,42],
['ZA003',1,1,20],
['ZA004',0,1,25],
['ZA005',1,0,35],
['ZB001',0,0,32],
['ZB002',0,0,32],
['ZB003',0,0,20],
['ZB004',1,0,15],
['ZB005',0,0,25],
['ZB006',0,0,40],
['ZB007',1,0,20],
['ZB008',0,0,32],
['ZB009',0,1,30],
['ZB010',1,0,20],
['ZC001',0,1,15],
['ZC002',0,1,15],
['ZC003',0,1,15],
['ZC004',0,1,15],
['ZC005',0,0,15],
['ZC006',0,1,15],
['ZC007',0,1,15],
['ZC008',0,1,15],
['ZC009',0,1,15],
['ZC010',0,1,15],
['ZC011',1,1,15],
['ZC012',1,1,15],
['ZC013',0,1,15],
['ZC014',0,0,15],
['ZC015',0,0,32],
['ZC016',0,0,32],
['ZC017',0,0,32],
['ZC018',0,0,32],
['ZC019',0,0,32],
['ZC020',0,0,32],
['ZC021',0,0,32],
['ZC022',0,0,32],
['ZC023',0,0,32],
['CA001',0,0,32],
['CA002',0,0,32],
['CA003',0,0,32],
['CA004',0,0,32],
['CD001',0,0,32],
['CD002',0,0,32],
['CD003',0,0,32],
['CD004',0,0,32],
['CD005',0,0,32],
['CD006',0,0,32]]
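    # Interpretation of the per-class table `b` (inferred from its use below):
    # b[idx][1] == 1 -> an orientation angle is computed for this class,
    # b[idx][2] == 1 -> the centre point is refined with find_p(),
    # b[idx][3]      -> per-class height offset subtracted from the measured height.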
for i in list[1:]:
zr=0.
y1=int(i[1])
x1=int(i[2])
y2=int(i[3])
x2=int(i[4])
x=(x1+x2)//2
y=(y1+y2)//2
p=[x,y]
p10=[x-5,y]
p11=[x+5,y]
if b[int(i[6]-1)][2]==1:
imgs=img[y1:y2,x1:x2]
m,c=find_p(imgs)
if c==0:
#print(m)
j=m.shape[0]//2
pm0=[m[j][1]+x1,m[j][0]+y1]
'''
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(img, alpha=0.03), cv2.COLORMAP_BONE)
for l in m:
cv2.circle(depth_colormap,(int(l[1])+x1,int(l[0])+y1), 1, [0,0,255], thickness=-1)
#img_color=cv2.imread('../../vision_output/output/result/img_test.jpg')
#cv2.circle(img_color,(x,y), 8, [255,0,255], thickness=-1)
cv2.circle(depth_colormap,(pm0[0],pm0[1]), 2, [255,0,255], thickness=-1)
cv2.circle(depth_colormap,(pm0[0],pm0[1]+10), 2, [255,0,255], thickness=-1)
cv2.imshow('1',depth_colormap)
cv2.waitKey()
cv2.destroyAllWindows()
'''
else:
print('s')
pm0=p
else:
pm0=p
fd=fake_det(img,pm0)
if fd ==1:
continue
pm=get_xyz(pm0)
p21=get_xyz(p10)
p22=get_xyz(p11)
zb=da.finddistan(p0,p2,p1,pm)
if b[int(i[6]-1)][1]==1:
zr=da.findangle(p0,p2,p1,p21,p22)
if (b[int(i[6]-1)][0]=='ZC011') or (b[int(i[6]-1)][0]=='ZC011'):
pass
elif img[p10[1],p10[0]]<img[p11[1],p11[0]]:
zr=180-zr
z[int(i[6]-1)][4]+=1
z[int(i[6]-1)][1]+=zb[0]
z[int(i[6]-1)][2]+=zb[1]-b[int(i[6]-1)][3]
z[int(i[6]-1)][3]+=zr
return z
'''
path=os.path.dirname(os.path.abspath(__file__))
path1=os.path.normpath(path+'../../../vision_output/output')
start1=time.time()
r,p0,p1,p2,p3,img_d,img_c=get_one(path1)
op.tf_dete(pth=path1)
#pipeline.stop()
#print(r,p0,p1,p2,p3)
#print(path1)
#os.system('python '+path1)
data=pd.read_csv(path1+'/result/img_test.jpg.csv',header=None)
list=data.values.tolist()
z=out(list,p0,p1,p2,p3,img_d)
zc=np.asanyarray(z).reshape(-1,4)
print(zc)
end1=time.time()
print(end1-start1)
''' | UTF-8 | Python | false | false | 13,887 | py | 5 | rs1_1.py | 5 | 0.511092 | 0.407255 | 0 | 402 | 32.430348 | 114 |
mrerren/Workshop-Python-101 | 1,992,864,851,537 | a7c8a24deefcbe885d014286807e1c060d423026 | 2b7f99469ebf47efddc522503df0742cef2abd10 | /8-OOP/2-Class/lat31.py | 1fdb44bf436a7a961c9807e2fde79febe5501e8c | []
| no_license | https://github.com/mrerren/Workshop-Python-101 | 961c15ed972cc6b3bc3cd9cf36b8f915794751ca | 5a870d0664f3119348c12c01ab76968055d4a6c1 | refs/heads/master | 2023-03-05T10:43:49.830222 | 2020-12-18T08:54:12 | 2020-12-18T08:54:12 | 321,860,883 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # lat31.py
class Orang:
pass
org = Orang()
print(org) | UTF-8 | Python | false | false | 59 | py | 69 | lat31.py | 46 | 0.627119 | 0.59322 | 0 | 7 | 7.571429 | 13 |
444thLiao/my_toolkit | 19,155,554,146,969 | 3fa5b7b0e26f8fcfcef8376d3a32a505cd49b06c | 9ea705a75261fc8a128f016e0af37ee413ad98d7 | /vis_for/newick/draw_tanglegram.py | 6d82242460e6b606bc438cc6e3cf5d6bca6a6a4e | []
| no_license | https://github.com/444thLiao/my_toolkit | a3ec207f36978d8fe40278b43d3e7b63e00d690b | a5b7cda90d28d98edf90456dca34da3824aa5ce7 | refs/heads/master | 2020-05-09T12:11:45.161286 | 2020-05-08T07:38:11 | 2020-05-08T07:38:11 | 181,104,919 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This script is mainly written for drawing tanglegrams, which connect the leaves of two dendrograms.
It also implements some helper functions for working with iTOL annotation files.
To add colours to the leaves of the dendrograms you can use an iTOL file, or something simpler such as the same file with the descriptions removed.
"""
from collections import defaultdict
import click
import os
import plotly
import plotly.graph_objs as go
from tqdm import tqdm
from Tree_for.api_IO import read_tree
from Tree_for.plotly_draw_newick_tree import main as get_plotly_data_from_newick
def get_leafs(newick):
t = read_tree(newick, format='auto')
return t.get_leaf_names()
def get_preferred_scale(newick1, newick2):
t1 = read_tree(newick1, format='auto')
t2 = read_tree(newick2, format='auto')
num_1 = len(t1.get_leaf_names())
num_2 = len(t2.get_leaf_names())
yscale = num_1 / num_2
return 1 / yscale
def parse_color_scheme_files(file, extra_set=False,get_raw_name=False):
"""
    :param file: path to an iTOL-style annotation file
    :param extra_set: if 'rename', simplify leaf names by keeping the part after the last '_' and replacing '.' with 'v'
    :param get_raw_name: if True, return the colour-to-range-name mapping instead of the leaf-to-colour mapping
    :return: dict mapping each leaf name to its colour (or each colour to its range name when get_raw_name is True)
"""
lines = open(file).read().split('\n')
lines = [_ for _ in lines if _]
field_labels = [_ for _ in lines if _.startswith("FIELD_LABELS")]
field_colors = [_ for _ in lines if _.startswith("FIELD_COLORS")]
name2color = {}
sep_indicator = [_ for _ in lines if _.startswith("SEPARATOR")][0]
sep_indicator = sep_indicator.split(' ')[-1]
s2s = {"TAB": '\t', "COMMA": ',', "SPACE": ' '}
sep = s2s.get(sep_indicator, ',')
if field_labels and field_colors:
colors = field_colors[0].split('\t')[1:]
anno_names = field_labels[0].split('\t')[1:]
_name2data = [_ for _ in lines[lines.index("DATA") + 1:] if _ and not _.startswith('#')]
for line in _name2data:
line.strip('\n')
vs = line.split('\t')
name = vs[0]
color = [c for c, _ in zip(colors, vs[1:]) if str(_) != '0']
if color:
name2color[name] = color[0] # would overlap.. be careful..
else:
lines = [_ for _ in lines[lines.index("DATA") + 1:] if _ and not _.startswith('#')]
c2name = {}
for line in lines:
values = line.split(sep)
name = values[0]
if "range" in line:
color = values[2]
c2name[color] = values[-1]
else:
color = values[1]
name2color[name] = color
if get_raw_name:
return c2name
if str(extra_set) == 'rename':
new_name2color = {k.split('_')[-1].replace('.', 'v'): v
for k, v in name2color.items()}
name2color = new_name2color.copy()
return name2color
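# For reference, a minimal colour file accepted by the branch above (without FIELD_LABELS)
# could look like this -- illustrative only, not a file shipped with this repository:
#   SEPARATOR COMMA
#   DATA
#   leaf_A,#ff0000
#   leaf_B,#00ff00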
def main(newick1, newick2,
color_file1, color_file2,
l_legnth='max', sep='_', extra_set=False, identical=False):
left_leaves = get_leafs(newick1)
right_leaves = get_leafs(newick2)
yscale = get_preferred_scale(newick1, newick2)
fig = plotly.subplots.make_subplots(rows=1, cols=3, shared_yaxes=True,
horizontal_spacing=0.05/3)
# get dendrogram parts
tqdm.write('drawing dendrograms')
datas, labels, _, labels_draw_text, labels_x, labels_y = get_plotly_data_from_newick(newick1,
fixed_length=l_legnth,
yscale=yscale)
datas2, labels2, _, labels_draw_text2, labels_x2, labels_y2 = get_plotly_data_from_newick(newick2,
fixed_length=l_legnth)
# add colors or something else into above datas
l2color = {_: '#000000' for _ in labels_draw_text}
r2color = {_: '#000000' for _ in labels_draw_text2}
l2color.update(parse_color_scheme_files(color_file1, extra_set=False))
r2color.update(parse_color_scheme_files(color_file2, extra_set=extra_set))
# add color into generated trace
tqdm.write('adding color')
name2trace = {_['name']: _ for _ in datas if _['name'] is not None}
for l, color in l2color.items():
if color != '#00000' and l in labels_draw_text:
trace = name2trace[l]
trace['line']['color'] = color
name2trace = {_['name']: _ for _ in datas2 if _['name'] is not None}
for r, color in r2color.items():
if color != '#00000' and r in labels_draw_text2:
trace = name2trace[r]
trace['line']['color'] = color
# add dendrogram parts into figure.
# data1/newick1 would be the left, data2/newick2 would be the right and the leaves of it will point to left
# for _ in datas:
fig.add_traces(datas, rows=[1] * len(datas), cols=[1] * len(datas))
# for _ in datas2:
fig.add_traces(datas2, rows=[1] * len(datas2), cols=[3] * len(datas2))
# init data of middle part
# get the y-coordinate information from below. put them into two dict.
left_data = {k:v for k,v in dict(zip(labels_draw_text, labels_y)).items() if k in left_leaves}
right_data = {k:v for k,v in dict(zip(labels_draw_text2, labels_y2)).items() if k in right_leaves}
# get mapping relationship, default is from left to the right.. one to multi
# so, the leaf names from the left tree should be the part of right, separate with `underline` or `space`
l2r = defaultdict(list)
for leaf1 in left_data.keys():
if identical:
leaf2s = [r for r in right_data.keys() if leaf1 == r]
else:
leaf2s = [r for r in right_data.keys() if leaf1.split(sep)[0] == r]
l2r[leaf1] = leaf2s
# init the data from above mapping dict
c2data = defaultdict(lambda: ([], []))
for l, color in l2color.items():
_xs = c2data[color][0]
_ys = c2data[color][1]
rs = l2r.get(l, [])
l_y = left_data.get(l, 0)
for r in rs:
r_y = right_data[r]
_xs += [0, 1, None]
_ys += [l_y, r_y, None]
for color, (_xs, _ys) in c2data.items():
trace = go.Scatter(x=_xs,
y=_ys,
mode='lines',
line=dict(color=color, ),
hoverinfo='none',
showlegend=True)
fig.add_traces([trace], rows=[1], cols=[2])
fig.layout.xaxis3.autorange = 'reversed'
fig.layout.xaxis.showticklabels = False
fig.layout.xaxis2.showticklabels = False
fig.layout.xaxis3.showticklabels = False
fig.layout.xaxis.zeroline = False
fig.layout.xaxis2.zeroline = False
fig.layout.xaxis3.zeroline = False
fig.layout.yaxis.zeroline = False
fig.layout.yaxis.showticklabels = False
return fig
@click.command()
@click.option("-newick1", help="first tree file, would be placed on the left")
@click.option("-newick2", help="second tree file, would be placed on the right")
@click.option("-output", "output_file", help="the path of html output ")
@click.option("-cf1", help="the file for color annotation to first tree and the links")
@click.option("-cf2", help="the file for color annotation to second tree ")
@click.option("-length", default="max", help="the length to which leaves are extended. Normally all leaves are extended to an identical length, similar to the longest one")
@click.option("-sep", default="_", help="the separator used to split the leaf names of the first tree when matching them to leaves of the second tree")
@click.option("-extra_set", "extra_set", is_flag=True, default=False)
@click.option("-identical", "identical", is_flag=True, default=False, help='pass this flag if the leaf names of the two trees are identical; otherwise redundant links will show up')
def cli(newick1, newick2, output_file, cf1, cf2, length, sep, extra_set,identical):
fig = main(newick1=newick1,
newick2=newick2,
color_file1=cf1,
color_file2=cf2,
l_legnth=length,
sep=sep,
extra_set=extra_set,
identical=identical)
fig.layout.width = 1400
fig.layout.height = 3000
if not exists(dirname(output_file)):
os.makedirs(dirname(output_file))
fig.write_html(output_file)
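# Example invocation (illustrative only; the file names below are placeholders):
#   python draw_tanglegram.py -newick1 left.newick -newick2 right.newick \
#          -cf1 left_colors.txt -cf2 right_colors.txt \
#          -output out/tanglegram.html -sep _ -identical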
if __name__ == '__main__':
from os.path import dirname, join, exists
example_dir = join(dirname(dirname(dirname(__file__))), 'example', 'tanglegram')
newick1 = join(example_dir, 'nxrA.newick')
newick2 = join(example_dir, 'species.txt')
color_file1 = join(example_dir, 'gene_annotation.txt')
color_file2 = join(example_dir, 'phylum_annotate.txt')
fig = main(newick1, newick2,
color_file1, color_file2,
l_legnth='max', sep='_', extra_set='rename')
fig.layout.width = 1400
fig.layout.height = 3000
fig.write_html(join(example_dir, 'p.html'))
| UTF-8 | Python | false | false | 8,875 | py | 35 | draw_tanglegram.py | 30 | 0.58738 | 0.566535 | 0 | 217 | 39.898618 | 176 |
udemirezen/rational_activations | 6,012,954,218,353 | ed9e090bd0b27670529672ad6d04bcd92f4118a1 | 22ad9abeb6c9bcddb6ee127309cd1afce9e1cf3c | /rational/torch/rationals.py | 2551ab2c8038a17dd89087ea537af0a9196e9539 | [
"MIT"
]
| permissive | https://github.com/udemirezen/rational_activations | 8e9abf3475d78251ddb8e9f0b37bd6bbf8135dbb | 5bc4ade862eb8967e940afb15d33e0d6653668cc | refs/heads/master | 2023-08-11T15:38:01.254359 | 2021-08-10T13:23:22 | 2021-08-10T13:23:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Rational Activation Functions for Pytorch
=========================================
This module allows you to create Rational Neural Networks using Learnable
Rational activation functions with Pytorch networks.
"""
import torch
from torch._C import device
import torch.nn as nn
from torch.cuda import is_available as torch_cuda_available
from rational.utils.get_weights import get_parameters
from rational.utils.warnings import RationalWarning
from rational._base.rational_base import Rational_base
from rational.torch.rational_pytorch_functions import Rational_PYTORCH_A_F, \
Rational_PYTORCH_B_F, Rational_PYTORCH_C_F, Rational_PYTORCH_D_F, \
Rational_NONSAFE_F, Rational_CUDA_NONSAFE_F, _get_xps
if torch_cuda_available():
try:
from rational.torch.rational_cuda_functions import Rational_CUDA_A_F, \
Rational_CUDA_B_F, Rational_CUDA_C_F, Rational_CUDA_D_F
except ImportError:
pass
class Rational(Rational_base, nn.Module):
"""
Rational activation function inherited from ``torch.nn.Module``.
Arguments:
approx_func (str):
The name of the approximated function for initialisation. \
The different initialable functions are available in \
`rational.rationals_config.json`. \n
Default ``leaky_relu``.
degrees (tuple of int):
The degrees of the numerator (P) and denominator (Q).\n
Default ``(5, 4)``
cuda (bool):
Use GPU CUDA version. \n
If ``None``, use cuda if available on the machine\n
Default ``None``
version (str):
Version of Rational to use. Rational(x) = P(x)/Q(x)\n
`A`: Q(x) = 1 + \|b_1.x\| + \|b_2.x\| + ... + \|b_n.x\|\n
`B`: Q(x) = 1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`C`: Q(x) = 0.1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`D`: like `B` with noise\n
Default ``A``
trainable (bool):
If the weights are trainable, i.e, if they are updated during \
backward pass\n
Default ``True``
Returns:
Module: Rational module
"""
def __init__(self, approx_func="leaky_relu", degrees=(5, 4), cuda=None,
version="A", trainable=True, train_numerator=True,
train_denominator=True, name=None):
if name is None:
name = approx_func
super().__init__(name)
if cuda is None:
cuda = torch_cuda_available()
if cuda is True:
device = "cuda"
elif cuda is False:
device = "cpu"
else:
device = cuda
w_numerator, w_denominator = get_parameters(version, degrees,
approx_func)
self.numerator = nn.Parameter(torch.FloatTensor(w_numerator).to(device),
requires_grad=trainable and train_numerator)
self.denominator = nn.Parameter(torch.FloatTensor(w_denominator).to(device),
requires_grad=trainable and train_denominator)
self.register_parameter("numerator", self.numerator)
self.register_parameter("denominator", self.denominator)
self.device = device
self.degrees = degrees
self.version = version
self.training = trainable
self.init_approximation = approx_func
self._saving_input = False
if "cuda" in str(device):
if version == "A":
rational_func = Rational_CUDA_A_F
elif version == "B":
rational_func = Rational_CUDA_B_F
elif version == "C":
rational_func = Rational_CUDA_C_F
elif version == "D":
rational_func = Rational_CUDA_D_F
elif version == "N":
rational_func = Rational_CUDA_NONSAFE_F
else:
raise NotImplementedError(f"version {version} not implemented")
if 'apply' in dir(rational_func):
self.activation_function = rational_func.apply
else:
self.activation_function = rational_func
else:
if version == "A":
rational_func = Rational_PYTORCH_A_F
elif version == "B":
rational_func = Rational_PYTORCH_B_F
elif version == "C":
rational_func = Rational_PYTORCH_C_F
elif version == "D":
rational_func = Rational_PYTORCH_D_F
elif version == "N":
rational_func = Rational_NONSAFE_F
else:
raise NotImplementedError(f"version {version} not implemented")
self.activation_function = rational_func
def forward(self, x):
return self.activation_function(x, self.numerator, self.denominator,
self.training)
def _cpu(self):
if self.version == "A":
rational_func = Rational_PYTORCH_A_F
elif self.version == "B":
rational_func = Rational_PYTORCH_B_F
elif self.version == "C":
rational_func = Rational_PYTORCH_C_F
elif self.version == "D":
rational_func = Rational_PYTORCH_D_F
elif self.version == "N":
rational_func = Rational_NONSAFE_F
else:
raise ValueError("version %s not implemented" % self.version)
self.activation_function = rational_func
self.device = "cpu"
def _cuda(self, device):
if self.version == "A":
rational_func = Rational_CUDA_A_F
elif self.version == "B":
rational_func = Rational_CUDA_B_F
elif self.version == "C":
rational_func = Rational_CUDA_C_F
elif self.version == "D":
rational_func = Rational_CUDA_D_F
elif self.version == "N":
rational_func = Rational_CUDA_NONSAFE_F
else:
raise ValueError("version %s not implemented" % self.version)
if "cuda" in str(device):
self.device = f"{device}"
else:
self.device = f"cuda:{device}"
if 'apply' in dir(rational_func):
self.activation_function = rational_func.apply
else:
self.activation_function = rational_func
def _to(self, device):
"""
Moves the rational function to its specific device. \n
Arguments:
device (torch device):
The device for the rational
"""
if "cpu" in str(device):
self.cpu()
elif "cuda" in str(device):
self.cuda(device)
def _apply(self, fn):
if "Module.cpu" in str(fn):
self._cpu()
elif "Module.cuda" in str(fn):
device = fn.__closure__[0].cell_contents
self._cuda(device)
elif "Module.to" in str(fn):
for clos in fn.__closure__:
if type(clos.cell_contents) is torch.device:
device = clos.cell_contents
self.device = str(device)
self._to(device)
break
return super()._apply(fn)
def numpy(self):
"""
Returns a numpy version of this activation function.
"""
from rational.numpy import Rational as Rational_numpy
rational_n = Rational_numpy(self.init_approximation, self.degrees,
self.version)
rational_n.numerator = self.numerator.tolist()
rational_n.denominator = self.denominator.tolist()
return rational_n
def _from_old(self, old_rational_func):
self.version = old_rational_func.version
self.degrees = old_rational_func.degrees
self.numerator = old_rational_func.numerator
self.denominator = old_rational_func.denominator
if "center" in dir(old_rational_func) and old_rational_func.center != 0:
print("Found a non zero center, please adapt the bias of the",
"previous layer to have an equivalent neural network")
self.training = old_rational_func.training
if "init_approximation" not in dir("init_approximation"):
self.init_approximation = "leaky_relu"
else:
self.init_approximation = old_rational_func.init_approximation
if "cuda" in str(self.device):
if self.version == "A":
rational_func = Rational_CUDA_A_F
elif self.version == "B":
                rational_func = Rational_CUDA_B_F
elif self.version == "C":
rational_func = Rational_CUDA_C_F
elif self.version == "D":
rational_func = Rational_CUDA_D_F
elif self.version == "N":
rational_func = Rational_CUDA_NONSAFE_F
else:
raise ValueError("version %s not implemented" % self.version)
if 'apply' in dir(rational_func):
self.activation_function = rational_func.apply
else:
self.activation_function = rational_func
else:
if self.version == "A":
rational_func = Rational_PYTORCH_A_F
elif self.version == "B":
rational_func = Rational_PYTORCH_B_F
elif self.version == "C":
rational_func = Rational_PYTORCH_C_F
elif self.version == "D":
rational_func = Rational_PYTORCH_D_F
elif self.version == "N":
rational_func = Rational_NONSAFE_F
else:
raise ValueError("version %s not implemented" % self.version)
self.activation_function = rational_func
self._handle_retrieve_mode = None
self.distribution = None
return self
def change_version(self, version):
assert version in ["A", "B", "C", "D"]
if version == self.version:
print(f"This Rational function has already the correct type {self.version}")
return
if "cuda" in str(self.device):
if version == "A":
rational_func = Rational_CUDA_A_F
elif version == "B":
rational_func = Rational_CUDA_B_F
elif version == "C":
rational_func = Rational_CUDA_C_F
elif version == "D":
rational_func = Rational_CUDA_D_F
elif self.version == "N":
rational_func = Rational_CUDA_NONSAFE_F
else:
raise ValueError("version %s not implemented" % version)
if 'apply' in dir(rational_func):
self.activation_function = rational_func.apply
else:
self.activation_function = rational_func
self.version = version
else:
if version == "A":
rational_func = Rational_PYTORCH_A_F
elif version == "B":
rational_func = Rational_PYTORCH_B_F
elif version == "C":
rational_func = Rational_PYTORCH_C_F
elif version == "D":
rational_func = Rational_PYTORCH_D_F
elif self.version == "N":
rational_func = Rational_NONSAFE_F
else:
raise ValueError("version %s not implemented" % self.version)
self.activation_function = rational_func
self.version = version
def input_retrieve_mode(self, auto_stop=False, max_saves=1000,
bin_width=0.1):
"""
Will retrieve the distribution of the input in self.distribution. \n
This will slow down the function, as it has to retrieve the input \
dist.\n
Arguments:
auto_stop (bool):
If True, the retrieving will stop after `max_saves` \
calls to forward.\n
Else, use :meth:`torch.Rational.training_mode`.\n
Default ``False``
            max_saves (int):
                The maximum number of forward calls for which inputs \
                are saved before retrieving automatically stops.\n
                Default ``1000``
"""
if self._handle_retrieve_mode is not None:
# print("Already in retrieve mode")
return
if "cuda" in self.device:
from rational.utils.histograms_cupy import Histogram
else:
from rational.utils.histograms_numpy import Histogram
self.distribution = Histogram(bin_width)
# print("Retrieving input from now on.")
if auto_stop:
self.inputs_saved = 0
self._handle_retrieve_mode = self.register_forward_hook(_save_input_auto_stop)
self._max_saves = max_saves
else:
self._handle_retrieve_mode = self.register_forward_hook(_save_input)
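    # Usage sketch (illustrative, not part of the original API surface):
    #     rat = Rational()
    #     rat.input_retrieve_mode(auto_stop=True, max_saves=100)
    #     _ = rat(torch.randn(100, 32))  # inputs recorded via the forward hook
    #     rat.training_mode()            # or let auto_stop end the recording
    # The accumulated histogram is then available in `rat.distribution`.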
def clear_hist(self):
self.inputs_saved = 0
bin_width = self.distribution.bin_size
if "cuda" in self.device:
from rational.utils.histograms_cupy import Histogram
else:
from rational.utils.histograms_numpy import Histogram
self.distribution = Histogram(bin_width)
def training_mode(self):
"""
Stops retrieving the distribution of the input in `self.distribution`.
"""
# print("Training mode, no longer retrieving the input.")
if self._handle_retrieve_mode is not None:
self._handle_retrieve_mode.remove()
self._handle_retrieve_mode = None
@classmethod
def save_all_inputs(self, save, auto_stop=False, max_saves=10000,
bin_width="auto"):
"""
Have every rational save every input.
Arguments:
save (bool):
                If True, every instantiated rational function will \
                retrieve its input, else, it won't.
            auto_stop (bool):
                If True, the retrieving will stop after `max_saves` \
                calls to forward.\n
                Else, use :meth:`torch.Rational.training_mode`.\n
                Default ``False``
            max_saves (int):
                The maximum number of forward calls for which inputs \
                are saved before retrieving automatically stops.\n
                Default ``10000``
bin_width (float or "auto"):
The size of the histogram's bin width to store the input \
in.\n
If `"auto"`, then automatically determines the bin width \
to have ~100 bins.\n
Default ``"auto"``
"""
if save:
for rat in self.list:
rat._saving_input = True
rat.input_retrieve_mode(auto_stop, max_saves,
bin_width=bin_width)
else:
for rat in self.list:
rat._saving_input = False
rat.training_mode()
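    # Usage sketch (illustrative): toggle input capture for every instantiated
    # Rational at once, e.g. around an evaluation pass:
    #     Rational.save_all_inputs(True)    # all instances start recording
    #     ... run forward passes ...
    #     Rational.save_all_inputs(False)   # back to plain training mode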
@property
def saving_input(self):
return self._saving_input
@saving_input.setter
def saving_input(self, new_value):
if new_value is True:
self._saving_input = True
self.input_retrieve_mode()
elif new_value is False:
self._saving_input = False
self.training_mode()
else:
print("saving_input of rationals should be set with booleans")
class AugmentedRational(Rational):
"""
Augmented Rational activation function inherited from ``Rational``
Arguments:
approx_func (str):
The name of the approximated function for initialisation. \
The different initialable functions are available in
`rational.rationals_config.json`. \n
Default ``leaky_relu``.
degrees (tuple of int):
The degrees of the numerator (P) and denominator (Q).\n
Default ``(5, 4)``
cuda (bool):
Use GPU CUDA version. If None, use cuda if available on the
machine\n
Default ``None``
version (str):
Version of Rational to use. Rational(x) = P(x)/Q(x)\n
`A`: Q(x) = 1 + \|b_1.x\| + \|b_2.x\| + ... + \|b_n.x\|\n
`B`: Q(x) = 1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`C`: Q(x) = 0.1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`D`: like `B` with noise\n
Default ``A``
trainable (bool):
If the weights are trainable, i.e, if they are updated during
backward pass\n
Default ``True``
Returns:
Module: Augmented Rational module
"""
def __init__(self, approx_func="leaky_relu", degrees=(5, 4), cuda=None,
version="A", trainable=True, train_numerator=True,
train_denominator=True):
        super().__init__(approx_func, degrees, cuda, version, trainable,
                         train_numerator, train_denominator)
        self.in_bias = nn.Parameter(torch.FloatTensor([0.0]).to(self.device))
        self.out_bias = nn.Parameter(torch.FloatTensor([0.0]).to(self.device))
        self.vertical_scale = nn.Parameter(torch.FloatTensor([1.0]).to(self.device))
        self.horizontal_scale = nn.Parameter(torch.FloatTensor([1.0]).to(self.device))
def forward(self, x):
x = self.horizontal_scale * x + self.in_bias
out = self.activation_function(x, self.numerator, self.denominator,
self.training)
return self.vertical_scale * out + self.out_bias
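# What AugmentedRational computes (illustrative): a learnable affine transform
# is applied before and after the rational function,
#     y = vertical_scale * R(horizontal_scale * x + in_bias) + out_bias
# e.g.:
#     aug = AugmentedRational("leaky_relu")
#     y = aug(torch.randn(4, 8))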
class RationalNonSafe(Rational_base, nn.Module):
"""
    Non-safe Rational activation function (the denominator is not kept away
    from zero), inherited from ``torch.nn.Module``.
    Arguments:
        degrees (tuple of int):
            The degrees of the numerator (P) and denominator (Q).\n
            Default ``(5, 4)``
        cuda (bool):
            Use GPU CUDA version. \n
            If ``None``, use cuda if available on the machine\n
            Default ``None``
        trainable (bool):
            If the weights are trainable, i.e, if they are updated during \
            backward pass\n
            Default ``True``
    Returns:
        Module: Rational module
"""
def __init__(self, degrees=(5, 4), cuda=None, trainable=True, train_numerator=True,
train_denominator=True):
super().__init__()
if cuda is None:
cuda = torch_cuda_available()
if cuda is True:
device = "cuda"
elif cuda is False:
device = "cpu"
else:
device = cuda
self.numerator = nn.Parameter(torch.tensor([ 0., 1.01130152, -0.25022214, -0.10285302, 0.02551535]).to(device),
requires_grad=True)
self.denominator = nn.Parameter(torch.tensor([-0.24248419, 0.07964891, -0.02110156]).to(device),
requires_grad=True)
# self.numerator = nn.Parameter(torch.ones(degrees[0]+1).to(device),
# requires_grad=True)
# self.denominator = nn.Parameter(torch.ones(degrees[1]).to(device),
# requires_grad=True)
self.register_parameter("numerator", self.numerator)
self.register_parameter("denominator", self.denominator)
self.device = device
self.degrees = degrees
self.training = trainable
self.version = "NonSafe"
#
# def forward(self, x, y):
# z = x.view(-1)
# len_num, len_deno = len(self.numerator), len(self.denominator)
# # xps = torch.vander(z, max(len_num, len_deno), increasing=True)
# xps = _get_xps(z, len_num, len_deno).to(self.numerator.device)
# numerator = xps.mul(self.numerator).sum(1)
# denominator = xps[:, 1:len_deno+1].mul(self.denominator).sum(1) * y.to(self.numerator.device)
# return (numerator - denominator).view(x.shape)
def forward(self, x):
z = x.view(-1)
len_num, len_deno = len(self.numerator), len(self.denominator)
# xps = torch.vander(z, max(len_num, len_deno), increasing=True)
xps = _get_xps(z, len_num, len_deno).to(self.numerator.device)
numerator = xps.mul(self.numerator).sum(1)
denominator = xps[:, 1:len_deno+1].mul(self.denominator).sum(1)
return numerator.div(1 + denominator).view(x.shape)
    def fit(self, x, y):
        """
        Linear regression trick to calculate the numerator and denominator \
        based on x and y
        """
        import numpy as np
        from sklearn import linear_model
        # y = P(x) / (1 + Q(x)) rearranged as y = P(x) - y * Q(x) is linear
        # in the coefficients of P and Q.
        clf = linear_model.LinearRegression(fit_intercept=False)
        features = np.stack([np.ones_like(x), x, x**2, x**3, x**4,
                             -y * x, -y * x**2, -y * x**3], axis=-1)
        clf.fit(features, y)
        # first coefficients belong to the numerator P, the rest to Q
        self.numerator.data = torch.tensor(clf.coef_[:len(self.numerator)],
                                           dtype=torch.float32).to(self.device)
        self.denominator.data = torch.tensor(clf.coef_[len(self.numerator):],
                                             dtype=torch.float32).to(self.device)
class EmbeddedRational(Rational, nn.Module):
nb_rats = 2
list = []
def __init__(self, approx_func="leaky_relu", degrees=(3, 2), cuda=None,
version="A", *args, **kwargs):
super().__init__(approx_func, degrees)
if approx_func == "leaky_relu":
approx_func += "_0.1"
RationalWarning.warn("Using a leaky_relu_0.1 to make " \
"EmbeddedRational approx leaky_relu")
self.init_approximation = approx_func
self.degrees=degrees
self.cuda = cuda
self.version = version
self.successive_rats = []
for i in range(self.nb_rats):
rat = Rational(approx_func, degrees, cuda, version, *args,
**kwargs)
self.add_module(f"rational_{i}", rat)
self.successive_rats.append(rat)
self.list.append(self)
del self.numerator
del self.denominator
self.numerators = [rat.numerator for rat in self.successive_rats]
self.denominators = [rat.denominator for rat in self.successive_rats]
def forward(self, x):
for rat in self.successive_rats:
x = rat(x)
return x
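    # Note: forward simply chains the `nb_rats` inner Rationals, i.e. with the
    # default nb_rats = 2 it computes rational_1(rational_0(x)), giving a
    # rational function of higher effective degree.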
def _apply(self, fn):
for rat in self.successive_rats:
for clos in fn.__closure__:
if type(clos.cell_contents) is torch.device:
device = clos.cell_contents
rat.device = device
break
return super()._apply(fn)
def numpy(self):
from rational.numpy import EmbeddedRational as ERational_numpy
ERational_numpy.nb_rats = self.nb_rats
erational_n = ERational_numpy(self.init_approximation, self.degrees,
self.version)
for trat, nrat in zip(self.successive_rats, erational_n.successive_rats):
nrat.numerator = trat.numerator.tolist()
nrat.denominator = trat.denominator.tolist()
return erational_n
def __repr__(self):
return (f"Embedded Rational Activation Function (PYTORCH version "
f"{self.version}) of degrees {self.degrees} running on "
f"{self.device}")
# @property()
# def list(self):
class RecurrentRational():
"""
Recurrent rational activation function - wrapper for Rational
Arguments:
approx_func (str):
The name of the approximated function for initialisation. \
The different initialable functions are available in \
`rational.rationals_config.json`. \n
Default ``leaky_relu``
degrees (tuple of int):
The degrees of the numerator (P) and denominator (Q).\n
Default ``(5, 4)``
cuda (bool):
Use GPU CUDA version. \n
If ``None``, use cuda if available on the machine\n
Default ``None``
version (str):
Version of Rational to use. Rational(x) = P(x)/Q(x)\n
`A`: Q(x) = 1 + \|b_1.x\| + \|b_2.x\| + ... + \|b_n.x\|\n
`B`: Q(x) = 1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`C`: Q(x) = 0.1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`D`: like `B` with noise\n
Default ``A``
trainable (bool):
If the weights are trainable, i.e, if they are updated during \
backward pass\n
Default ``True``
Returns:
Module: Rational module
"""
def __init__(self, approx_func="leaky_relu", degrees=(5, 4), cuda=None,
version="A", trainable=True, train_numerator=True,
train_denominator=True):
self.rational = Rational(approx_func=approx_func,
degrees=degrees,
cuda=cuda,
version=version,
trainable=trainable,
train_numerator=train_numerator,
train_denominator=train_denominator)
def __call__(self, *args, **kwargs):
return RecurrentRationalModule(self.rational)
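# Usage sketch (illustrative): every call to a RecurrentRational instance wraps
# the same underlying Rational, so the returned modules share their weights:
#     shared = RecurrentRational()
#     act1, act2 = shared(), shared()
#     assert act1.rational is act2.rational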
class RecurrentRationalModule(nn.Module):
def __init__(self, rational):
super(RecurrentRationalModule, self).__init__()
self.rational = rational
self._handle_retrieve_mode = None
self.distribution = None
def forward(self, x):
return self.rational(x)
def __repr__(self):
return (f"Recurrent Rational Activation Function (PYTORCH version "
f"{self.rational.version}) of degrees {self.rational.degrees} running on "
f"{self.rational.device}")
def cpu(self):
return self.rational.cpu()
def cuda(self):
return self.rational.cuda()
def numpy(self):
return self.rational.numpy()
def fit(self, function, x=None, show=False):
return self.rational.fit(function=function, x=x, show=show)
def input_retrieve_mode(self, auto_stop=True, max_saves=10000,
bin_width=0.01):
"""
Will retrieve the distribution of the input in self.distribution. \n
This will slow down the function, as it has to retrieve the input \
dist.\n
Arguments:
auto_stop (bool):
If True, the retrieving will stop after `max_saves` \
calls to forward.\n
Else, use :meth:`torch.Rational.training_mode`.\n
Default ``True``
            max_saves (int):
                The maximum number of forward calls for which inputs \
                are saved before retrieving automatically stops.\n
                Default ``10000``
"""
if self._handle_retrieve_mode is not None:
# print("Already in retrieve mode")
return
        if "cuda" in str(self.rational.device):
            from rational.utils.histograms_cupy import Histogram
        else:
            from rational.utils.histograms_numpy import Histogram
        self.distribution = Histogram(bin_width)
# print("Retrieving input from now on.")
if auto_stop:
self.inputs_saved = 0
self._handle_retrieve_mode = self.register_forward_hook(_save_input_auto_stop)
self._max_saves = max_saves
else:
self._handle_retrieve_mode = self.register_forward_hook(_save_input)
def training_mode(self):
"""
Stops retrieving the distribution of the input in `self.distribution`.
"""
print("Training mode, no longer retrieving the input.")
        if self._handle_retrieve_mode is not None:
            self._handle_retrieve_mode.remove()
            self._handle_retrieve_mode = None
def show(self, input_range=None, display=True):
return self.rational.show(input_range=input_range, display=display)
def _save_input(self, input, output):
self.distribution.fill_n(input[0])
def _save_input_auto_stop(self, input, output):
self.inputs_saved += 1
self.distribution.fill_n(input[0])
if self.inputs_saved > self._max_saves:
self.training_mode()
| UTF-8 | Python | false | false | 28,724 | py | 85 | rationals.py | 55 | 0.53645 | 0.529488 | 0 | 726 | 38.564738 | 121 |