| Column | Type | Range / Values |
|---|---|---|
| repo_name | string | length 7..111 |
| __id__ | int64 | 16.6k..19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 5..151 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | length 26..130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4..42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k..687M, nullable |
| star_events_count | int64 | 0..209k |
| fork_events_count | int64 | 0..110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0..10.2M, nullable |
| gha_stargazers_count | int32 | 0..178k, nullable |
| gha_forks_count | int32 | 0..88.9k, nullable |
| gha_open_issues_count | int32 | 0..2.72k, nullable |
| gha_language | string | length 1..16, nullable |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | length 10..2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10..2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1..202k |
| filename | string | length 4..112 |
| num_lang_files | int64 | 1..202k |
| alphanum_fraction | float64 | 0.26..0.89 |
| alpha_fraction | float64 | 0.2..0.89 |
| hex_fraction | float64 | 0..0.09 |
| num_lines | int32 | 1..93.6k |
| avg_line_length | float64 | 4.57..103 |
| max_line_length | int64 | 7..931 |
hchaudhari73/flask | 8,280,696,982,963 | a8a03dc607f1e93a89780c431fee178182f79563 | 5f1c5fb8f729eed74270ec9b2595f9368cd1188b | /loan_prediction/app.py | 719419fd7bb19e1d591cd9bd195744da47f06a22 | []
| no_license | https://github.com/hchaudhari73/flask | c2e39f42e27a2f5febfd8600a26bc3ef013a840b | b23f2f860bcd65d49e609340f4160f9d78a18d30 | refs/heads/master | 2022-12-16T14:09:44.736877 | 2020-09-10T15:58:32 | 2020-09-10T15:58:32 | 258,689,505 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from flask import Flask, request, render_template
app = Flask(__name__)
import pickle
pickle_in = open("model.pkl","rb")
classifier = pickle.load(pickle_in)
pickle_in.close()
@app.route("/")
def welcome():
return render_template("home.html")
@app.route("/predict")
def prediction():
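    # Each character of the "features" query-string value is parsed as one
    # integer feature; exactly four digits are expected for the reshape below.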
f = np.array([int(x) for x in request.args.get("features")]).reshape(1,4)
output = classifier.predict(f)
if output[0]==1:
return "Your Loan has be approved"
return "Your Loan has not be approved"
@app.route("/predict_file", methods=["POST"])
def predict_file():
df = pd.read_csv(request.files.get("file"))
output = classifier.predict(df)
return str(list(output))
if __name__ == "__main__":
app.run(port=8080, host="0.0.0.0")
| UTF-8 | Python | false | false | 810 | py | 9 | app.py | 4 | 0.645679 | 0.630864 | 0 | 33 | 23.484848 | 77 |
MU-Software/foxsnow | 14,869,176,792,689 | 689f601655d342a7191b716988f11d498682705e | 5330eb06a9daee7c2ff9845a8f92ffc88a14cac1 | /foxsnow_python_compatibler.py | 5fcb71fade95764d7f0d2f99129885f6afc77ff5 | []
| no_license | https://github.com/MU-Software/foxsnow | 29120e57f4dd380641d7ffc7516efbeefcedc0fc | c22d8faaebebd070d05f233bfb6a4a9f5de0d7e6 | refs/heads/master | 2023-02-21T22:27:56.098765 | 2021-01-28T10:28:48 | 2021-01-28T10:28:48 | 237,782,690 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import errno
import json
import os
import pathlib as pt
import pprint
import subprocess as sp
import sys
def parse_c_header(source, defines={}, recursive=True):
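    # Walk a C source/header file, tracking #define/#ifdef/#ifndef/#endif state,
    # and collect the library headers and project files it #includes, optionally
    # recursing into the included project headers.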
c_std_lib = [
'assert.h', 'complex.h', 'ctype.h', 'errno.h',
'fenv.h', 'float.h', 'inttypes.h', 'iso646.h',
'limits.h', 'locale.h', 'math.h', 'setjmp.h',
'signal.h', 'stdalign.h', 'stdarg.h', 'stdatomic.h',
'stdbool.h', 'stddef.h', 'stdint.h', 'stdio.h',
'stdlib.h', 'stdnoreturn.h', 'string.h', 'tgmath.h',
'threads.h', 'time.h', 'uchar.h', 'wchar.h', 'wctype.h']
orig_wd = os.getcwd()
source = pt.Path(source).resolve()
if not source.exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), source.as_posix())
os.chdir(source.parents[0])
result_dict = {
'keyword_defined' : {},
'include_list' : {
'lib':[],
'src':[]
}
}
if defines: result_dict['keyword_defined'].update(defines)
keyword_ifdef = []
file_lines = None
with open(source, 'r', encoding='utf-8') as fp:
file_lines = fp.readlines()
for index, line in enumerate(file_lines):
cur_line = line.strip()
if not cur_line.startswith('#'):
continue
else:
cur_line = cur_line[1:] # remove '#'
token = cur_line.strip().split(' ')
if not token: continue
# Preprocessor IF control
if token[0] == 'ifdef':
if len(token) < 2: raise Exception(f'ifdef line {index} too short')
keyword_ifdef.append((token[1], bool(token[1] in result_dict['keyword_defined'])))
elif token[0] == 'ifndef':
if len(token) < 2: raise Exception(f'ifndef line {index} too short')
keyword_ifdef.append((token[1], bool(not token[1] in result_dict['keyword_defined'])))
elif token[0] == 'endif':
# TODO : FIX HERE (try this code with only keyword_ifdef.pop() then you'll notice a problem.)
try:
keyword_ifdef.pop()
except:
pass
else:
# Preprocessor VAR control
if not keyword_ifdef or keyword_ifdef[-1][1]:
if token[0] == 'define':
if len(token) < 2: raise Exception(f'define line {index} too short')
result_dict['keyword_defined'][token[1]] = None if len(token) < 3 else ''.join(token[2:])
elif token[0] == 'include':
if len(token) < 2: raise Exception(f'include line {index} too short')
value = ''.join(token[1:])
if '<' in value:
result_dict['include_list']['lib'].append(value.replace('<', '').replace('>', ''))
else:
value = value.replace('"', '')
result_dict['include_list']['src'].append(pt.Path(value).resolve(strict=True))
if recursive:
for file in result_dict['include_list']['src']:
recursive_result = parse_c_header(file, result_dict['keyword_defined'], True)
result_dict['include_list']['lib'] += recursive_result['include_list']['lib']
result_dict['include_list']['src'] += recursive_result['include_list']['src']
result_dict['keyword_defined'].update(recursive_result['keyword_defined'])
os.chdir(orig_wd)
#remove duplicated items
result_dict['include_list']['lib'] = list(dict.fromkeys(result_dict['include_list']['lib']))
result_dict['include_list']['src'] = list(dict.fromkeys(result_dict['include_list']['src']))
#remove C standard library
result_dict['include_list']['lib'] = [z for z in result_dict['include_list']['lib']\
if z not in c_std_lib]
return result_dict
def get_target_src_files(target):
result = parse_c_header(target)
#include C files if exists
tmp_src_c_list = list()
for file in result['include_list']['src']:
target_c_src = file.with_suffix('.c')
if target_c_src.exists():
tmp_src_c_list.append(file.with_suffix('.c'))
result['include_list']['src'] += tmp_src_c_list
result['include_list']['src'].sort()
result['include_list']['src'].insert(0, pt.Path(target).resolve())
return result
def include_concat(filename):
result_txt = ''
src_target_list = get_target_src_files(filename)
for src_path in src_target_list['include_list']['src']:
if src_path.suffix == '.h':
result_txt += src_path.read_text()
result_txt += '\n'
return result_txt
def extract_typedef_struct_code(include_data):
struct_data = dict()
struct_parenthesis_stack = list()
enum_parenthesis_stack = list()
tmp_line_buffer = ''
tmp_line_buffer_enum = ''
for index, line in enumerate(include_data.splitlines()):
if 'typedef struct' in line:
struct_parenthesis_stack.append(line)
tmp_line_buffer += (line + '\n')
continue
elif line.startswith('enum'):
enum_parenthesis_stack.append(line)
tmp_line_buffer_enum += (line + '\n')
continue
elif line.startswith('}'):
if enum_parenthesis_stack:
enum_parenthesis_stack.pop()
# TODO : NEED TO PARSE ENUM!
continue
struct_name = line.replace('}', '').replace(';', '').strip()
if struct_name:
struct_data[struct_name] = tmp_line_buffer
tmp_line_buffer = ''
continue
elif struct_parenthesis_stack:
tmp_line_buffer += (line + '\n')
continue
return struct_data
def ctags_parse(project_dir):
    orig_wd = pt.Path.cwd()
print(orig_wd.absolute())
os.chdir(project_dir)
ctags_proc = None
try:
ctags_proc = sp.run(['ctags', '-R', '--output-format=json'], capture_output=True, check=True, shell=True)
except Exception as e:
raise e
ctags_output = ctags_proc.stdout.decode()
ctags_output_corrected = ''
for line in ctags_output.splitlines():
ctags_output_corrected += f'{line},\n'
ctags_output_corrected = ctags_output_corrected[:-2]
ctags_output_corrected = '{"tagdata":[' + ctags_output_corrected + ']}'
try:
ctags_data = json.loads(ctags_output_corrected)['tagdata']
except json.JSONDecodeError as err:
# grab a reasonable section, say 40 characters.
start, stop = max(0, err.pos - 20), err.pos + 20
snippet = err.doc[start:stop]
print(err)
print('... ' if start else '', snippet, ' ...' if stop < len(err.doc) else '', sep="")
print('^'.rjust(21 if not start else 25))
raise err
function_list = [func_data.get('name', '') for func_data in ctags_data if func_data.get('kind', '') == 'function' and 'glew.c' not in func_data.get('path', '')]
struct_list = [struct_data.get('name', '') for struct_data in ctags_data if struct_data.get('kind', '') == 'typedef' and 'glew.c' not in struct_data.get('path', '')]
global_list = [global_data.get('name', '') for global_data in ctags_data if global_data.get('kind', '') == 'variable' and 'glew.c' not in global_data.get('path', '')]
os.chdir(orig_wd)
return {
'ctags_data': ctags_data,
'function_list': function_list,
'struct_list': struct_list,
'global_list': global_list,
}
def create_vs_def_file(ctags_data):
file_data = 'LIBRARY FOXSNOW\nEXPORTS\n'
for function_name in ctags_data['function_list']:
file_data += f'\t{function_name}\n'
for global_name in ctags_data['global_list']:
file_data += f'\t{global_name} DATA\n'
target_path = pt.Path('foxsnow_dll.def').absolute()
print(target_path)
if target_path.exists():
target_path.unlink()
with target_path.open('w') as fp:
fp.write(file_data)
def create_cffi_json_file(ctags_data, input_file):
# We need to include function and struct definition.
target_path = pt.Path('foxsnow_cffi_data.json').absolute()
print(target_path)
if target_path.exists():
target_path.unlink()
    # Assumed minimal output format: dump the collected function and struct
    # names as JSON for a later CFFI build step.
    file_data = json.dumps({
        'functions': ctags_data['function_list'],
        'structs': ctags_data['struct_list'],
    }, indent=4)
    with target_path.open('w') as fp:
        fp.write(file_data)
if __name__ == '__main__':
import pprint
if len(sys.argv) > 2:
target_main_src = ''.join(sys.argv[1:])
else:
target_main_src = './foxsnow'
# include_concat_data = include_concat(target_main_src)
# struct_data = extract_typedef_struct_code(include_concat_data)
# print(struct_data.keys())
parsed_data = ctags_parse(target_main_src)
result = create_vs_def_file(parsed_data) | UTF-8 | Python | false | false | 8,782 | py | 35 | foxsnow_python_compatibler.py | 1 | 0.571965 | 0.567297 | 0 | 239 | 35.748954 | 170 |
liqile1/pytorch-deeplab-xception | 14,181,982,033,538 | a3b2d0a95a3d4ac53e04e1fb23c1602b86f61ea9 | e4e382e06e10509bf161736061b9df3cb2161b87 | /eval_leadbang.py | c50eb32521508029ca35a27c974f43de2d37a465 | [
"MIT"
]
| permissive | https://github.com/liqile1/pytorch-deeplab-xception | 464e08d818307c5bcaa4cf25485ffa1392b10dcf | 6186b4a586507340b95f224b8dae4950cdee137f | refs/heads/master | 2020-09-21T02:20:49.994967 | 2019-12-01T06:52:20 | 2019-12-01T06:52:20 | 224,653,147 | 0 | 0 | MIT | true | 2019-11-28T12:53:09 | 2019-11-28T12:53:08 | 2019-11-28T11:06:50 | 2019-06-24T01:03:43 | 948 | 0 | 0 | 0 | null | false | false | import argparse
import os
import os.path as osp
import numpy as np
#from tqdm import tqdm
import torch
from mypath import Path
#from dataloaders import make_data_loader
from dataloaders.datasets import leadbang
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weigths_labels
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from torch.utils import data
import cv2
class Tester(object):
def __init__(self, args):
self.args = args
# Define Dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True}
# self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)
self.test_set = leadbang.LeadBangTest("/leadbang/data/")
self.test_loader = data.DataLoader(self.test_set,
batch_size=1, shuffle=True, num_workers=1, pin_memory=True)
# self.val_loader = leadbang.LeadBangTest(Path.db_root_dir("leadbang"))
self.nclass = 2
print(args.backbone)
# Define network
self.model = DeepLab(num_classes=self.nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn)
#self.model = self.model.cuda()
self.model = torch.nn.DataParallel(self.model)
#cudnn.benchmark = True
self.model = self.model.cuda()
args.start_epoch = 0
def test(self, epoch):
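        # Load the checkpoint saved for the given epoch, run inference on every
        # test image, and collect the predicted masks keyed by image name.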
saved_state_dict = torch.load("checkpoint/{}.pth".format(epoch))
# print('state dict: ', saved_state_dict)
#print('type of dict: ', type(saved_state_dict))
#new_dict = {}
#for name in saved_state_dict:
# new_dict[name[7:]] = saved_state_dict[name]
self.model.load_state_dict(saved_state_dict)
self.model.eval()
result = {}
# tbiar = tqdm(self.train_loader)
# num_img_tr = len(self.train_loader)
for i, sample in enumerate(self.test_loader):
image, target, _, name = sample
image, target = image.cuda(), target.cuda()
print('size of img: ', image.size())
with torch.no_grad():
output = self.model(image)
output = output.cpu().data.numpy().transpose(0,2,3,1)
seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
seg_pred = np.reshape(seg_pred, (output.shape[1], output.shape[2]))
seg_pred = 255 - 255 * seg_pred
result[name[0]] = seg_pred.copy()
print('process: ', name)
return result
def main():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--out-stride', type=int, default=16,
help='network output stride (default: 8)')
parser.add_argument('--dataset', type=str, default='pascal',
choices=['pascal', 'coco', 'cityscapes'],
help='dataset name (default: pascal)')
parser.add_argument('--use-sbd', action='store_true', default=True,
help='whether to use SBD dataset (default: True)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=513,
help='base image size')
parser.add_argument('--crop-size', type=int, default=513,
help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='ce',
choices=['ce', 'focal'],
help='loss func type (default: ce)')
# training hyper params
parser.add_argument('--epochs', type=int, default=None, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None,
metavar='N', help='input batch size for \
testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False,
help='whether to use balanced weights (default: False)')
# optimizer params
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly',
choices=['poly', 'step', 'cos'],
help='lr scheduler mode: (default: poly)')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=5e-4,
metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='whether use nesterov (default: False)')
# cuda, seed and logging
parser.add_argument('--no-cuda', action='store_true', default=
False, help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None,
help='set the checkpoint name')
# finetuning pre-trained models
parser.add_argument('--ft', action='store_true', default=False,
help='finetuning on a different dataset')
# evaluation option
parser.add_argument('--eval-interval', type=int, default=1,
                        help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')
args = parser.parse_args()
args.cuda = True
args.gpu_ids = [0]
# if args.sync_bn is None:
# if args.cuda and len(args.gpu_ids) > 1:
# args.sync_bn = True
# else:
# args.sync_bn = False
args.sync_bn = False
#args.freeze_bn = True
# default settings for epochs, batch_size and lr
args.epochs = 1000
# if args.batch_size is None:
# args.batch_size = 4 * len(args.gpu_ids)
# if args.test_batch_size is None:
# args.test_batch_size = args.batch_size
# if args.lr is None:
# lrs = {
# 'coco': 0.1,
# 'cityscapes': 0.01,
# 'pascal': 0.007,
# }
# args.lr = lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size
args.lr = 0.01
args.batch_size = 1
print(args)
torch.manual_seed(args.seed)
tester = Tester(args)
result = tester.test(500)
if not osp.exists('/leadbang/data/test_result/'):
os.makedirs('/leadbang/data/test_result/')
for name in result:
path = '/leadbang/data/test_result/' + name + '.bmp'
cv2.imwrite(path, result[name])
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 8,514 | py | 3 | eval_leadbang.py | 3 | 0.573996 | 0.565422 | 0 | 200 | 41.57 | 110 |
sorooshsorkhani/RankLib-Gini | 6,846,177,881,737 | ef011d6df84625a375231007f0c1e8860f411bb2 | d91ecf5922fa61a9216b0556d13abfe788516328 | /Gini.py | 9a2093868115cf95e0196eb8d66c734d90aa09d4 | []
| no_license | https://github.com/sorooshsorkhani/RankLib-Gini | eef63d96ad0ec92b01404b85cf0274b805fefbcd | 4097fbf9e2f16c41bb521161f8a7bea5ecf6603c | refs/heads/main | 2023-05-03T03:51:14.612806 | 2021-05-19T02:32:26 | 2021-05-19T02:32:26 | 320,728,132 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Creator: Soroosh Sorkhani
Email: soroosh.sorkhani@gmail.com or soroosh.sorkhani@ryerson.ca
"""
import sys
from os import path
import re
import math
import xml.etree.ElementTree as ET
# First, check if the inputs are correct
try:
num_features = int(sys.argv[1]) # number of features you have in your data
except:
print('number of features must be an integer greater than 4') # because ranklib model uses at least 4 features
sys.exit()
try:
dataset_file = open(sys.argv[2]) # the data used for training the ranklib model
except:
print('dataset is missing')
sys.exit()
try:
model_file = open(sys.argv[3]) # the random forests model saved by RankLib
except:
print('model is missing')
sys.exit()
try:
path2trees = sys.argv[4] # the directory that trees will be saved
except:
print("path to trees is missing")
sys.exit()
if not path.exists(path2trees):
print("path to trees doesn't exist")
sys.exit()
try:
gini_file_name = sys.argv[5] # choose a name or directory(optional) for the output
except:
print("the output file is not determined")
sys.exit()
if gini_file_name.find(".txt") == -1:
print("the output file must be a .txt file")
sys.exit()
# making the dataset from training data
# the dataset format of ranklib is read and saved as a dictionary here:
training_dataset = dict()
match_list = dataset_file.readlines().copy()
d = 0 # helps to distinguish between each line (query-document pair) in the dataset
for m in range(len(match_list)):
q = match_list[m].strip().split(" ")[1][4:] # q is a query
d += 1 # d is assigned to a document
id = (q, str(d)) # combination of a query and a document is a record (match) in the dataset
label = match_list[m].strip().split(" ")[0] # the rank label/score
training_dataset[id] = dict()
training_dataset[id]["label"] = label
for f in range(num_features):
training_dataset[id][str(f+1)] = float(match_list[m].strip().split(" ")[2 + f].split(":")[-1])
# Read the model and save trees in separated xml files
model_lines = model_file.readlines().copy()
i = 0
for line in model_lines:
line = line.rstrip()
if len(line) == 0:
continue
if re.search("^#.*bags =", line):
num_bags = re.findall("^#.*bags = ([0-9]+)", line)[0]
if re.search("^#", line):
continue
if re.search("^<ensemble>", line):
i += 1
# if the tree is about to start, open an xml file for it
if i % 2 == 1:
tree_file = open(path2trees + "\\tree" + str(math.trunc((i+1)/2)) + ".xml", "w")
tree_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
# if the tree is about to end, close the xml file
if re.search("^</ensemble>", line):
i += 1
        if i % 2 == 0:
tree_file.write(line)
tree_file.close()
continue
# write the tree (lines from model) in the tree file
line += "\n"
tree_file.write(line)
print("Separation of trees is done")
def node_extraction(node): # reading the information for each node from the parsed tree
dict_help = dict()
dict_help[node] = dict()
if node.find("output") is None:
node_type = "C" # C as a connector
dict_help[node]["feature"] = node[0].text.strip()
dict_help[node]["threshold"] = float(node[1].text.strip())
dict_help[node]["left"] = node[2]
dict_help[node]["right"] = node[3]
else:
node_type = "L" # L as a leaf
dict_help[node]["output"] = float(node[0].text.strip())
dict_help[node]["node_type"] = node_type
dict_help[node]["parent_node"] = parent
dict_help[node]["node_data"] = node_data.copy()
return dict_help
def gini_importance(node): # it must be a "C" type node
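    # Importance of a split = Gini impurity of the parent node minus the
    # size-weighted average Gini impurity of its two children.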
main = [training_dataset[id]["label"] for id in node_dict[node]["node_data"]]
left = [training_dataset[id]["label"] for id in node_dict[node_dict[node]["left"]]["node_data"]]
right = [training_dataset[id]["label"] for id in node_dict[node_dict[node]["right"]]["node_data"]]
main_dict = dict()
    left_dict = dict()
right_dict = dict()
for i in main:
main_dict[i] = main_dict.get(i, 0) + 1
for i in left:
        left_dict[i] = left_dict.get(i, 0) + 1
for i in right:
right_dict[i] = right_dict.get(i, 0) + 1
main_gini = 0
for i in main_dict.keys():
main_gini += (main_dict[i]/len(main))**2
main_gini = 1 - main_gini # gini impurity of the node
left_gini = 0
    for i in left_dict.keys():
        left_gini += (left_dict[i]/len(left))**2
left_gini = 1 - left_gini # gini impurity of the left child
right_gini = 0
for i in right_dict.keys():
right_gini += (right_dict[i]/len(right))**2
right_gini = 1 - right_gini # gini impurity of the right child
gini_children = (len(left)*left_gini + len(right)*right_gini)/(len(left)+len(right))
importance = main_gini - gini_children # the change in gini impurity after the split = importance
return len(main), importance
# steps are:
# parsing a tree
# read nodes and find out the portion of data that go to the nodes
# reading nodes include identifying their parent node and child nodes (if applicable)
# after going down the tree and reading all the nodes:
# go back up the tree and calculate gini for each feature in a split (node)
# the set of gini importances of a feature is saved as a dictionary called "importance"
importance = dict()
how_many_trees = 0
for i in range(int(num_bags)):
mark = 0
print("Parsing tree" + str(i + 1))
tree1 = ET.parse(path2trees + "\\tree" + str(i + 1) + ".xml")
tree1_root = tree1.getroot()
root_parent = tree1_root[0][0]
node = root_parent
node_data = list(training_dataset.keys()).copy()
parent = "tree"
node_dict = dict()
while True:
if node not in node_dict.keys(): # if the node is not in the node-set
node_dict = {**node_dict, **node_extraction(node)} # read the node
else:
pass
if node_dict[node]["node_type"] == "C": # if the node is not a leaf node
if node[2] not in node_dict.keys(): # if the left child is not in the node-set, read it
parent = node
node = node[2]
newdata = [id for id in node_dict[parent]["node_data"] if training_dataset[id][node_dict[parent]["feature"]] <= node_dict[parent]["threshold"]]
node_data = newdata.copy()
continue
elif node[3] not in node_dict.keys(): # if the right child is not in the node-set, read it
parent = node
node = node[3]
newdata = [id for id in node_dict[parent]["node_data"] if training_dataset[id][node_dict[parent]["feature"]] > node_dict[parent]["threshold"]]
node_data = newdata.copy()
continue
else: # if both left and right nodes are read before, calculate gini and then go to the parent node
# here starts to calculate gini index
feature = node_dict[node]["feature"]
if feature not in importance.keys():
importance[feature] = list()
try:
count, importance_value = gini_importance(node)
importance[feature].append((count, importance_value))
except:
mark = 1
node = node_dict[node]["parent_node"]
if node == "tree":
if mark == 1:
how_many_trees += 1
break
continue
else: # if it's a leaf node, go back to the parent node
node = node_dict[node]["parent_node"]
continue
print("Processing trees are done.\n")
# Calculating weighted average of all the nodes split by the same feature over the entire trees
features_list = list() # this is the list of used features
for f in importance.keys():
features_list.append(int(f))
gini_file = open(gini_file_name, "w")
feature_importance = dict()
for feature in sorted(features_list):
feature = str(feature)
numerator = 0
denominator = 0
for (count, importance_value) in importance[feature]:
numerator += count * importance_value
denominator += count
feature_importance[feature] = numerator/denominator
gini_file.write(feature + "\t" + str(feature_importance[feature]) + "\n")
gini_file.close()
print("gini file is ready")
| UTF-8 | Python | false | false | 8,571 | py | 5 | Gini.py | 1 | 0.603547 | 0.594796 | 0 | 243 | 34.271605 | 159 |
IntelAI/nauta | 12,395,275,623,015 | 07a04ad4639217c62103e1cfd016aa1465f475da | 677a07ab78896fa78d9d8fb75006d7d0e7d53b73 | /applications/activity-proxy/app/tests/test_database.py | fa544c528cda60eabcf6d5b44d9831102fe67c0a | [
"CC-BY-ND-4.0",
"Apache-2.0",
"CC-BY-4.0"
]
| permissive | https://github.com/IntelAI/nauta | 0d2debae66f0bad241d6aba4414f851035b70f91 | bbedb114a755cf1f43b834a58fc15fb6e3a4b291 | refs/heads/develop | 2022-12-10T11:10:18.878605 | 2020-03-18T07:38:38 | 2020-03-18T07:38:38 | 164,936,937 | 392 | 67 | Apache-2.0 | false | 2022-11-21T21:58:46 | 2019-01-09T20:58:33 | 2022-05-24T21:10:37 | 2022-11-21T21:58:43 | 48,595 | 394 | 66 | 26 | Python | false | false | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
import sqlite3
import pytest
import database
def test_init_db(mocker):
fake_connection = mocker.MagicMock()
mocker.patch('sqlite3.connect').return_value = fake_connection
database.init_db()
assert fake_connection.execute.call_count == 2
assert fake_connection.commit.call_count == 1
assert fake_connection.close.call_count == 1
def test_init_db_already_exists(mocker):
fake_connection = mocker.MagicMock()
mocker.patch.object(fake_connection, 'execute').side_effect = sqlite3.OperationalError('already exists')
mocker.patch('sqlite3.connect').return_value = fake_connection
database.init_db()
assert fake_connection.execute.call_count == 1
assert fake_connection.commit.call_count == 0
assert fake_connection.close.call_count == 1
def test_init_db_unknown_error(mocker):
fake_connection = mocker.MagicMock()
mocker.patch.object(fake_connection, 'execute').side_effect = sqlite3.OperationalError
mocker.patch('sqlite3.connect').return_value = fake_connection
with pytest.raises(sqlite3.OperationalError):
database.init_db()
def test_update_timestamp(mocker):
fake_connection = mocker.MagicMock()
mocker.patch('sqlite3.connect').return_value = fake_connection
database.update_timestamp()
assert fake_connection.execute.call_count == 1
assert fake_connection.commit.call_count == 1
assert fake_connection.close.call_count == 1
def test_get_timestamp(mocker):
expected_datetime_return = datetime(year=2018, month=7, day=26, hour=11, minute=41, second=26)
fake_cursor = mocker.MagicMock(fetchone=lambda: ('26.07.2018 11:41:26',))
mocker.spy(fake_cursor, 'fetchone')
fake_connection = mocker.MagicMock(execute=lambda *args: fake_cursor)
mocker.spy(fake_connection, 'execute')
mocker.patch('sqlite3.connect').return_value = fake_connection
timestamp = database.get_timestamp()
assert timestamp == expected_datetime_return
assert fake_connection.execute.call_count == 1
assert fake_cursor.fetchone.call_count == 1
assert fake_connection.close.call_count == 1
| UTF-8 | Python | false | false | 2,735 | py | 840 | test_database.py | 269 | 0.732724 | 0.712249 | 0 | 79 | 33.620253 | 108 |
SeeWhatSticks/task-mistress-rewrite | 19,061,064,903,206 | 0684d4c613467c6286e2296e057103fa7ee5c19f | dc7d0dc41c0914de0e657f8d6e4ea38593d36bae | /main.py | b65809d5c9a7a3e1a7be5a246ae33c7aca9ca5a9 | [
"MIT"
]
| permissive | https://github.com/SeeWhatSticks/task-mistress-rewrite | b5664b1566b4cd6131f9ca5346405f92f18baceb | 78dc7f7aac2b9655b1810ce144beb49bd2f95474 | refs/heads/master | 2021-05-24T13:41:46.668851 | 2020-04-14T08:48:04 | 2020-04-14T08:48:04 | 253,587,854 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
import json
from types import MethodType
from discord import Embed
from discord.ext import commands
bot = commands.Bot(command_prefix='$')
bot.load_extension('play')
bot.load_extension('verify')
bot.load_extension('create')
bot.load_extension('set')
bot.CHECK_MARK_BUTTONS = "✅"
bot.NUMBER_BUTTONS = {
"1️⃣": 1,
"2️⃣": 2,
"3️⃣": 3,
"4️⃣": 4,
"5️⃣": 5
}
bot.BKWD_ARROW = "◀️"
bot.FRWD_ARROW = "▶️"
bot.COLORS = {
'default': 0x3300cc,
'set': 0x003399,
'verify': 0xff3399,
'confirm': 0x33ff33,
'error': 0xff3333
}
with open('data/game.json', 'r', encoding='utf8') as file:
bot.game = json.load(file)
with open('data/tasks.json', 'r', encoding='utf8') as file:
bot.tasks = json.load(file)
bot.tasks = {int(k): v for (k, v) in bot.tasks.items()}
for task in bot.tasks.values():
task['ratings'] = {int(k): v for (k, v) in task['ratings'].items()}
with open('data/players.json', 'r', encoding='utf8') as file:
bot.players = json.load(file)
bot.players = {int(k): v for (k, v) in bot.players.items()}
for player in bot.players.values():
player['tasks'] = {int(k): v for (k, v) in player['tasks'].items()}
with open('data/interfaces.json', 'r', encoding='utf8') as file:
bot.interfaces = json.load(file)
bot.interfaces = {int(k): v for (k, v) in bot.interfaces.items()}
def get_player_data(self, user_id):
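    # Lazily create a default player record (not available, no tasks, every
    # category set as a limit) the first time a user id is seen.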
if user_id not in self.players:
bot.players[user_id] = {
'available': False,
'tasks': {},
'lastBegTime': None,
'lastTreatTime': None,
'limits': []
}
for key in self.game['categories'].keys():
bot.players[user_id]['limits'].append(key)
return bot.players[user_id]
bot.get_player_data = MethodType(get_player_data, bot)
async def add_category_reactions(self, message):
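    # Rebuild the reaction "buttons" for the interface message: back arrow, the
    # category emojis belonging to the current page, then forward arrow.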
await message.clear_reactions()
await message.add_reaction(self.BKWD_ARROW)
page = self.interfaces[message.id]['page']
for value in self.game['categories'].values():
if value['page'] == page:
await message.add_reaction(value['symbol'])
await message.add_reaction(self.FRWD_ARROW)
bot.add_category_reactions = MethodType(add_category_reactions, bot)
def confirm_embed(name, confirm_string):
return Embed(
title='Confirmation for {}'.format(name),
description=confirm_string,
color=bot.COLORS['confirm'])
bot.confirm_embed = confirm_embed
def error_embed(name, error_string):
return Embed(
title='Error for {}'.format(name),
description=error_string,
color=bot.COLORS['error'])
bot.error_embed = error_embed
def calculate_severity(task):
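    # Severity is the rounded mean of all ratings given to the task.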
return round(sum([v for v in task['ratings'].values()]) / len(task['ratings']))
bot.calculate_severity = calculate_severity
def calculate_score(player):
print(player['tasks'].values())
completed_tasks = [k for (k, v) in player['tasks'].items() if v['completed']]
return sum([calculate_severity(bot.tasks[v]) for v in completed_tasks])
def save_data(self):
with open('data/game.json', 'w+') as file:
file.write(json.dumps(self.game, indent=4))
with open('data/tasks.json', 'w+') as file:
file.write(json.dumps(self.tasks, indent=4))
with open('data/players.json', 'w+') as file:
file.write(json.dumps(self.players, indent=4))
with open('data/interfaces.json', 'w+') as file:
file.write(json.dumps(self.interfaces, indent=4))
bot.save_data = MethodType(save_data, bot)
@bot.event
async def on_command_error(ctx, error):
await ctx.channel.send(embed=ctx.bot.error_embed(
ctx.author.display_name,
str(error)))
@bot.event
async def on_ready():
print('We have logged in as {}'.format(bot.user))
@bot.event
async def on_raw_reaction_add(event):
user = bot.get_user(event.user_id)
if user.bot:
return # Ignore own reactions and other bot reactions
if event.message_id not in bot.interfaces:
return # Ignore if this is not an interface message
interface = bot.interfaces[event.message_id]
if 'page' not in interface:
return # Ignore interfaces that don't have pages
channel = bot.get_channel(event.channel_id)
message = await channel.fetch_message(event.message_id)
if event.emoji.name == bot.FRWD_ARROW:
interface['page'] = interface['page'] + 1
if interface['page'] > bot.game['lastCategoryPage']:
interface['page'] = 0
await bot.add_category_reactions(message)
return
elif event.emoji.name == bot.BKWD_ARROW:
interface['page'] = interface['page'] - 1
if interface['page'] < 0:
interface['page'] = bot.game['lastCategoryPage']
await bot.add_category_reactions(message)
return
@bot.command(hidden=True)
@commands.has_role("Administrator")
async def begin(ctx):
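    # End the current season: announce the top scorer(s), record them in
    # pastWinners, then reset per-player task state for the new season.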
user = ctx.author
season_number = bot.game['seasonNumber']
if season_number is None:
bot.game['seasonNumber'] = 1
else:
bot.game['seasonNumber'] = season_number + 1
player_scores = {k: calculate_score(v) for (k, v) in bot.players.items()}
top = max(player_scores.values())
winners = {}
for k, v in player_scores.items():
if v == top:
winners[k] = await bot.fetch_user(k)
bot.game['pastWinners'][season_number] = {
'winners': [v.id for v in winners.values()],
'score': top
}
embed = Embed(
title="Winners",
description='Winners for season {}'.format(season_number),
color=bot.COLORS['default'])
embed.add_field(
name='With {} points'.format(top),
value=", ".join([winner.mention for winner in winners.values()]),
inline=False)
await ctx.channel.send(embed=embed)
await ctx.channel.send(embed=confirm_embed(
user.display_name,
"Scores and tasks have been reset, a new game has started!"))
bot.game['seasonBegin'] = datetime.now().timestamp()
# Reset player data
for key in bot.players:
player = bot.players[key]
player['tasks'] = {}
player['lastBegTime'] = None
player['lastTreatTime'] = None
    bot.interfaces = {k: v for (k, v) in bot.interfaces.items() if v['type'] != 'verification'}
ctx.bot.save_data()
@bot.command(hidden=True)
@commands.has_role("Administrator")
async def end(ctx):
bot.save_data()
await bot.close()
# Get the Discord token from local a plaintext file
with open('token.txt', 'r') as file:
# This line causes the client to connect to the server
bot.run(file.read())
| UTF-8 | Python | false | false | 6,831 | py | 9 | main.py | 5 | 0.618291 | 0.610793 | 0 | 193 | 34.238342 | 99 |
RedaRamadan/PythonSamples | 395,137,029,701 | a6d1d4985eee15ac1aab977ea9e576b33f28762d | 98d43ced928ac8ede62387f7f2210ae4b9489926 | /03. Access Web Data/04_NetworksAndSockets.py | a57610479f81fa7116b25c98a93961988e7d0534 | []
| no_license | https://github.com/RedaRamadan/PythonSamples | ee5de834c30d68deb36df6b609121e65ab1ade29 | 58ffb038844163a465c946b213f8773ab4e3996e | refs/heads/master | 2019-03-14T06:33:26.595365 | 2017-09-17T15:25:47 | 2017-09-17T15:25:47 | 102,898,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Transport Control Protocol (TCP)
# =================================
# Built on top of IP (Internet Protocol).
# Assumes IP might lose some data - stores and retransmits data if it seems to be lost.
# Handles "flow control" using a transmit window.
# Provides a nice reliable pipe.
# TCP Connections / Sockets
# =================================
# In computer networking, an Internet socket or network socket is an endpoint of a bidirectional
# inter-process communication flow across an Internet Protocol-based computer network, such as the Internet.
# TCP Port Numbers
# =================================
# A port is an application-specific or process-specific software communications endpoint.
# It allows multiple networked applications to coexist on the same server.
# There is a list of well-known TCP port numbers.
# Sockets in Python
# =================================
# Python has built-in support for TCP Sockets.
import socket
mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mySocket.connect(('data.pr4e.org', 80))
# Application Protocol
# =================================
# Since TCP (and Python) gives us a reliable socket, what do we want to do with the socket? What problem do we
# want to solve?
# Application Protocols:
# Mail
# World Wide Web
# HTTP - Hypertext Transfer Protocol
# ==================================
# The dominant Application Layer Protocol on the Internet.
# Invented for the Web - to Retrieve HTML, Images, Documents, etc.
# Extended to be data in addition to documents - RSS, Web Services, etc..
# Basic Concept - Make a connection - Request a document - Retrieve the document - Close the connection.
# HTTP is the set of rules to allow browsers to retrieve web documents from servers over the Internet.
# An HTTP Request in Python
# ==================================
import socket
mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mySocket.connect(('localhost', 80))
cmd = 'GET http://localhost/romeo.txt HTTP/1.0\n\n'.encode()
mySocket.send(cmd)
while True:
data = mySocket.recv(512)
if len(data) < 1: break
print(data.decode())
mySocket.close() | UTF-8 | Python | false | false | 2,152 | py | 54 | 04_NetworksAndSockets.py | 52 | 0.663569 | 0.658457 | 0 | 64 | 32.640625 | 110 |
IcaroTARique/APA_1 | 13,056,700,610,428 | 2d130c9d112ae83011701dddb4ee0d927c479c57 | 677c7e48864e2cd6aef3f0fbdbb131d218e41415 | /insertionSort.py | 8be0f5d604ecbf379e0cee45e72d9773ab5ff0e0 | []
| no_license | https://github.com/IcaroTARique/APA_1 | 4d5b2be6793bdc6297f5952cbcff5f4189611c61 | dd44e7481bfe3ecb9f2f9dedb2816ed8012cbf75 | refs/heads/master | 2021-04-06T00:49:49.491488 | 2018-07-25T23:07:42 | 2018-07-25T23:07:42 | 124,418,316 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3.5
# To run the program: ./insertionSort.py
import sys
#coding=UTF-8
def insertionSort(lista, tamanho):
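    # Swap-based insertion sort: each new element is swapped leftwards until it
    # reaches its sorted position.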
for i in range (1,len(lista),1):
menor = i
        #for j in range(0, i, -1): <== (this for loop was giving an unknown error)
j = i - 1
while j >= 0:
if lista[menor] <lista[j]:
aux = lista[menor]
lista[menor] = lista[j]
lista[j] = aux
else:
break
#print (lista)
j = j - 1
menor = menor -1
#lista = [2,5,6,3,1,4]
#lista = [54,26,93,17,77,31,44,55,20]
#lista = ["a", "k", "b", "f", "z", "g", "d", "o"]
#print(lista)
#insertionSort(lista, len(lista))
#print(lista)
lista = []
nome = 'num.1000.1.in'
nome = sys.argv[1]
arq = open(nome, 'r')
#lista = ["a", "k", "b", "f", "z", "g", "d", "o"]
i = 0
for line in arq:
convertido = int(line)
lista.append(convertido)
i = i + 1
arq.close()
insertionSort(lista, len(lista))
print(lista) | UTF-8 | Python | false | false | 1,005 | py | 4 | insertionSort.py | 4 | 0.503483 | 0.460697 | 0 | 44 | 21.863636 | 68 |
Deyspring/Hydra_transcripts | 16,217,796,551,765 | ad0fa4a04ce5f4d7790f24a0a1d44d62d9724b35 | 84694ba6c19b4471c86ab39b8c8141ab3d318f34 | /hyvenv/bin/conda | 1ecb8b5cbb1c6e097e0405c4d9891e2a442aafd1 | []
| no_license | https://github.com/Deyspring/Hydra_transcripts | 03cd337a2a29c314e03bc188b9ca45b5378674bd | 6fca0f194d0b53c7ba079a5cb81bc3daf8a6efc0 | refs/heads/master | 2022-10-14T09:45:10.450675 | 2020-08-06T23:23:31 | 2020-08-06T23:23:31 | 229,341,949 | 0 | 0 | null | false | 2022-10-01T15:33:45 | 2019-12-20T21:55:44 | 2020-08-06T23:23:49 | 2020-08-06T23:23:47 | 23,098 | 0 | 1 | 1 | Python | false | false | #!/Users/katherinedey/Pictures/Code/GitHub/Hydra_transcripts/hyvenv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'conda==4.3.13','console_scripts','conda'
__requires__ = 'conda==4.3.13'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('conda==4.3.13', 'console_scripts', 'conda')()
)
| UTF-8 | Python | false | false | 435 | 12 | conda | 9 | 0.641379 | 0.609195 | 0 | 12 | 35.25 | 78 |
|
khoa-beep/Pythonbasic | 6,871,947,678,788 | 0305dfd74e566fc7a365c43847242dcf56ccd78a | 4501bde9ebb928d90a46025b70ede849b7ffa533 | /Data_Struct/basic_stack.py | 76a5af3bb359c00f9b7dd4dd8b60639b62407e4e | []
| no_license | https://github.com/khoa-beep/Pythonbasic | 8f6a91a98c9528059e8ef4241fc2eef7148080a3 | 8f7960e00512d46fe06689e3c35e1e6e46282b82 | refs/heads/master | 2023-04-13T17:57:41.330191 | 2021-04-25T15:20:45 | 2021-04-25T15:20:45 | 361,463,816 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Stack:
def __init__(self):
self._data = []
def push(self, e):
self._data.append(e)
def pop(self):
return self._data.pop()
def len(self):
return len(self._data)
def is_empty(self):
return not self._data
def top(self):
if self.is_empty():
return False
return self._data[-1]
def Binary(n):
    # Convert n to its binary-string representation using a stack of remainders
    s = Stack()
while n != 0:
if(n % 2 == 0):
s.push(0)
else:
s.push(1)
n = n // 2
v = ''
while s.len() != 0:
a = s.pop()
v = v + str(a)
print(v)
| UTF-8 | Python | false | false | 640 | py | 5 | basic_stack.py | 4 | 0.428125 | 0.415625 | 0 | 36 | 16.75 | 31 |
KHdvip/Python3 | 7,584,912,292,319 | 0935ffd8bc9a782ddceb2ced4582c6cd84810a57 | 53e3df620f8e6e2f7bf260f25ae2df63aa75b2de | /Module/code/section_24/test_arrow.py | e0265ec158b3fa73fb051ff0935ca20eaca2deb3 | []
| no_license | https://github.com/KHdvip/Python3 | 8b5a07ace860721fdc87c3b065bf5d5a20052ce5 | 3a7184d27af706f21d35bb73fda270f89954360c | refs/heads/master | 2020-07-01T14:35:22.021696 | 2020-06-11T02:58:07 | 2020-06-11T02:58:07 | 201,196,801 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import arrow
# Create an arrow object from the current time
# Get the current time in the system's local timezone
now_time = arrow.now()
print(now_time)
# 2019-09-18T17:22:07.964295+08:00
# Get the current standard (UTC) time
now_time = arrow.utcnow()
print(now_time)
# 2019-09-18T09:22:07.964374+00:00
# Get the current time in a US timezone
now_time = arrow.now('US/Pacific')
print(now_time)
# 2019-09-18T02:22:07.967367-07:00
# Create an arrow object from a given timestamp
t = arrow.get(1568769380)
print(t)
# Other ways to create arrow objects
t = arrow.get('2019-09-05 12:30:45', 'YYYY-MM-DD HH:mm:ss')
print(t)
t = arrow.get(2019, 9, 18)
print(t)
t = arrow.Arrow(2019, 9, 18)
print(t)
t = arrow.get(2019, 9, 18, 12)
print(t)
t = arrow.get(2019, 9, 18, 12, 21)
print(t)
t = arrow.get(2019, 9, 18, 12, 23, 23)
print(t)
# Attributes of an arrow object
print(t.timestamp)
print(t.year)
print(t.month)
print(t.day)
print(t.hour)
print(t.minute)
print(t.second)
print(t.microsecond)
print(t.week)  # week number within the year
print(t.weekday())  # day of the week
# Time calculations
a = arrow.now()
print(a)
print(a.replace(hour=4))
print(a)
# Convert to a specified string format
a = arrow.now()
b = a.format('YYYY-MM-DD HH:mm:ss')
print(b)
# Convert between local time and UTC
a = arrow.now()
b = a.to('local')
c = a.to('utc')
print(a)
print(b)
print(c)
| UTF-8 | Python | false | false | 1,323 | py | 500 | test_arrow.py | 277 | 0.649867 | 0.521662 | 0 | 80 | 13.125 | 59 |
mcscope/music_generation | 13,993,003,471,321 | b49d00073d1abff695cdd3bedd62e9d9e7b1c1a8 | 2e21402c2ffb4928564600bdf7a789eb7a6fea45 | /pyo_demo5.py | f37deb9e216812c18a3bcf5bda66f6d403deb18e | []
| no_license | https://github.com/mcscope/music_generation | 01c1e2918ffae0fe0e75e342a5735fdb18d97eac | 36e008c17e71734216fd2e9025b2a32c592f19f0 | refs/heads/master | 2021-01-25T09:00:58.158172 | 2017-06-08T16:54:26 | 2017-06-08T16:54:26 | 93,771,283 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from pyo import *
s = Server().boot()
s.start()
sixteenth_met = Metro(time=1.0/16, poly=1).play()
notes = range(300,500, 50)
# notes = [500,800]
def instrument(notes, amp=1.0):
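    # A simple generative voice: a metro with a random period triggers an
    # amplitude envelope and picks a random frequency from `notes` for a sine.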
t = CosTable([(0,0), (50,1), (250,.3), (8191,0)])
note_len = TrigRand(sixteenth_met, min=1.0/8, max=2.0)
note_freq = TrigRand(sixteenth_met, min=1.0/4, max=1.0)
met = Metro(time=note_freq, poly=2).play()
volume = TrigEnv(met, table=t, dur=note_len, mul=.4 * amp)
freq = TrigChoice(met, notes)
a = Sine(freq=freq, mul=volume )
return a
instruments = []
instruments.append(instrument(notes))
instruments.append(instrument(range(100,200,20), amp= 1.5))
instruments.append(instrument(range(1000,1500,100), amp=0.25))
mix = Mix(instruments)
fin = Chorus(mix, depth=[1.5,1.6], feedback=0.5, bal=0.5)
fin.out()
time.sleep(60.000000)
s.stop()
time.sleep(0.25)
s.shutdown()
| UTF-8 | Python | false | false | 896 | py | 10 | pyo_demo5.py | 9 | 0.651786 | 0.551339 | 0 | 37 | 23.216216 | 62 |
jhuapl-boss/boss-tools | 14,431,090,117,608 | 870ba0cf825692976444deffb7c5ca34831245a4 | 4bd6460f4bd8281e2fb88c9efd5effd4da7108e7 | /lmbdtest/test_downsample_volume_lambda.py | e0f59d3164bdb9d68f898aa1083db72f992b4f88 | [
"Apache-2.0"
]
| permissive | https://github.com/jhuapl-boss/boss-tools | dfa97fc72aeee290eb933e5641cabcce0a82561f | 2ace8ce2985ffa3c442ed85134d26c76fb5d984f | refs/heads/master | 2023-02-17T23:19:34.305298 | 2021-07-12T15:27:13 | 2021-07-12T15:27:13 | 56,524,104 | 1 | 3 | Apache-2.0 | false | 2023-02-16T19:49:04 | 2016-04-18T16:31:12 | 2021-09-23T19:23:16 | 2023-02-16T19:49:03 | 744 | 1 | 4 | 1 | Python | false | false | # Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# lambdafcns contains symbolic links to lambda functions in boss-tools/lambda.
# Since lambda is a reserved word, this allows importing from that folder
# without updating scripts responsible for deploying the lambda code.
import importlib
# from lambda import downsample_volume as dsv
dsv = importlib.import_module("lambda.downsample_volume")
import unittest
from unittest.mock import patch
from bossutils.multidimensional import XYZ
import numpy as np
import blosc
import boto3
from moto import mock_s3, mock_sqs, mock_dynamodb2
# Downsample Volume Lambda handler arguments
args = {
'bucket_size': 1,
'cubes_arn': 'cubes_arn',
'args': {
'collection_id': 1,
'experiment_id': 1,
'channel_id': 1,
'annotation_channel': False,
'data_type': 'uint8',
's3_bucket': 's3_bucket',
's3_index': 's3_index',
'resolution': 0,
'type': 'anisotropic',
'iso_resolution': 4,
'aws_region': 'us-east-1',
},
'step': [2,2,1],
'dim': [512,512,16],
'use_iso_flag': False,
}
# Since there is no default for the region_name, set one
boto3.setup_default_session(region_name = 'us-east-1')
class TestDownsampleVolumeLambda(unittest.TestCase):
@patch('lambda.downsample_volume.S3DynamoDBTable', autospec=True)
@patch('lambda.downsample_volume.S3Bucket', autospec=True)
@patch('blosc.decompress', autospec=True)
def test_downsample_volume(self, fake_decompress, fake_s3, fake_s3_ind):
"""
Just execute the majority of the code in downsample_volume() to catch
typos and other errors that might show up at runtime.
"""
fake_s3.get.return_value = None
fake_decompress.return_value = np.random.randint(
0, 256, (16, 512, 512), dtype='uint64')
args = dict(
collection_id=1,
experiment_id=2,
channel_id=3,
annotation_channel=True,
data_type='uint64',
s3_bucket='testBucket.example.com',
s3_index='s3index.example.com',
resolution=0,
type='isotropic',
iso_resolution=4,
aws_region='us-east-1'
)
target = XYZ(0, 0, 0)
step = XYZ(2, 2, 2)
dim = XYZ(512, 512, 16)
use_iso_key = True
dsv.downsample_volume(args, target, step, dim, use_iso_key)
@mock_sqs()
@mock_s3()
def test_empty_volume(self):
# Create cubes_arn and populate with one target cube
sqs = boto3.client('sqs')
sqs.create_queue(QueueName = 'cubes_arn')
sqs.send_message(QueueUrl = 'cubes_arn',
MessageBody = '[0,0,0]')
# Create the s3_bucket Bucket
s3 = boto3.client('s3')
s3.create_bucket(Bucket = 's3_bucket')
dsv.handler(args, None)
# TODO check s3 and verify no cubes were added
@mock_dynamodb2
@mock_sqs()
@mock_s3()
def test_full_volume(self):
# Create cubes_arn and populate with one target cube
sqs = boto3.client('sqs')
sqs.create_queue(QueueName = 'cubes_arn')
sqs.send_message(QueueUrl = 'cubes_arn',
MessageBody = '[0,0,0]')
# Create s3_index table
ddb = boto3.client('dynamodb')
ddb.create_table(TableName = 's3_index',
AttributeDefinitions = [
{ "AttributeName": "object-key", "AttributeType": "S" },
{ "AttributeName": "version-node", "AttributeType": "N" },
{ "AttributeName": "lookup-key", "AttributeType": "S" }
],
KeySchema = [
{ "AttributeName": "object-key", "KeyType": "HASH" },
{ "AttributeName": "version-node", "KeyType": "RANGE" }
],
GlobalSecondaryIndexes = [
{ "IndexName": "lookup-key-index",
"KeySchema": [
{ "AttributeName": "lookup-key", "KeyType": "HASH" }
],
"Projection": { "ProjectionType": "KEYS_ONLY" },
"ProvisionedThroughput": {
"ReadCapacityUnits": 15,
"WriteCapacityUnits": 15
}
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 15,
"WriteCapacityUnits": 15
})
# Create the s3_bucket Bucket
s3 = boto3.client('s3')
s3.create_bucket(Bucket = 's3_bucket')
# Create cube of data
data = np.zeros([16,512,512], dtype=np.uint8, order='C')
data = blosc.compress(data, typesize=8)
# Put cube data for the target cubes
for key in [dsv.HashedKey(None, 1,1,1,0,0,0,version=0),
dsv.HashedKey(None, 1,1,1,0,0,1,version=0),
dsv.HashedKey(None, 1,1,1,0,0,2,version=0),
dsv.HashedKey(None, 1,1,1,0,0,3,version=0)
]:
s3.put_object(Bucket = 's3_bucket',
Key = key,
Body = data)
dsv.handler(args, None)
# TODO check s3 and make sure cubes were added
# TODO check dynamodb and make sure index enteries were added
| UTF-8 | Python | false | false | 6,229 | py | 115 | test_downsample_volume_lambda.py | 107 | 0.545352 | 0.520308 | 0 | 174 | 34.798851 | 86 |
MarcoFurrer/cs-tribunal | 8,976,481,684,335 | 462668984fd9ccc9e04ff2734258f9ae8aaa57e4 | 5f1bdff59c499a8967fd99ef520e28d034aefe8d | /oauth2discord/discordlogin/views.py | 046558efea307fd75f50d67278a84d29fa148bba | []
| no_license | https://github.com/MarcoFurrer/cs-tribunal | bcd5413fc6946528e649b5f9f2c80556a09f7236 | ce61e8185ed98c30ca0ba1332046e9987b4c4f7c | refs/heads/master | 2023-03-22T18:59:40.914545 | 2021-03-14T15:18:54 | 2021-03-14T15:18:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.http import HttpRequest, HttpResponse, JsonResponse
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login
from .decorators import unauthenticated_user
import requests
# Create your views here.
#@unauthenticated_user
def welcome(request):
return render(request,"welcome.html")
def instructions(request):
return render(request,"instructions.html")
def settings(request):
return render(request,"settings.html")
def contact(request):
return render(request,"contact.html")
def home(request):
return render(request,"vote.html")
auth_url_discord = "https://discord.com/api/oauth2/authorize?client_id=771309151410192384&redirect_uri=http%3A%2F%2Flocalhost%3A8000%2Foauth2%2Flogin%2Fredirect&response_type=code&scope=identify"
def discord_login(request: HttpRequest):
return redirect(auth_url_discord)
def discord_login_redirect(request: HttpRequest):
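    # Discord redirects here with ?code=...; exchange it for the user's profile
    # and authenticate the session.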
code = request.GET.get('code')
user = exchange_code(code)
authenticate(request, user = user)
#return JsonResponse({"user": user})
return redirect('home')
def exchange_code(code:str):
data = {
"client_id":"771309151410192384",
"client_secret":"7mfvS2ub9y4AU14SrB8k_ku8WVyD2BJT",
"grant_type": "authorization_code",
"code": code,
"redirect_uri": "http://localhost:8000/oauth2/login/redirect",
"scope": "identify" #todo: Welches scope nötig für Gruppen: https://discord.com/developers/docs/topics/oauth2#shared-resources-oauth2-scopes
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
response = requests.post("https://discord.com/api/oauth2/token", data = data, headers = headers)
print(response)
credentials = response.json()
access_token = credentials["access_token"]
response = requests.get("https://discord.com/api/v6/users/@me", headers = {
'Authorization': 'Bearer %s' % access_token
})
print(response)
user = response.json()
return user
| UTF-8 | Python | false | false | 2,048 | py | 14 | views.py | 6 | 0.718475 | 0.685728 | 0 | 52 | 38.346154 | 195 |
stnguyenn/learnpy | 14,302,241,144,246 | caf246cb0002f0f2e13d6dc61cbc673db90380c0 | 92157160381a47bea221f58e550f7b325fc3273e | /3_3_list.py | a07a18f33530e5c54c77fdbe0f3d9a4a65ce82f1 | [
"MIT"
]
| permissive | https://github.com/stnguyenn/learnpy | 96658789c0a816f9643ce2800f7038332d2a0795 | 4fc201bf461b0f7aa1a111a6a31b27dd492ad969 | refs/heads/master | 2020-04-30T13:24:49.327702 | 2019-03-25T00:57:33 | 2019-03-25T00:57:33 | 176,857,093 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
squares = [1, 4, 9, 16, 25]
squares
[1, 4, 9, 16, 25]
squares[0] # indexing returns the item
1
squares[-1]
25
squares[-3:] # slicing returns a new list
[9, 16, 25]
squares[:]
[1, 4, 9, 16, 25]
squares + [36, 49, 64, 81, 100]
[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
cubes = [1, 8, 27, 65, 125] # something's wrong here
4 ** 3 # the cube of 4 is 64, not 65!
64
cubes[3] = 64 # replace the wrong value
cubes
[1, 8, 27, 64, 125]
cubes.append(216) # add the cube of 6
cubes.append(7 ** 3) # and the cube of 7
cubes
[1, 8, 27, 64, 125, 216, 343]
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
letters
['a', 'b', 'c', 'd', 'e', 'f', 'g']
# replace some values
letters[2:5] = ['C', 'D', 'E']
letters
['a', 'b', 'C', 'D', 'E', 'f', 'g']
# now remove them
letters[2:5] = []
letters
['a', 'b', 'f', 'g']
# clear the list by replacing all the elements with an empty list
letters[:] = []
letters
[]
letters = ['a', 'b', 'c', 'd']
len(letters)
4
a = ['a', 'b', 'c']
n = [1, 2, 3]
x = [a, n]
x
[['a', 'b', 'c'], [1, 2, 3]]
x[0]
['a', 'b', 'c']
x[0][1]
'b'
# Fibonacci series:
# the sum of two elements defines the next
a, b = 0, 1
while a < 10:
print(a)
a, b = b, a+b
0
1
1
2
3
5
8
i = 256*256
print('The value of i is', i)
# The value of i is 65536
a, b = 0, 1
while a < 1000:
print(a, end=',')
a, b = b, a+b
0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,
| UTF-8 | Python | false | false | 1,384 | py | 18 | 3_3_list.py | 18 | 0.524566 | 0.389451 | 0 | 85 | 15.235294 | 65 |
WodlBodl/visionAssistant | 18,743,237,287,123 | dcf68a8c9d7d567790a1b39ec25f1f3ae05a471e | 8f55c617b24de30965c03453cce8a7ba78e8243c | /backend/tagging.py | 4f5579aec0052f90801ac4ad66900c6f468316d3 | [
"MIT"
]
| permissive | https://github.com/WodlBodl/visionAssistant | ac99accc37f7747f58bb8a9a567346aeeacd6741 | 94bdb5ac1d4a0be0be23355df27f9c298784041e | refs/heads/master | 2020-04-30T14:07:54.305117 | 2015-09-20T11:43:19 | 2015-09-20T11:43:19 | 42,779,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
# Required headers for requests to Imagga API
headers = {
'accept': 'application/json',
'authorization': 'Basic YWNjXzRiNjIzMWMxYzdkZmZlYzoxYzlkNjYzZTUzODQwMzY3MmJiZWFkMTRmYjA1ZGRkZQ=='
}
# Imagga API url
imaggaUrl = 'http://api.imagga.com/v1/'
'''
This function posts an image to Imagga's /content endpoint. Once uploaded,
an image can be used in the other endpoints.
Inputs:
    - imgPath, string: path to the local image file to upload
Outputs:
- contentId, string
'''
def postImage(imgPath):
urlContent = imaggaUrl + 'content'
response = requests.post(urlContent, headers=headers, files={'file': open(imgPath, 'rb')})
contentId = response.json()['uploaded'][0]['id']
return contentId
'''
This function obtains tags with a confidence greater than 15 percent for the
image at imgPath
Inputs:
    - imgPath, string: path to the local image file to tag
Outputs:
- tags, array, each entry is a dictionary with the following
keys {'tag', 'confidence'}
'''
def tagImage(imgPath):
    tags = []
    # Obtain the content ID by uploading the image first
    imgContentId = postImage(imgPath)
    urlTagging = imaggaUrl + 'tagging'
    querystring = {'content': imgContentId, 'version': '2'}
    response = requests.request('GET', urlTagging, headers=headers, params=querystring)
    unfilteredTags = response.json()['results'][0]['tags']
    # Remove the image from Imagga once tags are obtained
    deleteUrl = imaggaUrl + 'content/' + imgContentId
    deletedResponse = requests.delete(deleteUrl, headers=headers)
    for tag in unfilteredTags:
        if float(tag['confidence']) > 15:
            tags.append(tag)
    return tags
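# Minimal usage sketch (the image path below is illustrative, not a file shipped with this module):
if __name__ == '__main__':
    for t in tagImage('images/input.jpg'):
        print(t['tag'], t['confidence'])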
| UTF-8 | Python | false | false | 1,621 | py | 13 | tagging.py | 7 | 0.694016 | 0.687847 | 0 | 61 | 25.57377 | 101 |
ywang6/ML | 11,218,454,607,284 | ae30d45871a284e15978f911a31334ad8934fbbc | 36942fd7ded3fa59e5f7dcdf81d42526d19e067e | /HW1/Assignment1Code/Q5Part3_5YelpLasso.py | 0862a51ccde6b0eacab0a6efbcee2853d59578bd | []
| no_license | https://github.com/ywang6/ML | 13a1d629adbebb3c470c80391d1cfe2ae8849710 | d5a5b164ead5e2f35fad6bb11a325f59fea28543 | refs/heads/master | 2021-01-18T02:18:37.516407 | 2014-01-18T01:42:06 | 2014-01-18T01:42:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 14 02:12:18 2013
@author: shrainik
This solution uses sparse matrices
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sparse
import scipy.io as io
import time as time
from operator import itemgetter
def lasso_solver(X,wi,w0i,y,lmbd):
convergence = False
delta = 0.2
currentIteration = 0
maxIterations = 3000
n,d = X.shape
w = wi
w_old = wi
w0 = w0i
w0_old = w0i
while convergence == False and currentIteration < maxIterations:
currentIteration += 1
        w_old = w.copy()  # copy the values; otherwise w_old aliases w and the convergence test compares w with itself
w0_old = w0
for j in range(d):
w_dum = w
w_dum[j] = 0
if sparse.issparse(X):
aj = 2*((X.T[j].todense().getA()**2).sum())
jth_col = np.reshape(X[:,j].todense().getA(),[n,1])
else:
aj = 2*((X.T[j]**2).sum())
jth_col = np.reshape(X[:,j],[n,1])
cj = np.dot(jth_col.T,y - w0 - X.dot(w_dum))
if cj < lmbd * -1:
wj_hat = (cj+lmbd)/aj
elif cj > lmbd:
wj_hat = (cj-lmbd)/aj
else:
wj_hat = 0
w[j] = wj_hat
w0 = (1.0/len(y))*(sum(y*1.0-(X.dot(w).getA())))
convergence = True
for i in range(d):
if np.absolute(w[i] - w_old[i])/w_old[i] > delta:
convergence = False
break
if convergence == True:
if np.absolute(w0_old - w0)/w0_old > delta:
convergence = False
return w0,w
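# Illustrative call sketch (placeholder names; the __main__ block below shows the real Yelp usage):
# w0_hat, w_hat = lasso_solver(X_train, np.matrix(np.zeros(num_features)).T, 0, y_train, lam)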
def RMSE(y,y_actual):
return np.sqrt(np.sum(np.square(y-y_actual))/len(y))
if __name__ == "__main__":
y = np.loadtxt("C:\Users\shrjain\Downloads\data\star_labels.txt", dtype=np.int)
# Load a text file of strings:
featureNames = open("C:\Users\shrjain\Downloads\data\star_features.txt").read().splitlines()
# Load a matrix market matrix, convert it to csc format:
A = io.mmread("C:\Users\shrjain\Downloads\data\star_data.mtx").tocsc()
#A = A.todense().getA()
A_train = A[:30000]
A_validation = A[30001:37501]
A_test = A[37500:]
y_train = np.matrix(y[:30000]).T
y_validation = np.matrix(y[30001:37501]).T
y_test = np.matrix(y[37500:]).T
factor = 1.3
print 'All data loaded.. starting now...'
start = time.time()
lambda_max = 2*max(np.absolute(A_train.transpose().dot(y_train - np.average(y_train))))[0,0]
vldtnError = []
trainError = []
lambda_array = []
numNonZeroes = []
w0store = []
lambda_array.append(lambda_max)
w_int = np.matrix(np.zeros(2500)).T
w_hat1 = lasso_solver(A_train,w_int,0,y_train,lambda_max)
W_HAT = w_hat1[1].T
w0store.append(w_hat1[0])
vldtnError.append(RMSE(A_validation.dot(w_hat1[1])+w_hat1[0],y_validation))
trainError.append(RMSE(A_train.dot(w_hat1[1])+w_hat1[0],y_train))
numNonZeroes.append(sum(W_HAT!=0))
lamda = lambda_max/factor
iterationCount = 1
condition = True
while condition == True and iterationCount < 10000:
print 'iteration ', iterationCount
lambda_array.append(lamda)
w_hat1 = lasso_solver(A_train,w_hat1[1],w_hat1[0],y_train,lamda)
W_HAT = np.vstack((W_HAT, w_hat1[1].T))
w0store.append(w_hat1[0])
vldtnError.append(RMSE(A_validation.dot(w_hat1[1])+w_hat1[0],y_validation))
trainError.append(RMSE(A_train.dot(w_hat1[1])+w_hat1[0],y_train))
numNonZeroes.append(sum(w_hat1[1]!=0))
if vldtnError[iterationCount] > vldtnError[iterationCount - 1]:
if iterationCount > 5:
condition = False
lamda = lamda/factor
iterationCount += 1
end = time.time()
print end - start
plt.plot(lambda_array, vldtnError, 'r.--')
plt.plot(lambda_array, trainError, 'g.--')
plt.xlabel('Lambda')
plt.legend(('vldtnError','trainError'), loc='best')
plt.show()
plt.plot(lambda_array, numNonZeroes, 'b.--')
plt.xlabel('Lambda')
plt.ylabel('numNonZeroes')
plt.show()
print vldtnError, trainError, lambda_array, numNonZeroes
validFeatures = []
wForValidFeatures = []
bestW = 9999 if iterationCount == 10000 else iterationCount - 2
print 'lambda for best w', lambda_array[bestW]
print 'RMSE for best w on test data', RMSE(A_test.dot(W_HAT[bestW][0,:].T)+w0store[bestW], y_test)
absWeights = []
for i in range(2500):
if W_HAT[bestW][0,i] !=0:
validFeatures.append(featureNames[i])
wForValidFeatures.append(W_HAT[bestW][0,i])
absWeights.append(np.abs(W_HAT[bestW][0,i]))
print sorted(zip(validFeatures, absWeights, wForValidFeatures), key = itemgetter(1), reverse= True)[:10] | UTF-8 | Python | false | false | 4,787 | py | 33 | Q5Part3_5YelpLasso.py | 11 | 0.578651 | 0.543973 | 0 | 136 | 34.205882 | 108 |
Anzinius/Networking_Repository | 11,785,390,302,526 | bf70f3742a3f37826061f1fab3ecfed00dfd8f78 | 2c0c26cb5f5122f93347c415ff56f6a948b69aa6 | /4.Firewall_Batch_file/mf2_firewall.py | 3a7866b30307340e1eec504068b112d4ffca81a8 | []
| no_license | https://github.com/Anzinius/Networking_Repository | bb5f63a651a71a8e3f6bce9840d3a832ce313a00 | dc2ea6975e82a320a0b79b936cc8b481d16f3f5a | refs/heads/main | 2023-03-16T15:31:19.755383 | 2023-03-03T02:18:35 | 2023-03-03T02:18:35 | 316,579,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import os.path
from openpyxl import Workbook
import sys
from PyQt5 import QtCore, QtGui, QtWidgets, uic
Ui_Form = uic.loadUiType("mf2_win.ui")[0]
class MyWindow(QtWidgets.QMainWindow, Ui_Form):
def __init__(self):
super().__init__()
self.setupUi(self)
self.initUi()
def initUi(self):
self.setWindowTitle("공지 사항 IP 변환기")
self.pushButton.clicked.connect(self.fileopen)
def fileopen(self):
global filename
global f
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.path.expanduser("~/Downloads"))
if filename[0]:
f = open(filename[0], 'r')
## wb= openpyxl.Workbook() ===> import openpyxl
wb = Workbook() # from openpyxl import Workbook
sheet = wb.active
sheet['A1'] = 'ZONE'
sheet['B1'] = '호스트이름'
sheet['C1'] = 'IP'
while True:
line=f.readline()
if not line: break
a = line.split("|")
data = "상위기관"+time.strftime('%y%m%d',time.localtime(time.time()))+"_"+a[1]
sheet.append(['E',data,a[1]])
wb.save(os.path.expanduser("~/Downloads/deny.xlsx"))
f.close()
if __name__ == "__main__" :
app = QtWidgets.QApplication(sys.argv)
myWindow = MyWindow()
myWindow.show()
app.exec_()
| UTF-8 | Python | false | false | 1,473 | py | 13 | mf2_firewall.py | 9 | 0.536433 | 0.529493 | 0 | 52 | 26.711538 | 110 |
jakelourie1502/Giphy_Classification | 7,249,904,823,752 | b0207a055d2cd913c519df4a32bd1312cb99e5ba | 274de76745a8f1913749bf1011001704f2956285 | /slowfast_model.py | 33c0fb90b9f8de6854a39fd0dc7bcc26bfa9c4d7 | []
| no_license | https://github.com/jakelourie1502/Giphy_Classification | 0cee4e8631f00f62c9eacaea866d08a853a902dd | 3958b29c87b8e657e1f6ffc09e10a8e2c36c5702 | refs/heads/main | 2023-07-07T01:20:09.140412 | 2021-08-09T10:36:39 | 2021-08-09T10:36:39 | 385,563,711 | 0 | 0 | null | false | 2021-08-07T20:53:34 | 2021-07-13T10:24:49 | 2021-08-07T11:45:58 | 2021-08-07T20:53:33 | 1,596 | 0 | 0 | 1 | Jupyter Notebook | false | false | import torch
class ResidualBlock(torch.nn.Module):
'''
Base class for residual blocks, used in _make_layer function to create residual layers for the Slow model [only]
There are 2 block versions:
- block_ver=1 -> SlowPath res2 and res3 in slow
- block_ver=2 -> rest of the SlowPath and whole FastPath
'''
def __init__(self,
in_channels,
intermediate_channels,
block_ver,
identity_downsample=None,
stride=1
):
super(ResidualBlock, self).__init__()
# number of channels after a block is always *4 what it was when it entered
self.expansion = 4
if block_ver == 1:
self.conv1 = torch.nn.Conv3d(
in_channels,
intermediate_channels,
kernel_size=1,
stride=1,
padding=0,
bias=True
)
elif block_ver == 2:
self.conv1 = torch.nn.Conv3d(
in_channels,
intermediate_channels,
kernel_size=(3,1,1),
stride=1,
padding=(1,0,0),
bias=True
)
self.bn1 = torch.nn.BatchNorm3d(intermediate_channels)
self.conv2 = torch.nn.Conv3d(
intermediate_channels, # here the in and out channels are the same, value after first layer in the block
intermediate_channels,
kernel_size=(1,3,3),
stride=stride,
padding=(0,1,1),
bias=True
)
self.bn2 = torch.nn.BatchNorm3d(intermediate_channels)
self.conv3 = torch.nn.Conv3d(
intermediate_channels,
intermediate_channels*self.expansion,
kernel_size=1,
stride=1,
padding=0,
bias=True
)
self.bn3 = torch.nn.BatchNorm3d(intermediate_channels*self.expansion)
self.relu = torch.nn.ReLU()
self.conv4 = torch.nn.Conv3d(
intermediate_channels,
intermediate_channels*self.expansion,
kernel_size=1,
stride=stride,
padding=0,
bias=True
)
def forward(self, x):
# We enter the block with x having 'input channels shape, e.g. 256'
out = self.conv1(x) #goes to 128 e.g.
out = self.bn1(out)
out = self.relu(out)
identity = out # identity is at 128
out = self.conv2(out) #stride of 2 here shrinks spatial size
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out) # scales up to 512
identity = self.conv4(identity) # scales up channels to 512 and shrinks spatial size by 2.
out += identity
return self.relu(out)
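# Quick shape-check sketch for a single residual block (illustrative sizes, not executed in this file):
# block = ResidualBlock(in_channels=64, intermediate_channels=64, block_ver=1)
# out = block(torch.randn(1, 64, 4, 56, 56))   # -> torch.Size([1, 256, 4, 56, 56])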
class SlowNet(torch.nn.Module):
def __init__(self,
image_channels=3,
number_of_classes=10
):
super(SlowNet, self).__init__()
self.in_channels = 64
# Slow layers:
self.data_layer_slow = torch.nn.Conv3d(
in_channels=image_channels,
out_channels=image_channels,
kernel_size=1,
stride=(6,1,1),
padding=(0,0,0)
)
self.conv1_slow = torch.nn.Conv3d(
in_channels=3,
out_channels=64,
kernel_size=(1,7,7),
stride=(1,3,3)
)
self.max_pool_slow = torch.nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2))
# _make_residual(number_of_blocks, block_ver, intermediate_channels, stride)
self.res2_slow = self._make_residual_layer(3, 1, 64)
self.res3_slow = self._make_residual_layer(4, 1, 128, (1,2,2))
self.res4_slow = self._make_residual_layer(6, 2, 256, (1,2,2))
self.res5_slow = self._make_residual_layer(3, 2, 512, (1,2,2))
self.adaptavgpool_slow = torch.nn.AdaptiveAvgPool3d(1)
self.flat_slow = torch.nn.Flatten()
self.linear_slow = torch.nn.Linear(2048, number_of_classes)
# Fast layers:
self.data_layer_fast = torch.nn.Conv3d(in_channels=3, out_channels=3, kernel_size=1, stride=1) # stride can later be changed to 2 if needed
self.conv1_fast = torch.nn.Conv3d(in_channels=3, out_channels=16, kernel_size=(5,7,7), stride=(1,2,2)) # is this stride right?
self.max_pool_fast = torch.nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2))
self.res2_fast = self._make_residual_layer(3, 2, 16)
self.res3_fast = self._make_residual_layer(4, 2, 32)
self.res4_fast = self._make_residual_layer(4, 2, 64)
self.res5_fast = self._make_residual_layer(4, 2, 128)
self.avg_pool_fast = torch.nn.AvgPool3d(kernel_size=(1,3,3), stride=(alpha,1,1))
def forward(self, X):
# first put fast layers, so they can be fused to each of the slow ones
# Fast Path
X_fast = self.data_layer_fast(X)
X_fast = self.conv1_fast(X_fast)
X_fast = self.max_pool_fast(X_fast)
X_fast = self.res2_fast(X_fast)
X_fast = self.res3_fast(X_fast)
X_fast = self.res4_fast(X_fast)
X_fast = self.res5_fast(X_fast)
X_fast = self.avg_pool_fast(X_fast)
# Slow Path
X_slow = self.data_layer_slow(X)
X_slow = self.conv1_slow(X_slow)
X_slow = self.max_pool_slow(X_slow)
X_slow = self.res2_slow(X_slow)
# fuse
X_slow = self.res3_slow(X_slow)
# fuse
X_slow = self.res4_slow(X_slow)
# fuse
X_slow = self.res5_slow(X_slow)
# fuse
X_slow = self.adaptavgpool_slow(X_slow)
X_slow = self.flat_slow(X_slow)
X_slow = self.linear_slow(X_slow)
return X_slow
def _make_residual_layer(self,
number_of_blocks,
block_ver,
intermediate_channels,
stride=1
):
"""
as per paper the first layer in block has stride of 1, and then of 2
"""
layers = []
layers.append(
            ResidualBlock(self.in_channels, intermediate_channels, block_ver, stride=stride)
)
self.in_channels = intermediate_channels*4
for i in range(number_of_blocks -1):
layers.append(ResidualBlock(self.in_channels, intermediate_channels, block_ver))
return torch.nn.Sequential(*layers) | UTF-8 | Python | false | false | 6,631 | py | 7 | slowfast_model.py | 6 | 0.535817 | 0.50475 | 0 | 192 | 33.541667 | 147 |
jsourati/nn-active-learning | 9,380,208,598,310 | d92dcd5f195594b2f7d7f3c5b68cafc746036f7d | 0d6257f698819912a0d5eee3b829e8df457c585a | /NNAL.py | 9b845dc4b1730bcd1dc14562df015d7590366a7e | []
| no_license | https://github.com/jsourati/nn-active-learning | 4de2076fd93a1411b65a391197f5911568036b33 | 6eb4a7f1b67d732a8d7b25991ce5145e7e6da0c6 | refs/heads/master | 2022-01-21T14:03:26.615295 | 2019-07-24T13:34:47 | 2019-07-24T13:34:47 | 100,506,905 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import tensorflow as tf
import pdb
import sys
import pickle
import warnings
import time
import os
import NN
import NNAL_tools
from cvxopt import matrix, solvers
read_file_path = "/home/ch194765/repos/atlas-active-learning/AlexNet/"
sys.path.insert(0, read_file_path)
from alexnet import AlexNet
def test_MNIST(iters, B, k, init_size, batch_size, epochs,
train_dat=None, test_dat=None):
"""Evaluate active learning based on Fisher information,
or equivalently expected change of the model, over MNIST
data set
"""
# preparing MNIST data set
if not(train_dat):
batch_of_data, batch_of_labels, pool_images, pool_labels, \
test_images, test_labels = NNAL_tools.init_MNIST(init_size, batch_size)
else:
test_images = test_dat[0]
test_labels = test_dat[1]
batch_of_data, batch_of_labels, pool_images, pool_labels = \
NNAL_tools.divide_training(train_dat, init_size, batch_size)
# FI-based querying
print("Doing FI-based querying")
fi_accs, fi_data, fi_labels = \
querying_iterations_MNIST(batch_of_data, batch_of_labels,
pool_images, pool_labels,
test_images, test_labels,
iters, k, epochs, method="FI")
print("Doing random querying")
rand_accs, rand_data, rand_labels = \
querying_iterations_MNIST(batch_of_data, batch_of_labels,
pool_images, pool_labels,
test_images, test_labels,
iters, k, epochs, method="random")
print("Doing uncertainty sampling")
ent_accs, ent_data, ent_labels = \
querying_iterations_MNIST(batch_of_data, batch_of_labels,
pool_images, pool_labels,
test_images, test_labels,
iters, k, epochs, method="entropy")
return fi_accs, rand_accs, ent_accs
def querying_iterations_MNIST(batch_of_data, batch_of_labels,
pool_images, pool_labels,
test_images, test_labels,
iters, k, epochs, method):
c = pool_labels.shape[0]
d = pool_images.shape[0]
accs = np.zeros((c+1,iters+1))
# initial training
with tf.Session() as sess:
print("Initializing the model...")
# input and output placeholders
x = tf.placeholder(tf.float32, shape=[d, None])
y_ = tf.placeholder(tf.float32, shape=[10, None])
# parameters
W = tf.Variable(tf.zeros([10, d]))
b = tf.Variable(tf.zeros([10,1]))
# initializing
sess.run(tf.global_variables_initializer())
# outputs of the network
y = tf.matmul(W,x) + b
posteriors = tf.nn.softmax(tf.transpose(y))
#log_posteriors = tf.log(posteriors)
# cross entropy as the training objective
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf.transpose(y_),
logits=tf.transpose(y)))
# optimization iteration
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy)
# initial training
for _ in range(epochs):
for i in range(len(batch_of_data)):
train_step.run(feed_dict={x: batch_of_data[i],
y_: batch_of_labels[i]})
# initial accuracy
correct_prediction = tf.equal(tf.argmax(y,0), tf.argmax(y_,0))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accs[0,0] = accuracy.eval(feed_dict={x: test_images,
y_: test_labels})
# accuracies in each class
for j in range(1,c+1):
indics = test_labels[j-1,:]==1
accs[j,0] = accuracy.eval(feed_dict={x: test_images[:,indics],
y_: test_labels[:,indics]})
# start the querying iterations
print("Starting the querying iterations..")
added_labels = []
#added_images = np.zeros((iters, d))
for t in range(1, iters+1):
if method=="FI":
"""FI-based querying"""
# compute all the posterior probabilities
pool_posteriors = sess.run(posteriors, feed_dict=
{x: pool_images, y_: pool_labels})
# using the normalized pool-samples
pool_norms = np.sum(pool_images**2, axis=0)
pool_norms /= pool_norms.max()
# norm of posteriors
pool_posteriors_norms = np.sum(pool_posteriors**2, axis=1)
# scores
scores = (pool_norms+1)*(1-pool_posteriors_norms)
# take the best k scores
#bests = np.argsort(-scores)[:100]
#Q = np.array([bests[np.random.randint(100)]])
Q = np.argsort(-scores)[:k]
elif method=="random":
"""randomd querying"""
Q = np.random.randint(0, pool_images.shape[1], k)
elif method=="entropy":
# compute all the posterior probabilities
pool_posteriors = sess.run(posteriors, feed_dict=
{x: pool_images, y_: pool_labels})
entropies = NNAL_tools.compute_entropy(pool_posteriors.T)
Q = np.argsort(-entropies)[:k]
new_train_data = pool_images[:,Q]
new_train_labels = pool_labels[:,Q]
#added_images[t-1,:] = np.squeeze(new_train_data)
added_labels += [np.where(new_train_labels)[0][0]]
batch_of_data, batch_of_labels = \
NNAL_tools.update_batches(batch_of_data,
batch_of_labels,
new_train_data,
new_train_labels,
'regular')
# fine-tuning
sess.run(tf.global_variables_initializer())
for _ in range(epochs):
for i in range(len(batch_of_data)):
train_step.run(feed_dict={x: batch_of_data[i],
y_: batch_of_labels[i]})
accs[0,t] = accuracy.eval(feed_dict={x: test_images,
y_: test_labels})
# accuracies in each class
for j in range(1,c+1):
indics = test_labels[j-1,:]==1
accs[j,t] = accuracy.eval(feed_dict={x: test_images[:,indics],
y_: test_labels[:,indics]})
# update the pool
np.delete(pool_images, Q, 1)
np.delete(pool_labels, Q, 1)
nL = np.concatenate(batch_of_data, axis=1).shape[1]
print("Iteration %d is done. Number of labels: %d" % (t, nL))
return accs, batch_of_data, batch_of_labels
def CNN_query(model,
expr,
pool_inds,
method_name,
session,
col=True,
extra_feed_dict={}):
"""Querying a number of unlabeled samples from a given pool
:Parameters:
**model** : CNN model object
any CNN class object which has methods, `output` as the
output of the network, and `posteriors` as the estimated
posterior probability of the classes
        **expr** : experiment object
            carries the querying parameters in `expr.pars`
            (`k`, `B`, `lambda_`, `batch_size`) as well as the
            paths and shapes of the pool images
        **pool_inds** : array of integers
            indices of the unlabeled pool samples to query from
        **method_name** : string
            the querying method
        **session** : tf.Session()
            the tensorflow session operating on the model
"""
k = expr.pars['k']
B = expr.pars['B']
lambda_ = expr.pars['lambda_']
batch_size = expr.pars['batch_size']
if method_name=='egl':
# uncertainty filtering
print("Uncertainty filtering...")
posteriors = NNAL_tools.batch_posteriors(
model, pool_inds,
img_path_list,
batch_size,
session, col, extra_feed_dict)
if B < posteriors.shape[1]:
sel_inds = NNAL_tools.uncertainty_filtering(posteriors, B)
sel_posteriors = posteriors[:, sel_inds]
else:
B = posteriors.shape[1]
sel_posteriors = posteriors
sel_inds = np.arange(B)
# EGL scoring
print("Computing the scores..")
c = posteriors.shape[0]
scores = np.zeros(B)
T = len(model.grad_log_posts['0'])
for i in range(B):
# gradients of samples one-by-one
feed_dict = {model.x:np.expand_dims(
pool_X[sel_inds[i],:,:,:],
axis=0)}
feed_dict.update(extra_feed_dict)
if c < 20:
grads = session.run(
model.grad_log_posts,
feed_dict=feed_dict)
sel_classes = np.arange(c)
else:
# if the number of classes is large,
# compute gradients of the largest twenty
# posteriors
sel_classes = np.argsort(
-sel_posteriors[:,i])[:10]
sel_classes_grads = {
str(cc): model.grad_log_posts[str(cc)]
for cc in sel_classes
}
grads = session.run(sel_classes_grads,
feed_dict=feed_dict)
for j in range(len(sel_classes)):
class_score = 0.
for t in range(T):
class_score += np.sum(
grads[str(sel_classes[j])][t]**2)
scores[i] += class_score*sel_posteriors[
sel_classes[j],i]
if not(i%10):
print(i, end=',')
# select the highest k scores
Q_inds = sel_inds[np.argsort(-scores)[:k]]
elif method_name=='random':
n = len(pool_inds)
Q_inds = np.random.permutation(n)[:k]
elif method_name=='entropy':
# computing the posteriors
posteriors = NNAL_tools.idxBatch_posteriors(
model,
pool_inds,
expr,
session,
col,
extra_feed_dict)
# entropies
entropies = NNAL_tools.compute_entropy(posteriors)
Q_inds = np.argsort(-entropies)[:k]
elif method_name=='fi':
# uncertainty filtering
print("Uncertainty filtering...", end='\n\t')
posteriors = NNAL_tools.idxBatch_posteriors(
model,
pool_inds,
expr,
session,
col,
extra_feed_dict)
# uncertainty filtering
if B < posteriors.shape[1]:
sel_inds = NNAL_tools.uncertainty_filtering(
posteriors, B)
sel_posteriors = posteriors[:, sel_inds]
else:
B = posteriors.shape[1]
sel_posteriors = posteriors
sel_inds = np.arange(B)
# forming A-matrices
# division by two in computing size of A is because
# in each layer we have gradients with respect to
# weights and bias terms --> number of layers that
# are considered is obtained after dividing by 2
A_size = int(
len(model.grad_posts['0'])/2)
c,n = posteriors.shape
A = []
# load an images
# indices: sel_inds --> pool_inds
# CAUTIOUS: this will give an error if the selected
# indices in `sel_inds` contains only one index.
sel_X, _ = NN.load_winds(
pool_inds[sel_inds],
expr.imgs_path_file,
expr.pars['target_shape'],
expr.pars['mean'])
for i in range(B):
X_i = sel_X[i,:,:,:]
feed_dict = {
model.x:np.expand_dims(X_i, axis=0)}
feed_dict.update(extra_feed_dict)
# remove zero, or close-to-zero posteriors
x_posterior = sel_posteriors[:,i]
x_posterior[x_posterior<1e-6] = 0.
nz_classes = np.where(x_posterior > 0.)[0]
nz_posts = x_posterior[nz_classes] / np.sum(
x_posterior[nz_classes])
nz_classes_grads = {
str(cc): model.grad_posts[str(cc)]
for cc in nz_classes}
# computing the gradients
# grads={ '0': dP(y=0|x)/dtheta,
# '1': dP(y=1|x)/dtheta,
# etc }
# if there are too many classes,
# grads={ 'c0': dP(y=c0|x)/dtheta,
# 'c1': dP(y=c1|x)/dtheta,
# etc }
# where {c0,c1,etc} are classes with largest
# posteriors for x.
#
if len(nz_classes) < 10:
grads = session.run(nz_classes_grads,
feed_dict=feed_dict)
sel_classes = nz_classes
new_posts = nz_posts
else:
# if the number of classes is large,
# compute gradients of few classes with
# largest posteriors only
sel_nz_classes = np.argsort(-nz_posts)[:10]
sel_classes = nz_classes[sel_nz_classes]
sel_classes_grads = {
str(cc): nz_classes_grads[str(cc)]
for cc in sel_classes}
# normalizing posteriors of the selected classes
new_posts = nz_posts[sel_nz_classes]
new_posts /= np.sum(new_posts)
# gradients for the selected classes
grads = session.run(sel_classes_grads,
feed_dict=feed_dict)
Ai = np.zeros((A_size, A_size))
for j in range(len(sel_classes)):
shrunk_grad = NNAL_tools.shrink_gradient(
grads[str(sel_classes[j])], 'sum')
Ai += np.outer(shrunk_grad,
shrunk_grad) / new_posts[j] \
+ np.eye(A_size)*1e-5
if not(i%10):
print(i, end=',')
A += [Ai]
# extracting features for pool samples
# using only few indices of the features
F = model.extract_features(pool_inds[sel_inds],
expr,session)
# selecting from those features that have the most
# non-zero values among the selected samples
nnz_feats = np.sum(F>0, axis=1)
feat_inds = np.argsort(-nnz_feats)[:int(B/2)]
F_sel = F[feat_inds,:]
# taking care of the rank
while np.linalg.matrix_rank(F_sel)<len(feat_inds):
# if the matrix is not full row-rank, discard
# the last selected index (worst among all)
feat_inds = feat_inds[:-1]
F_sel = F[feat_inds,:]
if len(feat_inds) < 10:
warnings.warn(
"Few features (%d) are selected"% (
len(feat_inds)))
# taking care of the conditional number
while np.linalg.cond(F_sel) > 1e6:
feat_inds = feat_inds[:-1]
F_sel = F[feat_inds,:]
if len(feat_inds)==1:
lambda_=0
break
#pdb.set_trace()
# subtracting the mean
F_sel -= np.repeat(np.expand_dims(
np.mean(F_sel, axis=1),
axis=1), B, axis=1)
print('Cond. #: %f'% (np.linalg.cond(F_sel)),
end='\n\t')
print('# selected features: %d'%
(len(feat_inds)), end='\n\t')
# SDP
print('Solving SDP..',end='\n\t')
soln = NNAL_tools.SDP_query_distribution(
A, lambda_, F_sel, k)
print('status: %s'% (soln['status']), end='\n\t')
q_opt = np.array(soln['x'][:B])
# sampling from the optimal solution
Q_inds = NNAL_tools.sample_query_dstr(
q_opt, k, replacement=True)
Q_inds = sel_inds[Q_inds]
elif method_name=='rep-entropy':
# uncertainty filtering
print("Uncertainty filtering...")
posteriors = NNAL_tools.idxBatch_posteriors(
model,
pool_inds,
expr,
session,
col,
extra_feed_dict)
if B < posteriors.shape[1]:
sel_inds = NNAL_tools.uncertainty_filtering(
posteriors, B)
sel_posteriors = posteriors[:, sel_inds]
else:
B = posteriors.shape[1]
sel_posteriors = posteriors
sel_inds = np.arange(B)
n = len(pool_inds)
rem_inds = list(set(np.arange(n)) - set(sel_inds))
print("\t Finding Similarities..", end='\n\t')
# extract the features for all the pool
# sel_inds, rem_inds --> pool_inds
F = model.extract_features(pool_inds,
expr,
session)
F_uncertain = F[:, sel_inds]
norms_uncertain = np.sqrt(np.sum(F_uncertain**2, axis=0))
F_rem_pool = F[:, rem_inds]
norms_rem = np.sqrt(np.sum(F_rem_pool**2, axis=0))
# compute cos-similarities between filtered images
# and the rest of the unlabeled samples
dots = np.dot(F_rem_pool.T, F_uncertain)
norms_outer = np.outer(norms_rem, norms_uncertain)
sims = dots / norms_outer
print("Greedy optimization..", end='\n\t')
# start from empty set
Q_inds = []
nQ_inds = np.arange(B)
# add most representative samples one by one
for i in range(k):
rep_scores = np.zeros(B-i)
for j in range(B-i):
cand_Q = Q_inds + [nQ_inds[j]]
rep_scores[j] = np.sum(
np.max(sims[:, cand_Q], axis=1))
iter_sel = nQ_inds[np.argmax(rep_scores)]
# update the iterating sets
Q_inds += [iter_sel]
nQ_inds = np.delete(
nQ_inds, np.argmax(rep_scores))
Q_inds = sel_inds[Q_inds]
return Q_inds
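# Illustrative call sketch (model, expr, pool_inds and session come from the surrounding
# training framework; 'fi' selects the Fisher-information/SDP strategy implemented above):
# Q_inds = CNN_query(model, expr, pool_inds, 'fi', session)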
def run_CNNAL(A, init_X_train, init_Y_train,
X_pool, Y_pool, X_test, Y_test, epochs,
k, B, method, max_queries, train_batch=50,
eval_batch=None):
"""Starting with a CNN model that is trained with an initial
labeled data set, and then perform certain number of querying
iterations using a specified active learning method
"""
test_acc = []
saver = tf.train.Saver()
with tf.Session() as session:
saver.restore(session, A.save_path)
test_acc += [A.accuracy.eval(feed_dict={
A.x: X_test, A.y_:Y_test})]
print()
print('Test accuracy: %g' %test_acc[0])
# start querying
new_X_train, new_Y_train = init_X_train, init_Y_train
new_X_pool, new_Y_pool = X_pool, Y_pool
A.get_gradients()
# number of selected in each iteration is useful
# when samling from a distribution and repeated
# queries might be present
query_num = []
print(20*'-' + ' Querying ' +20*"-")
t = 0
while sum(query_num) < max_queries:
print("Iteration %d: "% t)
Q_inds = CNN_query(A, k, B, new_X_pool,
method, session, eval_batch)
query_num += [len(Q_inds)]
print('Query index: '+' '.join(str(q) for q in Q_inds))
# prepare data for another training
Q = new_X_pool[Q_inds,:,:,:]
#pickle.dump(Q, open('results/%s/%d.p'% (method,t),'wb'))
Y_Q = new_Y_pool[:,Q_inds]
# remove the selected queries from the pool
new_X_pool = np.delete(new_X_pool, Q_inds, axis=0)
new_Y_pool = np.delete(new_Y_pool, Q_inds, axis=1)
# update the model
print("Updating the model: ", end='')
new_X_train, new_Y_train = NNAL_tools.prepare_finetuning_data(
new_X_train, new_Y_train, Q, Y_Q, 200+t, 50)
for i in range(epochs):
A.train_graph_one_epoch(new_X_train, new_Y_train,
train_batch, session)
print(i, end=', ')
test_acc += [A.accuracy.eval(
feed_dict={A.x: X_test, A.y_:Y_test})]
print()
print('Test accuracy: %g' %test_acc[t+1])
t += 1
return np.array(test_acc), np.append(0, np.array(query_num))
def run_AlexNet_AL(X_pool, Y_pool, X_test, Y_test,
learning_rate, dropout_rate, epochs,
k, B, methods, max_queries,
train_batch_size,
model_save_path,
results_save_path,
index_save_path=None,
eval_batch_size=None,
init_train_dat=None):
"""Running active learning algorithms on a
pre-trained AlexNet
    This function is written separately from `run_CNNAL`, because
the architecture of AlexNet cannot be modelled by our
current generic CNN class at this time. It is mainly
because AlexNet has more than two groups in some
convolutional layers, where the input is cut in half
and same or different filters are used in each group
to output a feature map.
Hence, we are using a publicly available piece of code,
which is written by Frederik Kratzert in his blog
https://kratzert.github.io/2017/02/24/finetuning-alexnet-
with-tensorflow.html
for fine-tuning pre-trained AlexNet in TensorFlow given
any labeled data set.
"""
# layers we don't wanna modify in the fine-tuning process
skip_layer = ['fc8']
# path to the pre-trained weights
weights_path = '/home/ch194765/repos/atlas-active-learning/AlexNet/bvlc_alexnet.npy'
# creating the AlexNet mode
# -------------------------
# preparing variables
c = Y_pool.shape[1]
if os.path.isfile('%s/results.dat'% index_save_path):
print('Some results already exist..')
accs, fi_queries = pickle.load(
'%s/results.dat'% index_save_path)
else:
accs = {method:[] for method in methods}
fi_query_num = [0]
tf.reset_default_graph()
x = tf.placeholder(tf.float32,
[None, 227, 227, 3])
# creating the model
model = NN.AlexNet_CNN(
x, dropout_rate, c, skip_layer, weights_path)
model.get_optimizer(learning_rate)
# getting the gradient operations
model.get_gradients(5)
saver = tf.train.Saver()
with tf.Session() as session:
# initialization
model.initialize_graph(session)
# if an initial training data is given..
if init_train_dat:
print("Initializing the model")
init_X_train = init_train_dat[0]
init_Y_train = init_train_dat[1]
for i in range(epochs):
model.train_graph_one_epoch(
init_X_train, init_Y_train,
train_batch_size, session)
if os.path.isfile(model_save_path+'.index'):
# load the graph
saver.restore(session, model_save_path)
else:
# save the graph
saver.save(session, model_save_path)
session.graph.finalize()
init_acc = NNAL_tools.batch_accuracy(
model, X_test, Y_test,
eval_batch_size, session, col=False)
extra_feed_dict = {model.KEEP_PROB: model.dropout_rate}
for M in methods:
print('Test accuracy: %g' %init_acc)
if os.path.exists('%s/%s'% (index_save_path, M)):
continue
if M=='fi':
accs[M] += [init_acc]
else:
accs[M] = np.zeros(int(max_queries/k)+1)
accs[M][0] = init_acc
if not(M==methods[0]):
saver.restore(session, model_save_path)
# start querying
if init_train_dat:
X_train = init_X_train
Y_train = init_Y_train
else:
X_train = np.zeros((0,)+X_pool.shape[1:])
Y_train = np.zeros((0,c))
# number of selected in each iteration is useful
# when samling from a distribution and repeated
# queries might be present
query_num = 0
print(20*'-' + ' Querying ' +20*"-")
t = 0
while query_num < max_queries:
#T1 = time.time()
print("Iteration %d: "% t)
Q_inds = CNN_query(model, k, B, X_pool,
M, session, eval_batch_size,
False, extra_feed_dict)
query_num += len(Q_inds)
# save the queries if necessary:
if index_save_path:
# create the path if necessary
if not(os.path.exists('%s/%s'% (index_save_path, M))):
os.mkdir('%s/%s'% (index_save_path, M))
# the query indices are based on rows of
# pool_inds.txt
np.savetxt(
'%s/%s/Q-%d.txt'% (index_save_path, M, t),
Q_inds, fmt='%d')
print('Query index: '+' '.join(str(q) for q in Q_inds))
# prepare data for another training
Q = X_pool[Q_inds,:,:,:]
Y_Q = Y_pool[Q_inds,:]
# remove the selected queries from the pool
X_pool = np.delete(X_pool, Q_inds, axis=0)
Y_pool = np.delete(Y_pool, Q_inds, axis=0)
# update the model
print("Updating the model: ", end='')
X_train, Y_train = NNAL_tools.prepare_finetuning_data(
X_train, Y_train.T,
Q, Y_Q.T, 200+t, train_batch_size)
Y_train = Y_train.T
for i in range(epochs):
model.train_graph_one_epoch(
X_train, Y_train,
train_batch_size, session)
print(i, end=', ')
print()
#T2 = time.time()
#dT = (T2 - T1) / 60
#print("This iteration took %f m"% dT)
iter_acc = NNAL_tools.batch_accuracy(
model, X_test, Y_test,
eval_batch_size, session, col=False)
t += 1
if M=='fi':
accs[M] += [iter_acc]
fi_query_num += [len(Q_inds)]
else:
accs[M][t] = iter_acc
print('Test accuracy: %g' % iter_acc)
#pdb.set_trace()
pickle.dump([accs, fi_query_num],
open(results_save_path, 'wb'))
return accs, fi_query_num
| UTF-8 | Python | false | false | 28,830 | py | 24 | NNAL.py | 24 | 0.487444 | 0.480472 | 0 | 762 | 36.834646 | 88 |
lazycat2/leetcode | 309,237,650,435 | f530f0aa2e580f37e4af8f336a197cbc00cf79cd | 797e0c32a28e461e76bb5e44ff40888fae9e13e6 | /dogge/Binary Tree Card/Construct Binary Tree from Preorder and Inorder Traversal.py | e9563ac6ec779b36ee672998d756ceab7395cace | [
"ICU"
]
| permissive | https://github.com/lazycat2/leetcode | 4dd58bed530dea72b7585756abed45514088dae5 | 0d81e8b48d7e1e3c52ec7f08b7c907cab40cc616 | refs/heads/master | 2020-03-27T20:38:30.378984 | 2019-07-11T02:37:50 | 2019-07-11T02:37:50 | 147,082,746 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
"""
3
/ \
9 20
/ \
15 7
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
"""
def buildTree(self, preorder: 'List[int]', inorder: 'List[int]') -> TreeNode:
def recursive(pre_: 'List[int]', in_: 'List[int]'):
if pre_:
root = pre_[0]
index = in_.index(root)
l_in = in_[0:index]
r_in = in_[index + 1:]
l_pre = pre_[1:len(l_in) + 1]
r_pre = pre_[len(l_in) + 1:]
o = TreeNode(root)
o.left = recursive(l_pre, l_in)
o.right = recursive(r_pre, r_in)
return o
return recursive(preorder, inorder)
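# Minimal local check using the example traversals from the docstring above:
if __name__ == "__main__":
    tree = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    print(tree.val, tree.left.val, tree.right.val)  # 3 9 20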
| UTF-8 | Python | false | false | 907 | py | 183 | Construct Binary Tree from Preorder and Inorder Traversal.py | 178 | 0.432194 | 0.402426 | 0 | 35 | 24.914286 | 81 |
cheriezhang/LCode | 9,208,409,900,509 | b9d2e0583a47a8796aecf3290341dbe883317d96 | 5b4be91f2d5b2259e87189008c47b5a394a5d46d | /TowardOffer/17-mirror.py | e81bda09381c0d53fc1235fc7b44dc63b05776c7 | []
| no_license | https://github.com/cheriezhang/LCode | 3c48d2e3fae2f43980b8ec739bdaf53d5b9780ac | 6bb9af53cc036c441fab9690791fd9c8a83922b8 | refs/heads/master | 2021-05-15T12:22:41.970018 | 2017-12-04T06:19:14 | 2017-12-04T06:19:14 | 108,349,573 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
# 题目描述:
# 操作给定的二叉树,将其变换为源二叉树的镜像。
# 输入描述:
# 二叉树的镜像定义:源二叉树
# 8
# / \
# 6 10
# / \ / \
# 5 7 9 11
# 镜像二叉树
# 8
# / \
# 10 6
# / \ / \
# 11 9 7 5
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 迭代遍历 交换左右节点
class Solution:
def Mirror(self,root):
# write code here
if root is None:
return
tmp = root.left
root.left = root.right
root.right = tmp
if root.left is not None:
self.Mirror(root.left)
if root.right is not None:
self.Mirror(root.right)
# Runtime: 38 ms
# Memory usage: 5632 KB
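# Minimal local check: TreeNode is commented out above (supplied by the judge), so a small
# stand-in class is defined here just for this demo.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(8)
    root.left, root.right = TreeNode(6), TreeNode(10)
    Solution().Mirror(root)
    print(root.left.val, root.right.val)  # 10 6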
dimven/SpringNodes | 2,413,771,670,476 | 809af923c1409fb0bf8b4de28fdd0814109eecb3 | 94813f80b7a931f04f5536e8d0039fa4eee119c9 | /py/Document.DeleteElements.py | 94a845784ed5c34ad7d82181d6efb36f9e16a92a | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | https://github.com/dimven/SpringNodes | 649b66f6ee39ffda8913a91f1f5f7765288a6856 | b9e19e4e3a8c3ce268e6c1ec7c821a84f85873bb | refs/heads/master | 2023-08-30T21:53:37.961719 | 2022-02-24T16:52:02 | 2022-02-24T16:52:02 | 43,051,678 | 64 | 52 | MIT | false | 2023-08-30T13:02:51 | 2015-09-24T07:28:19 | 2023-07-29T09:17:54 | 2023-08-30T13:02:51 | 15,139 | 64 | 45 | 10 | Python | false | false | #Copyright(c) 2016, Dimitar Venkov
# @5devene, dimitar.ven@gmail.com
import clr
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
def tolist(obj1):
if hasattr(obj1,"__iter__"): return obj1
else: return [obj1]
elems = UnwrapElement(tolist(IN[0]) )
if IN[1]:
deleted, failed = [], []
TransactionManager.Instance.EnsureInTransaction(doc)
for e in elems:
id = None
try:
id = e.Id
del_id = doc.Delete(id)
deleted.extend([d.ToString() for d in del_id])
except:
if id is not None:
failed.append(id.ToString() )
TransactionManager.Instance.TransactionTaskDone()
s = set(deleted)
failed1 = [x for x in failed if x not in s]
OUT = len(deleted), ';'.join(deleted), ';'.join(failed1)
else:
OUT = "Set confirm to True", "", "" | UTF-8 | Python | false | false | 916 | py | 103 | Document.DeleteElements.py | 99 | 0.718341 | 0.704148 | 0 | 35 | 25.2 | 57 |
Ruchira2k/NAS-DIP-pytorch | 6,227,702,616,340 | b2e9bf97aca43eeaf16dc8564e71981d5a70bb42 | bb4a2dd06a7f821b34e0d862103d420d15a37180 | /DIP/utils/load_image.py | 7110f12f2dcd476ee73c4b1e31fdc857118c4bdb | []
| no_license | https://github.com/Ruchira2k/NAS-DIP-pytorch | 4600dfc01475f2b73b101bf2d3f241b4ca50bb32 | 3dfb4cf6312599097a5a193d22fd8591467e1a6f | refs/heads/master | 2023-03-17T00:59:14.192699 | 2020-08-23T20:06:30 | 2020-08-23T20:06:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import matplotlib.pyplot as plt
from skimage import io
import numpy as np
import pickle
import json
import glob
import torch
from torch.utils.data import Dataset, DataLoader
def load_image_coco(num_image=64, crop_size=256):
images = np.empty((0, crop_size, crop_size, 3), dtype=np.float32)
coco_anno = json.load(open('/home/chengao/Dataset/annotations/instances_val2017.json'))
# img_pil = img_pil[[0, 4, 9, 14, 20, 30, 45, 50, 51, 52, 63, 65, 75, 83, 90, 91],:,:,:]
for idx in range(len(coco_anno['annotations'])):
GT = coco_anno['annotations'][idx]
image_id = GT['image_id']
H_box = [int(i) for i in GT['bbox']]
category = GT['category_id']
if H_box[2] <= crop_size and H_box[2] > 150 / 256 * crop_size and H_box[3] <= crop_size and H_box[3] > 150 / 256 * crop_size:
im_file = '/home/chengao/Dataset/val2017/' + (str(image_id)).zfill(12) + '.jpg'
im_data = plt.imread(im_file)
im_height, im_width, nbands = im_data.shape
height_pad = crop_size - H_box[3]
width_pad = crop_size - H_box[2]
x0 = H_box[0] - width_pad // 2
x1 = H_box[0] - width_pad // 2 + crop_size
y0 = H_box[1] - height_pad // 2
y1 = H_box[1] - height_pad // 2 + crop_size
if x0 < 0:
x1 = x1 - x0
x0 = 0
if x1 >= im_width:
continue
if y0 < 0:
y1 = y1 - y0
y0 = 0
if y1 >= im_height:
continue
im_data = im_data[y0 : y1, x0 : x1, :].reshape(1, crop_size, crop_size, 3)
images = np.concatenate((images, im_data), axis=0)
if len(images) >= num_image:
return images
return images
class DIV2KDataset(Dataset):
def __init__(self, root_dir, transform=None):
self.image_list = os.listdir(root_dir)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.image_list)
def __getitem__(self, idx):
img_name = os.path.join(self.root_dir,
self.image_list[idx])
image = io.imread(img_name)
image = image / 255.
if self.transform:
image = self.transform(image)
return image
class RandomCrop(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, image):
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h,
left: left + new_w]
return image
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, image):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
return torch.from_numpy(image) | UTF-8 | Python | false | false | 3,356 | py | 17 | load_image.py | 16 | 0.519666 | 0.486293 | 0 | 113 | 28.707965 | 133 |
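# Minimal usage sketch (the DIV2K directory path is an assumption; adjust it to your setup):
# from torchvision import transforms
# dataset = DIV2KDataset('/path/to/DIV2K_train_HR',
#                        transform=transforms.Compose([RandomCrop(256), ToTensor()]))
# loader = DataLoader(dataset, batch_size=16, shuffle=True)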
LambdaRan/leetcode | 8,924,942,065,550 | 5e52a99fab326635843526d00a1e42ca255014c3 | f5ddfc2080749bd3916b139654a701bfd6a46361 | /leetCode_Python/prob492.py | 845a89946156bfe7210f8e7459928cbdce4ccfc4 | []
| no_license | https://github.com/LambdaRan/leetcode | a933c914a1774c9d9423ba895c570ff293881a5f | 62151aec659e3244fdc5ec48e851610743ac4105 | refs/heads/master | 2021-05-11T06:20:58.547672 | 2021-03-13T09:58:38 | 2021-03-13T09:58:38 | 117,985,216 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
# author : lambda RDG
# Date&Time : 2019/03/31 20:33:08
"模块的文档注释"
# Name: 492. Construct the Rectangle
# Website: https://leetcode.com/problems/construct-the-rectangle/
# Description:
import sys
from typing import List
class Solution:
def constructRectangle(self, area: int) -> List[int]:
import math
w = int(math.sqrt(area))
while area % w:
w -= 1
return [area//w, w]
if __name__ == "__main__":
pass | UTF-8 | Python | false | false | 515 | py | 360 | prob492.py | 356 | 0.570858 | 0.530938 | 0 | 22 | 20.863636 | 65 |
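    # Minimal local check (illustrative inputs; output is [length, width] with length >= width):
    print(Solution().constructRectangle(4))   # [2, 2]
    print(Solution().constructRectangle(37))  # [37, 1]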
sunatthegilddotcom/Advanced-Solar-Cell-Research | 3,204,045,611,507 | 2423a72fe2e9efc6e2d36062f2233c4debdda9d2 | 62758b997dac8bd24f4ad45722879a91ef2b5895 | /sq.py | 668d0855df768225daa949e8a72ee09d592fb999 | []
| no_license | https://github.com/sunatthegilddotcom/Advanced-Solar-Cell-Research | 4d8df7e51c109d3e20d80732c9b75dee9d1d37ca | 90efac38b4adb5d0ebd35956240e438b44028bf0 | refs/heads/master | 2022-12-03T01:42:37.913000 | 2020-08-13T19:33:46 | 2020-08-13T19:33:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import scipy.integrate as integrate
import matplotlib.pyplot as plt
import pandas as pd
plt.rcParams.update({'font.size': 18})
########## Set sq to 1 to calculate standard Shockley-Queisser limit
########## Set multijunction to 1 to calculate efficiency for two-layer tandem with Si base
########## Set multiexciton to 1 to calculate efficiency for multi-exciton generation
########## Set multijunctionMEG to 1 to calculate efficiency for two-layer tandem with MEG
sq = 0
multijunction = 1
siliconTopLayer = 1 #set to 1 to test bandgaps below Si's (ie treat Si as the top layer as well)
multiexciton = 0 #set meg_cap to desired number
meg_cap = 4 #maximum number of electrons that can be generated by a high-energy photon for multiexciton and multijunctionMEG
multijunctionMEG = 0 #multijunctionMEG is still a work in progress
blackbody = 0 #set to 1 to calculate efficiencies for a blackbody spectrum source
am1pt5 = 1 #set to 1 to calculate efficiencies using AM 1.5 spectrum from nrel
### Set Joules to 1 to run the script in Joule units, eV to 1 to run in eV units. ###
### If both are set to 1, the script will default to eV units. ###
Joules = 0 #run the script in Joules units
eV = 1 #run the script in eV units
q = 1.602176634e-19 #in C
if Joules == 1:
h = 6.62607015e-34 #in J*s units
k = 1.380649e-23 #in J/K
if eV == 1:
h = 6.62607015e-34/q #in eV*s units
k = 1.380649e-23/q #in eV/K
c = 299792458 #in m/s
Tsun = 5762 #sun temp in K
Tc = 300 #ambient temp in K
g = 2*np.pi/h**3/c**2
qg = q*g
f = (6.957/1495.98)**2 #(radius of sun / distance to sun)^2
C = 1 #concentration factor
if am1pt5 == 1:
### Import the AM 1.5 Spectrum (from nrel) ###
spectrum = np.asarray(pd.read_csv('am1.5spectrum.txt',skiprows=2,sep='\t'))
wavelength = spectrum[:,0] #in nanometers
radiance = spectrum[:,3] #in W/s/m^2
    energies = h*c*(wavelength/1e9)**(-1) #The 1e9 factor converts wavelength to meters; energies is in the same energy units as h (eV or J)
### Reverse the arrays to put the data values in order of increasing energy / decreasing wavelength ###
wavelength = wavelength[::-1]
radiance = radiance[::-1]
energies = energies[::-1]
def func(x):
return x**2/(np.exp(x)-1)
def func2(x):
return x/(np.exp(x)-1)
def func3(x):
if np.abs(x) > 1e-2:
return 1/(np.exp(x)-1)
elif np.abs(x) <= 1e-2:
return 1/x #taylor series approx for small x
def Q(egLower,egUpper,T,arrays=0):
"""This function calculates the photon flux for energies from egLower
to egUpper at temperature T using the quad() integration function from scipy.integrate.
The units of egLower and egUpper can be either eV or J. The integral's variables have been
changed to make the integral return a unitless value. The units of the returned value will
be in whatever energy units k is in."""
if arrays == 0:
xgLower = egLower/(k*T)
xgUpper = egUpper/(k*T)
integral1 = integrate.quad(func,xgLower,xgUpper,limit=10000)
if np.isnan(integral1[0]) == False:
output = integral1[0]
else:
output = 0
return output
if arrays == 1:
output = np.zeros([len(egLower)])
xgUpper = egUpper/(k*T)
for h in range(len(egLower)):
xgLower = egLower[h]/(k*T)
integral1 = integrate.quad(func,xgLower,xgUpper,limit=10000)
if np.isnan(integral1[0]) == False:
output[h] = integral1[0]
else:
output[h] = 0
return output
if arrays == 2:
output = np.zeros([len(egUpper)])
xgLower = egLower/(k*T)
for h in range(len(egUpper)):
xgUpper = egUpper[h]/(k*T)
integral1 = integrate.quad(func,xgLower,xgUpper,limit=10000)
if np.isnan(integral1[0]) == False:
output[h] = integral1[0]
else:
output[h] = 0
return output
if arrays == 3:
output = np.zeros([len(egLower),len(egUpper)])
for h in range(len(egLower)):
xgLower = egLower[h]/(k*T)
for i in range(len(egUpper)):
xgUpper = egUpper[i]/(k*T)
if xgLower < xgUpper:
integral1 = integrate.quad(func,xgLower,xgUpper,limit=10000)
if np.isnan(integral1[0]) == False:
output[h,i] = integral1[0]
else:
output[h,i] = 0
else:
print("Check your energy integral limits!")
output[h,i] = 0
return output
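# Example (illustrative): the dimensionless above-gap flux integral for a 1.1 eV gap under the
# solar blackbody; multiplying by g (and f*C for the geometric factor) gives photons/m^2/s.
# flux_factor = Q(1.1, np.inf, Tsun)   # assumes the eV branch (k in eV/K) selected above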
def func4(E,T,V):
return E**2/(np.exp((E-q*V)/(k*T))-1)
def Q_V(Eg,V):
"""This function calculates the recombination rate for a solar cell with bandgap energy Eg
operating at temperature T and voltage V. The integral will return the cube of whatever units k is in."""
output = np.zeros([len(V)])
for u in range(len(V)):
if Eg > q*V[u]:
integral1 = integrate.quad(func4,Eg,np.inf,limit=10000,args=(Tc,V[u]))
if np.isnan(integral1[0]) == False:
output[u] = integral1[0]
else:
output[u] = 0
else:
output[u] = 0
return output
def Q_V2(Eg1,Eg2,V):
"""This function is the same as Q_V, but with the upper limit added for MEG calculation."""
output = np.zeros_like(V)
for u in range(len(V)):
if Eg1 > q*V[u]:
integral1 = integrate.quad(func4,Eg1,Eg2,limit=10000,args=(Tc,V[u]))
if np.isnan(integral1[0]) == False:
output[u] = integral1[0]
else:
output[u] = 0
else:
print('Error: Voltage is larger than the bandgap!')
print('Eg = ', Eg1)
print('q*V = ', q*V)
output[u] = 0
return output
def realsolar(wl,P,eg2,eg1,arrays=0):
"""This function calculates the photon flux for the AM 1.5 solar spectrum imported in line 31.
he units of the returned value are in photons/m^2/s. wl is the wavelength data in nm, P is the
radiance/energy flux data in W/m^2/nm, and Eg is the bandgap energy of the material. The wavelength
is converted to energy using E = hc/wavelength (this will be in units of Joules regardless of what
option was chosen at the beginning of the script so that it will cancel with the units of P).
The energy flux P is then divided by the energy to get the photon flux, ie, W/m^2/nm == J/s/m^2/nm,
so (W/m^2/nm)/J == number of photons/s/m^2/nm. Integration across wavelength then produces units of
photons/s/m^2. Since it already has s and m^2 in the denominator, it doesn't need to be multiplied by g later."""
sum = 0
if Joules == 1:
energies = h*c*(wavelength/1e9)**(-1)
if eV == 1:
energies = h*c*(wavelength/1e9)**(-1)*q
fluxes = P/energies
if arrays == 0:
output = 0
for i in range(len(wl)):
if i+1 < len(wl):
wavelengthtemp = 0.5*(wl[i]+wl[i+1])
if wavelengthtemp <= h*c/eg2*1e9 and wavelengthtemp >= h*c/eg1*1e9: #the 1e9 factor is to convert the RHS to nm
dwl = np.abs((wl[i+1]-wl[i]))
output += 0.5*(fluxes[i]+fluxes[i+1])*dwl
if np.isnan(output) == True:
output = 0
return output
if arrays == 1:
output = np.zeros([len(eg2)])
for s in range(len(eg2)):
for i in range(len(wl)):
if i+1 < len(wl):
wavelengthtemp = 0.5*(wl[i]+wl[i+1])
if wavelengthtemp <= h*c/eg2[s]*1e9 and wavelengthtemp >= h*c/eg1*1e9: #the 1e9 factor is to convert the RHS to nm
dwl = np.abs((wl[i+1]-wl[i]))
output[s] += 0.5*(fluxes[i]+fluxes[i+1])*dwl
if np.isnan(output[s]) == True:
output[s] = 0
return output
if arrays == 2:
output = np.zeros([len(eg1)])
for s in range(len(eg1)):
for i in range(len(wl)):
if i+1 < len(wl):
wavelengthtemp = 0.5*(wl[i]+wl[i+1])
if wavelengthtemp <= h*c/eg2*1e9 and wavelengthtemp >= h*c/eg1[s]*1e9: #the 1e9 factor is to convert the RHS to nm
dwl = np.abs((wl[i+1]-wl[i]))
output[s] += 0.5*(fluxes[i]+fluxes[i+1])*dwl
if np.isnan(output[s]) == True:
output[s] = 0
return output
if arrays == 3:
output = np.zeros([len(eg2),len(eg1)])
for l in range(len(eg2)):
for s in range(len(eg1)):
for i in range(len(wl)):
if i+1 < len(wl):
wavelengthtemp = 0.5*(wl[i]+wl[i+1])
if wavelengthtemp <= h*c/eg2[l]*1e9 and wavelengthtemp >= h*c/eg1[s]*1e9: #the 1e9 factor is to convert the RHS to nm
dwl = np.abs((wl[i+1]-wl[i]))
output[l,s] += 0.5*(fluxes[i]+fluxes[i+1])*dwl
if np.isnan(output[l,s]) == True:
output[l,s] = 0
return output
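# Example (illustrative): AM 1.5 photon flux absorbed above a 1.1 eV gap, in photons/m^2/s.
# absorbed_flux = realsolar(wavelength, radiance, 1.1, np.inf)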
def realpower(wl,P):
"""This function calculates the total incident power per m^2 from the AM 1.5 solar spectrum by
integrating the radiance P, in units of W/m^2/nm, over all wavelengths. """
sum = 0
for i in range(len(wl)):
if i+1 < len(wl):
dwl = np.abs((wl[i+1]-wl[i]))
sum += 0.5*(P[i]+P[i+1])*dwl
return sum
fignum = 1 #for plotting
if sq == 1:
if Joules == 1:
eg = np.linspace(0.2,3,200)*q #in Joules
P_in = 930 #page 8 of Solar Energy document -- for AM 1.5, intensity is approximately 930 W/m^2
elif eV == 1:
eg = np.linspace(0.2,3,200)
P_in = 930/q
vcount = 1000 #length of voltage array
efficiencies = np.zeros([len(eg),vcount])
efficienciesReal = np.zeros([len(eg),vcount])
maxEfficiencies = np.zeros_like(eg)
maxEfficienciesReal = np.zeros_like(eg)
J = np.zeros([len(eg),vcount])
JReal = np.zeros([len(eg),vcount])
##### Decoder: term1 = absorbed solar flux for blackbody spectrum
##### term1Real = absorbed solar flux for AM 1.5 spectrum
##### term2 = absorbed ambient flux
##### term3 = lost recombined radiation
for i in range(len(eg)):
if Joules == 1:
v = np.linspace(0,eg[i]-0.1,vcount)
if eV == 1:
v = np.linspace(0,eg[i]-0.1,vcount)/q #convert from Volts == Joules/Coulomb to eV/Coulomb
term1 = f*C*Q(eg[i],np.inf,Tsun)
term1Real = realsolar(wavelength,radiance,eg[i],np.inf)
term2 = (1-f*C)*Q(eg[i],np.inf,Tc)
for j in range(vcount):
## check to see if the sign of J has changed (but give the first two iterations an exemption) ##
if j < 2 or J[i,j-2]*J[i,j-1] > 0:
term3 = Q_V(eg[i],v[j])
J[i,j] = qg*(term1+term2-term3)
else:
J[i,j] = 0
if j < 2 or JReal[i,j-2]*JReal[i,j-1] > 0:
term3 = Q_V(eg[i],v[j])
JReal[i,j] = q*term1Real+qg*(term2-term3)
else:
JReal[i,j] = 0
efficiencies[i,j] = J[i,j]*v[j]/P_in
efficienciesReal[i,j] = JReal[i,j]*v[j]/P_in
## efficiencies and efficienciesReal are the efficiency values for all
## voltage values for just the current bandgap energy. The max needs to
## be taken to get the maximum possible efficiency at that bandgap energy
maxEfficiencies[i] = np.max(efficiencies[i])
maxEfficienciesReal[i] = np.max(efficienciesReal[i])
plt.figure(fignum)
fignum += 1
plt.plot(eg,maxEfficienciesReal,linewidth=3,color='red')
# plt.title('Single Junction Shockley-Queisser Efficiency Limit')
plt.xlabel('Bandgap Energy (eV)')
plt.ylabel('Efficiency')
print('Finished with Single Junction!')
if multijunction == 1:
### Take a smaller step size in regions further from the predicted optimal bandgap energy
### by concatenating three arrays with different step sizes for different energy ranges
egcount = 50
eglimit1 = 1.101
eglimit2 = 2.5
eglimit3 = 0.9
eglimit4 = 1.099
if Joules == 1:
eg1 = np.linspace(eglimit1,eglimit2,egcount)*q
eg2 = 1.1*q #bandgap for silicon
if siliconTopLayer == 1:
eg1_2 = 1.1
eg2_2 = np.linspace(eglimit3,eglimit4,egcount)*q
P_in = 900
elif eV == 1:
eg1 = np.linspace(eglimit1,eglimit2,egcount)
eg2 = 1.1
if siliconTopLayer == 1:
eg1_2 = 1.1
eg2_2 = np.linspace(eglimit3,eglimit4,egcount)
P_in = 900/q #page 8 of Goodnick's document, for AM1.5, intensity is approximately 930 W/m^2
vcount = 15000
rhs1 = q*realsolar(wavelength,radiance,eg1,np.inf,arrays=1)
# rhs1 = qg*f*C*Q(eg1,np.inf,Tsun,arrays=1)
rhs2 = q*realsolar(wavelength,radiance,eg2,eg1,arrays=2)
# rhs2 = qg*f*C*Q(eg2,eg1,Tsun,arrays=2)
# rhs3 = qg*(1-f*C)*Q(eg1,np.inf,Tc,arrays=1)
# rhs4 = qg*(1-f*C)*Q(eg2,eg1,Tc,arrays=2)
maxEfficiency = []
log = open("MJlog.txt","w")
for i in range(egcount):
if eg1[i] > eg2:
rhs2 = q*realsolar(wavelength,radiance,eg2,eg1[i],arrays=0)
print("{:.3f}".format(eg1[i]), " eV bandgap:")
print(' ')
log.write("{:.3f}".format(eg1[i]) + " eV bandgap:\n\n")
offset = 0.5
v1 = np.linspace(0,eg1[i]-0.1,vcount)/q
v2 = np.linspace(0,eg2-0.1,vcount)/q
bestDiff = 1
count = 0
while (bestDiff > 1e-16 and count < 20):
count += 1
minDiffList = []
minDiffArgs = []
rhs5 = qg*Q_V(eg2,v2)
rhs6 = qg*Q_V(eg1[i],v2)
lhs = qg*Q_V(eg1[i],v1)
# rhs = 2/3*(rhs1[i]-rhs2[i]+rhs3[i]-rhs4[i]+rhs5+rhs6)
rhs = 2/3*(rhs1[i]-rhs2[i]+rhs5+rhs6)
for u in range(vcount):
difference = np.abs(lhs[u]-rhs)
minDiffList.append(np.min(difference))
minDiffArgs.append(np.argmin(difference))
bestDiff = np.min(minDiffList)
v1argmin = np.argmin(minDiffList)
v2argmin = minDiffArgs[v1argmin]
v1best = v1[v1argmin]
v2best = v2[v2argmin]
print(np.min(minDiffList), ' at v1 = ', v1best*q, ' J/C and v2 = ', v2best*q, ' J/C.')
j1best = rhs1[i] - lhs[v1argmin] + rhs6[v2argmin]
j2best = rhs2[i] - rhs5[v2argmin] + 0.5*lhs[v1argmin]
efficiency = 0.5*(j1best+j2best)*(v1best+v2best)/P_in
print('The currents obtained are J1 = ', j1best, ' A/m^2 and J2 = ', j2best, ' A/m^2.')
log.write("Difference between current densities is " + str(np.min(minDiffList)) + " at v1 = " + str(v1best*q) + " J/C and v2 = " + str(v2best*q) + " J/C.\n")
log.write("The currents obtained are J1 = " + str(j1best) + " A/m^2 and J2 = " + str(j2best) + " A/m^2.\n\n")
skips = 1
while j1best < 0 or j2best < 0:
print('ERROR: negative currents obtained!')
log.write('ERROR: negative currents obtained!\n')
skips *= 2
minDiffList = []
minDiffArgs = []
for u in range(vcount):
difference = np.abs(lhs[u]-rhs)
difference[difference <= skips*np.min(difference)] = np.max(difference)
minDiffList.append(np.min(difference))
minDiffArgs.append(np.argmin(difference))
bestDiff = np.min(minDiffList)
v1argmin = np.argmin(minDiffList)
v2argmin = minDiffArgs[v1argmin]
v1best = v1[v1argmin]
v2best = v2[v2argmin]
print(np.min(minDiffList), ' at v1 = ', v1best*q, ' J/C and v2 = ', v2best*q, ' J/C.')
j1best = rhs1[i] - lhs[v1argmin] + rhs6[v2argmin]
j2best = rhs2[i] - rhs5[v2argmin] + 0.5*lhs[v1argmin]
print('The currents obtained are J1 = ', j1best, ' A/m^2 and J2 = ', j2best, ' A/m^2.')
log.write(str(np.min(minDiffList)) + ' at v1 = ' + str(v1best*q) + ' J/C and v2 = ' + str(v2best*q) + ' J/C.\n')
log.write('The currents obtained are J1 = ' + str(j1best) + ' A/m^2 and J2 = ' + str(j2best) + ' A/m^2.\n\n')
efficiency = 0.5*(j1best+j2best)*(v1best+v2best)/P_in
v1 = np.linspace(v1best*(1-offset),v1best*(1+offset),vcount)
v2 = np.linspace(v2best*(1-offset),v2best*(1+offset),vcount)
if count < 3:
offset *= 0.5
else:
offset *= 0.1
print(' ')
print(' ')
if count == 20:
print('WARNING: Convergence not satisfied')
log.write('WARNING: Convergence not satisfied\n')
print(' ')
print(' ')
print(' ')
print('The efficiency for a ', '{:.3f}'.format(eg1[i]), ' eV bandgap top layer is ', '{:.3f}'.format(efficiency))
log.write('\n\n\nThe efficiency for a ' + '{:.3f}'.format(eg1[i]) + ' eV bandgap top layer is ' + '{:.3f}'.format(efficiency) + '\n\n\n')
maxEfficiency.append(efficiency)
print(' ')
print(' ')
print(' ')
log.close()
plt.plot(eg1,maxEfficiency,'o')
plt.show()
if blackbody == 1:
maxEfficiencyMJ = []
if siliconTopLayer == 1:
maxEfficiencyMJ2 = []
if am1pt5 == 1:
maxEfficiencyMJReal = []
if siliconTopLayer == 1:
maxEfficiencyMJReal2 = []
##### This next part calculates all the terms that don't depend on the
##### voltages. For each, there is a check to make sure that a Not-a-number
##### wasn't returned by any of the integrals. This should have been mostly
##### fixed by taking the voltage arrays to the bandgap energies - 0.1 eV,
##### but the NaN checks only slow down the computation a little, and save
##### a lot of potential headaches, so I left them in.
##### Decoder: term1_1 = absorbed solar flux for top layer (first term, first layer).
##### term1_2 = absorbed solar flux for silicon base layer (first term, second layer).
##### term2_1 = absorbed ambient flux for top layer (second term, first layer).
##### term2_2 = absorbed ambient flux for silicon layer (second term, second layer).
##### term3_1 = recombined radiation given off by top layer (third term, first layer).
##### term3_2 = recombined radiation given off by silicon layer (third term, second layer).
##### term4_1 = absorbed flux given off by the silicon layer, absorbed by the top layer (fourth term, first layer).
##### term4_2 = absorbed flux given off by the top layer, absorbed by the bottom layer (fourth term, second layer).
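##### Illustrative sketch added for clarity (not called anywhere in this
##### script): how the four decoded terms above combine into one layer's
##### net current density in the loops below, J = (absorbed solar flux)
##### + (absorbed ambient flux) - (recombination emission) + (flux received
##### from the other layer). The helper name and argument names are
##### assumptions for illustration only.
def _layer_current_sketch(absorbed_solar, absorbed_ambient, emitted, received_from_other):
    """Net current density of one junction; inputs are the term values as
    computed in this script (charge and concentration factors already included)."""
    return absorbed_solar + absorbed_ambient - emitted + received_from_other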
term1_1 = np.zeros([egcount])
term1_1 = qg*f*C*Q(eg1,np.inf,Tsun,arrays=1)
term1_2 = np.zeros([egcount])
term1_2 = qg*f*C*Q(eg2,eg1,Tsun,arrays=2)
term1_1Real = np.zeros([egcount])
term1_1Real = q*realsolar(wavelength,radiance,eg1,np.inf,arrays=1)
term1_2Real = np.zeros([egcount])
term1_2Real = q*realsolar(wavelength,radiance,eg2,eg1,arrays=2)
term2_1 = np.zeros([egcount])
term2_1 = qg*(1-f*C)*Q(eg1,np.inf,Tc,arrays=1)
term2_2 = np.zeros([egcount])
term2_2 = qg*(1-f*C)*Q(eg2,eg1,Tc,arrays=2)
term3_1 = np.zeros([egcount,vcount])
term3_2 = np.zeros([vcount])
term4_1 = np.zeros([egcount,vcount])
term4_2 = np.zeros([egcount,vcount])
if siliconTopLayer == 1:
term1_1_2 = qg*f*C*Q(eg1_2,np.inf,Tsun,arrays=0)
term1_2_2 = np.zeros([egcount])
term1_2_2 = qg*f*C*Q(eg2_2,eg1_2,Tsun,arrays=1)
term1_1_2Real = q*realsolar(wavelength,radiance,eg1_2,np.inf,arrays=0)
term1_2_2Real = np.zeros([egcount])
term1_2_2Real = q*realsolar(wavelength,radiance,eg2_2,eg1_2,arrays=1)
term2_1_2 = qg*(1-f*C)*Q(eg1_2,np.inf,Tc,arrays=0)
term2_2_2 = np.zeros([egcount])
term2_2_2 = qg*(1-f*C)*Q(eg2_2,eg1_2,Tc,arrays=1)
term3_1_2 = np.zeros([vcount])
term3_2_2 = np.zeros([egcount,vcount])
term4_1_2 = np.zeros([vcount])
term4_2_2 = np.zeros([vcount])
if Joules == 1:
v2 = np.linspace(0,eg2-0.1,vcount)
if siliconTopLayer == 1:
v1_2 = np.linspace(0,eg1_2-0.01,vcount)
if eV == 1:
v2 = np.linspace(0,eg2-0.1,vcount)/q
if siliconTopLayer == 1:
v1_2 = np.linspace(0,eg1_2-0.01,vcount)/q
term3_2 = qg*Q_V(eg2,v2)
if siliconTopLayer == 1:
term3_1_2 = qg*Q_V(eg1_2,v1_2)
term4_2_2 = 0.5*term3_1_2
for i in range(egcount):
print(" ")
print(" ")
print("SILICON BOTTOM LAYER: ", "{:.3f}".format(eg1[i]), " eV bandgap.")
print(" ")
print("SILICON TOP LAYER: ", "{:.3f}".format(eg2_2[i]), " eV bandgap.")
print(" ")
if blackbody == 1:
j1 = np.zeros([vcount,vcount])
j2 = np.zeros([vcount,vcount])
if siliconTopLayer == 1:
j1_2 = np.zeros([vcount,vcount])
j2_2 = np.zeros([vcount,vcount])
if am1pt5 == 1:
j1Real = np.zeros([vcount,vcount])
j2Real = np.zeros([vcount,vcount])
if siliconTopLayer == 1:
j1_2Real = np.zeros([vcount,vcount])
j2_2Real = np.zeros([vcount,vcount])
if Joules == 1:
v1 = np.linspace(eg1[i]/4,eg1[i]*7/8,vcount)
if siliconTopLayer == 1:
v2_2 = np.linspace(0,eg2_2[i]*7/8,vcount)
if eV == 1:
v1 = np.linspace(eg1[i]/4,eg1[i]*7/8,vcount)/q
if siliconTopLayer == 1:
v2_2 = np.linspace(0,eg2_2[i]*7/8,vcount)/q
if blackbody == 1:
index1 = []
index2 = []
efficiency = []
diff = np.zeros([vcount,vcount])
if siliconTopLayer == 1:
index1_2 = []
index2_2 = []
efficiency2 = []
diff2 = np.zeros([vcount,vcount])
if am1pt5 == 1:
index1Real = []
index2Real = []
efficiencyReal = []
diffReal = np.zeros([vcount,vcount])
if siliconTopLayer == 1:
index1_2Real = []
index2_2Real = []
efficiencyReal2 = []
diffReal2 = np.zeros([vcount,vcount])
term3_1[i] = qg*Q_V(eg1[i],v1)
term4_1[i] = 0.5*qg*Q_V(eg1[i],v2)
term4_2[i] = 0.5*term3_1[i]
if siliconTopLayer == 1:
term3_2_2[i] = qg*Q_V(eg2_2[i],v2_2)
term4_1_2 = 0.5*qg*Q_V(eg1_2,v2_2)
##### This calculates both layers' current densities, then takes their
##### absolute difference. Any pair of current densities whose absolute
##### difference is below the parameter "threshold" (which is adapted
##### below whenever too few or too many pairs pass) is accepted, and
##### their indices are stored in index1 and index2.
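##### Illustrative sketch added for clarity (not called anywhere in this
##### script): the same acceptance test written as a stand-alone helper.
##### Series connection requires J1 ~= J2, so index pairs whose absolute
##### current-density difference is below the threshold (and whose currents
##### are both positive) are kept. Names are assumptions for illustration.
def _matched_pairs_sketch(j1_grid, j2_grid, threshold):
    """Return (v1 index, v2 index) pairs where |J1 - J2| < threshold and both
    current densities are positive."""
    import numpy as np
    difference = np.abs(j1_grid - j2_grid)
    accepted = (difference < threshold) & (j1_grid > 0) & (j2_grid > 0)
    return list(zip(*np.where(accepted)))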
threshold = 1e-3
threshold2 = 1e-3
if blackbody == 1:
print('SILICON AS THE BOTTOM LAYER:')
for u in range(vcount):
j1[u] = term1_1[i]+term2_1[i]-term3_1[i,u]+term4_1[i]
j2[u] = term1_2[i]+term2_2[i]-term3_2+term4_2[i,u]
if siliconTopLayer == 1:
j1_2[u] = term1_1_2+term2_1_2-term3_1_2[u]+term4_1_2
j2_2[u] = term1_2_2[i]+term2_2_2[i]-term3_2_2[i]+term4_2_2[u]
diff = np.abs(j1 - j2)
diff[j1 <= 0] = np.nan
diff[j2 <= 0] = np.nan
diffTemp = diff[np.isnan(diff) == False]
bestDiffs = diffTemp[diffTemp<threshold]
while len(bestDiffs) < 10 or len(bestDiffs) > 200:
if len(bestDiffs) < 10:
print('Not enough pairs found. Increasing threshold.')
threshold *= 2
bestDiffs = diffTemp[diffTemp<threshold]
elif len(bestDiffs) > 200:
print('Too many pairs found. Decreasing threshold.')
threshold *= 0.5
bestDiffs = diffTemp[diffTemp<threshold]
for l in range(len(bestDiffs)):
index1temp, index2temp = np.where(diff == bestDiffs[l])
index1.append(index1temp[0])
index2.append(index2temp[0])
print("Success!")
print("indices for ", eg1[i], ' eV are ', index1[0], ' and ', index2[0])
print(" ")
print('Minimum difference between the currents for bandgap ', eg1[i], ' eV is ', "{:.2f}".format(np.min(diff[np.isnan(diff) == False])), ' for voltages V1 = ', "{:.2f}".format(q*v1[index1[0]]), ' and V2 = ', "{:.2f}".format(q*v2[index2[0]]))
print('Currents are J1 = ', "{:.2f}".format(j1[index1[0],index2[0]]), ' and J2 = ', "{:.2f}".format(j2[index1[0],index2[0]]))
print(" ")
if siliconTopLayer == 1:
print('SILICON AS THE TOP LAYER:')
diff2 = np.abs(j1_2 - j2_2)
diff2[j1_2 <= 0] = np.nan
diff2[j2_2 <= 0] = np.nan
diffTemp2 = diff2[np.isnan(diff2) == False]
bestDiffs2 = diffTemp2[diffTemp2<threshold2]
while len(bestDiffs2) < 10 or len(bestDiffs2) > 200:
if len(bestDiffs2) < 10:
print('Not enough pairs found. Increasing threshold.')
threshold2 *= 2
bestDiffs2 = diffTemp2[diffTemp2<threshold2]
elif len(bestDiffs2) > 200:
print('Too many pairs found. Decreasing threshold.')
threshold2 *= 0.6
bestDiffs2 = diffTemp2[diffTemp2<threshold2]
for l in range(len(bestDiffs2)):
index1_2temp, index2_2temp = np.where(diff2 == bestDiffs2[l])
index1_2.append(index1_2temp[0])
index2_2.append(index2_2temp[0])
print("Success!")
print(" ")
print("indices for ", eg2_2[i], ' eV are ', index1_2[0], ' and ', index2_2[0])
print('Minimum difference between the currents for bandgap ', eg2_2[i], ' eV is ', np.min(diff2[np.isnan(diff2) == False]), ' for voltages V1 = ', q*v1_2[index1_2[0]], ' and V2 = ', q*v2_2[index2_2[0]])
print('Currents are J1 = ', j1_2[index1_2[0],index2_2[0]], ' and J2 = ', j2_2[index1_2[0],index2_2[0]])
print(" ")
if am1pt5 == 1:
print('SILICON AS THE BOTTOM LAYER:')
for u in range(vcount):
j1Real[u] = term1_1Real[i]+term2_1[i]-term3_1[i,u]+term4_1[i]
j2Real[u] = term1_2Real[i]+term2_2[i]-term3_2+term4_2[i,u]
if siliconTopLayer == 1:
j1_2Real[u] = term1_1_2Real+term2_1_2-term3_1_2[u]+term4_1_2
j2_2Real[u] = term1_2_2Real[i]+term2_2_2[i]-term3_2_2[i]+term4_2_2[u]
diffReal = np.abs(j1Real - j2Real)
diffReal[j1Real <= 0] = np.nan
diffReal[j2Real <= 0] = np.nan
diffTemp = diffReal[np.isnan(diffReal) == False]
bestDiffs = diffTemp[diffTemp<threshold]
count = 0
low = 10
high = 200
while (len(bestDiffs) < low or len(bestDiffs) > high) and count < 20:
while count < 20:
if len(bestDiffs) < low:
print('Not enough pairs found. Increasing threshold.')
threshold *= 1.2
bestDiffs = diffTemp[diffTemp<threshold]
elif len(bestDiffs) > high:
print('Too many pairs found. Decreasing threshold.')
threshold *= 0.8
bestDiffs = diffTemp[diffTemp<threshold]
count += 1
if len(bestDiffs) < low or len(bestDiffs) > high:
index1Real = np.array([0])
index2Real = np.array([0])
else:
for l in range(len(bestDiffs)):
index1temp, index2temp = np.where(diffReal == bestDiffs[l])
index1Real.append(index1temp[0])
index2Real.append(index2temp[0])
print("Success!")
print("indices for ", "{:.3f}".format(eg1[i]), ' eV are ', index1Real[0], ' and ', index2Real[0])
print(" ")
print('Minimum difference between the currents for bandgap ', "{:.3f}".format(eg1[i]), ' eV is ', np.min(diffReal[np.isnan(diffReal) == False]), ' for voltages V1 = ', "{:.2f}".format(q*v1[index1Real[0]]), ' and V2 = ', "{:.2f}".format(q*v2[index2Real[0]]))
print('Currents are J1 = ', "{:.4f}".format(j1Real[index1Real[0],index2Real[0]]), ' and J2 = ', "{:.4f}".format(j2Real[index1Real[0],index2Real[0]]))
print(" ")
if siliconTopLayer == 1:
print('SILICON AS THE TOP LAYER:')
diffReal2 = np.abs(j1_2Real - j2_2Real)
diffReal2[j1_2Real <= 0] = np.nan
diffReal2[j2_2Real <= 0] = np.nan
diffTemp2 = diffReal2[np.isnan(diffReal2) == False]
bestDiffs2 = diffTemp2[diffTemp2<threshold2]
count = 0
low = 10
if len(bestDiffs2) == 0:
print('No pairs found for this bandgap. Falling back to index 0.')
index1_2Real = np.array([0])
index2_2Real = np.array([0])
else:
for l in range(len(bestDiffs2)):
index1_2temp, index2_2temp = np.where(diffReal2 == bestDiffs2[l])
index1_2Real.append(index1_2temp[0])
index2_2Real.append(index2_2temp[0])
print("Success!")
print(" ")
print("indices for ", "{:.3f}".format(eg2_2[i]), ' eV are ', index1_2Real[0], ' and ', index2_2Real[0])
print('Minimum difference between the currents for bandgap ', "{:.3f}".format(eg2_2[i]), ' eV is ', np.min(diffReal2[np.isnan(diffReal2) == False]), ' for voltages V1 = ', "{:.2f}".format(q*v1_2[index1_2Real[0]]), ' and V2 = ', "{:.2f}".format(q*v2_2[index2_2Real[0]]))
print('Currents are J1 = ', "{:.4f}".format(j1_2Real[index1_2Real[0],index2_2Real[0]]), ' and J2 = ', "{:.3f}".format(j2_2Real[index1_2Real[0],index2_2Real[0]]))
print(" ")
##### This next part calculates the efficiency for each of the pairs of
##### current densities that passes the prior threshold criteria. The
##### average of the current densities and sum of the voltages are used
##### to calculate the produced power. The efficiency and efficiencyReal
##### lists are appended with the efficiencies for all of the pairs of
##### current densities. The best of these is stored in the maxEfficiencyMJ
##### and maxEfficiencyMJReal arrays and the efficiency and efficiencyReal
##### arrays will be cleared for the next bandgap energy value.
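##### Illustrative sketch added for clarity (not called anywhere in this
##### script): the efficiency of one accepted (J1, J2, V1, V2) pair exactly
##### as formed in the loops below - average the two current densities,
##### sum the two voltages, divide by the incident power density. The helper
##### name is an assumption for illustration only.
def _tandem_efficiency_sketch(j1, j2, v1, v2, p_in):
    """Efficiency = 0.5*(|J1| + |J2|)*(V1 + V2)/P_in."""
    return 0.5*(abs(j1) + abs(j2))*(v1 + v2)/p_in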
if blackbody == 1:
for b in range(len(index1)):
j1temp = j1[index1[b],index2[b]]
j2temp = j2[index1[b],index2[b]]
jtemp = 0.5*(np.abs(j1temp)+np.abs(j2temp))
v1temp = v1[index1[b]]
v2temp = v2[index2[b]]
vtemp = v1temp + v2temp
efficiency.append(jtemp*vtemp/P_in)
maxEfficiencyMJ.append(np.max(np.asarray(efficiency)))
if siliconTopLayer == 1:
for b in range(len(index1_2)):
j1_2temp = j1_2[index1_2[b],index2_2[b]]
j2_2temp = j2_2[index1_2[b],index2_2[b]]
jtemp2 = 0.5*(np.abs(j1_2temp)+np.abs(j2_2temp))
v1_2temp = v1_2[index1_2[b]]
v2_2temp = v2_2[index2_2[b]]
vtemp2 = v1_2temp + v2_2temp
efficiency2.append(jtemp2*vtemp2/P_in)
maxEfficiencyMJ2.append(np.max(np.asarray(efficiency2)))
if am1pt5 == 1:
for b in range(len(index1Real)):
j1tempReal = j1Real[index1Real[b],index2Real[b]]
j2tempReal = j2Real[index1Real[b],index2Real[b]]
jtempReal = 0.5*(np.abs(j1tempReal)+np.abs(j2tempReal))
v1tempReal = v1[index1Real[b]]
v2tempReal = v2[index2Real[b]]
vtempReal = v1tempReal + v2tempReal
efficiencyReal.append(jtempReal*vtempReal/P_in)
maxEfficiencyMJReal.append(np.max(np.asarray(efficiencyReal)))
if siliconTopLayer == 1:
for b in range(len(index1_2Real)):
j1_2tempReal = j1_2Real[index1_2Real[b],index2_2Real[b]]
j2_2tempReal = j2_2Real[index1_2Real[b],index2_2Real[b]]
jtempReal2 = 0.5*(np.abs(j1_2tempReal)+np.abs(j2_2tempReal))
v1_2tempReal = v1_2[index1_2Real[b]]
v2_2tempReal = v2_2[index2_2Real[b]]
vtempReal2 = v1_2tempReal + v2_2tempReal
efficiencyReal2.append(jtempReal2*vtempReal2/P_in)
maxEfficiencyMJReal2.append(np.max(np.asarray(efficiencyReal2)))
plt.figure(fignum)
fignum += 1
if blackbody == 1:
plt.plot(eg1,maxEfficiencyMJ)
if am1pt5 == 1:
plt.plot(eg1,maxEfficiencyMJReal,linewidth=3,color='black')
if siliconTopLayer == 1:
plt.plot(eg2_2,maxEfficiencyMJReal2,linewidth=3,color='blue')
# plt.title('Two-Layer Tandem Solar Cell with Silicon Base Layer')
plt.xlabel('Bandgap Energy of Top Layer (eV)')
plt.ylabel('Efficiency')
plt.xlim(0.2,3.2)
plt.ylim(0,0.49)
print('Finished with the Multi-Junction!')
# plt.figure(fignum)
# fignum += 1
# plt.plot(v1*q,term3_1[0],color='blue')
# plt.plot(v1*q,term3_1[1],color='red')
# plt.plot(v1*q,term3_1[2],color='green')
# plt.axhline(term1_1Real[0]+term2_1[0],color='orange')
# plt.axhline(term1_1Real[1]+term2_1[1],color='firebrick')
# plt.axhline(term1_1Real[2]+term2_1[2],color='cyan')
# plt.axhline(term1_2Real[0]+term2_2[0],color='teal')
# plt.axhline(term1_2Real[1]+term2_2[1],color='gold')
# plt.axhline(term1_2Real[2]+term2_2[2],color='skyblue')
# plt.ylabel(r'Current Density J (A/m$^2$)')
# plt.xlabel('Top Layer Voltage (J/C)')
# plt.ylim(-15,500)
if multiexciton == 1:
vcount = 200
egcount = 150
eglower = 0.2
egupper = 2.5
if Joules == 1:
eg = np.linspace(eglower,egupper,egcount)*q #in Joules
P_in = 930
elif eV == 1:
eg = np.linspace(eglower,egupper,egcount)
P_in = 930/q #page 8 of Goodnick's document, for AM1.5, intensity is approximately 930 W/m^2
def efficiency(meg):
"""This function takes a maximum value of excitons generated per photon
and calculates the efficiency of a cell in which every photon
generates the max number of excitons possible for its energy, up to the
value of meg. It returns an array of maximum efficiencies as a function
of bandgap energy."""
if blackbody == 1:
n = np.zeros([egcount,vcount])
nmax = np.zeros_like(eg)
j1 = np.zeros([egcount,vcount])
if am1pt5 == 1:
n_real = np.zeros([egcount,vcount])
nmax_real = np.zeros_like(eg)
j1real = np.zeros([egcount,vcount])
for i in range(egcount):
if Joules == 1:
v = np.linspace(0,eg[i]-0.1,vcount)
if eV == 1:
v = np.linspace(0,eg[i]-0.1,vcount)/q
if blackbody == 1:
term1 = np.zeros([meg])
if am1pt5 == 1:
term1_real = np.zeros([meg])
term2 = np.zeros([meg])
##### This for loop will repeat for every possible number of excitons
##### generated. Term 1, the incident solar photon flux, and term 2,
##### the ambient incident photon flux, have been split into a series of
##### integrals, ie, integral(Eg to inf) is replaced with integral(Eg to 2*Eg)
##### + 2*integral(2*Eg to 3*Eg) + 3*integral(3*Eg to 4*Eg) + ... up to
##### the maximum provided by meg. This loop cycles through every interval,
##### from Eg to 2Eg, 2Eg to 3Eg, etc, and stores the result of each integral
##### inside of term1[u], term1_real, term2[u].
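##### Worked example added for clarity (illustration only): with meg = 3 the
##### replacement described above reads
#####     integral(Eg to inf) -> 1*integral(Eg to 2Eg) + 2*integral(2Eg to 3Eg)
#####                            + 3*integral(3Eg to inf),
##### i.e. a photon with energy between u*Eg and (u+1)*Eg is assumed to yield
##### u excitons, and everything above meg*Eg is capped at meg excitons.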
for u in range(meg):
if u+1 < meg:
if blackbody == 1:
term1[u] = (u+1)*f*C*Q((u+1)*eg[i],(u+2)*eg[i],Tsun)
if am1pt5 == 1:
term1_real[u] = (u+1)*realsolar(wavelength,radiance,(u+1)*eg[i],(u+2)*eg[i])
term2[u] = (u+1)*(1-f*C)*Q((u+1)*eg[i],(u+2)*eg[i],Tc)
elif u+1 == meg:
if blackbody == 1:
term1[u] = (u+1)*f*C*Q((u+1)*eg[i],np.inf,Tsun)
if am1pt5 == 1:
term1_real[u] = (u+1)*realsolar(wavelength,radiance,(u+1)*eg[i],np.inf)
term2[u] = (u+1)*(1-f*C)*Q((u+1)*eg[i],np.inf,Tc)
##### This for loop cycles through all of the voltage values, calculates
##### term3 for each, then loops through meg again to add all of the
##### term1[u], term2[u], etc integrals found in the previous loop.
##### The efficiency is calculated for each current density found.
for j in range(vcount):
term3 = np.zeros([meg])
for u in range(meg):
if u+1 < meg:
term3[u] = (u+1)*Q_V2((u+1)*eg[i],(u+2)*eg[i],v[j])
elif u+1 == meg:
term3[u] = (u+1)*Q_V2((u+1)*eg[i],np.inf,v[j])
if blackbody == 1:
if j < 2 or j1[i,j-2]*j1[i,j-1] > 0:
for u in range(meg):
j1[i,j] += qg*(term1[u]+term2[u])
j1[i,j] -= qg*term3[u]
else:
j1[i,j] = 0
n[i,j] = j1[i,j]*v[j]/P_in
if am1pt5 == 1:
if j < 2 or j1real[i,j-2]*j1real[i,j-1] > 0:
for u in range(meg):
j1real[i,j] += q*term1_real[u]+qg*term2[u]
j1real[i,j] -= qg*term3[u]
else:
j1real[i,j] = 0
n_real[i,j] = j1real[i,j]*v[j]/P_in
if blackbody == 1:
nmax[i] = np.max(n[i])
if am1pt5 == 1:
nmax_real[i] = np.max(n_real[i])
if blackbody == 1:
if am1pt5 == 1:
return nmax, nmax_real
if am1pt5 == 0:
return nmax
else:
return nmax_real
##### This next loop will cycle through meg_cap and will return the max
##### efficiencies for each possible max number of excitons less than or equal
##### to meg_cap. Ie, for meg_cap = 2, the max efficiencies for single-exciton
##### generation *and* double-exciton generation will be returned and graphed.
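##### Illustrative sketch added for clarity (not executed here): stripped of
##### the blackbody/AM1.5 branching, the loop below amounts to one efficiency
##### curve per allowed exciton multiplicity, e.g.
#####     curves = [efficiency(m + 1) for m in range(meg_cap)]
#####     for curve in curves:
#####         plt.plot(eg, curve)
##### (assumes the single-spectrum case where efficiency() returns one array).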
if blackbody == 1:
nmax_array = np.zeros([meg_cap,egcount])
if am1pt5 == 1:
nmax_real_array = np.zeros([meg_cap,egcount])
plt.figure(fignum)
fignum += 1
for jk in range(meg_cap):
if blackbody == 1:
if am1pt5 == 1:
nmax_array[jk],nmax_real_array[jk] = efficiency(jk+1)
plt.plot(eg,nmax_real_array[jk])
plt.plot(eg,nmax_array[jk])
if am1pt5 == 0:
nmax_array[jk] = efficiency(jk+1)
plt.plot(eg,nmax_array[jk],linewidth=3)
else:
nmax_real_array[jk] = efficiency(jk+1)
plt.plot(eg,nmax_real_array[2],linewidth=3,color='blue')
plt.plot(eg,nmax_real_array[3],linewidth=3,color='purple')
plt.plot(eg,nmax_real_array[5],linewidth=3,color='black')
# plt.title('Shockley-Queisser Limit with Multi-Exciton Generation for ' + str(meg_cap) + ' Max Exciton(s) per Photon')
plt.xlabel('Bandgap Energy (eV)')
plt.ylim(0,0.45)
plt.xlim(0,2.5)
plt.ylabel('Efficiency')
print('Finished with the multi-exciton generation!')
if multijunctionMEG == 1:
egcount = 30
eglimit1 = 1.101
eglimit2 = 3.0
eglimit3 = 0.4
eglimit4 = 1.099
if Joules == 1:
eg1 = np.linspace(eglimit1,eglimit2,egcount)*q
eg2 = 1.1*q #bandgap for silicon
P_in = 930
if siliconTopLayer == 1:
eg1flip = 1.1
eg2flip = np.linspace(eglimit3,eglimit4,egcount)*q
elif eV == 1:
eg1 = np.linspace(eglimit1,eglimit2,egcount)
eg2 = 1.1
P_in = 930/q #page 8 of Goodnick's document, for AM1.5, intensity is approximately 930 W/m^2
if siliconTopLayer == 1:
eg1flip = 1.1
eg2flip = np.linspace(eglimit3,eglimit4,egcount)
vcount = 6000
if Joules == 1:
v2 = np.linspace(0,eg2-0.1,vcount)
if siliconTopLayer == 1:
v1flip = np.linspace(0,eg1flip-0.1,vcount)
if eV == 1:
v2 = np.linspace(0,eg2-0.1,vcount)/q
if siliconTopLayer == 1:
v1flip = np.linspace(0,eg1flip-0.1,vcount)/q
if blackbody == 1:
j1 = np.zeros([vcount,vcount])
j2 = np.zeros([vcount,vcount])
maxEfficiency = []
finalDiff = []
if siliconTopLayer == 1:
j1flip = np.zeros([vcount,vcount])
j2flip = np.zeros([vcount,vcount])
maxEfficiencyflip = []
finalDiff_flip = []
if am1pt5 == 1:
j1AM15 = np.zeros([vcount,vcount])
j2AM15 = np.zeros([vcount,vcount])
maxEfficiencyAM15 = []
finalDiffAM15 = []
if siliconTopLayer == 1:
j1AM15flip = np.zeros([vcount,vcount])
j2AM15flip = np.zeros([vcount,vcount])
maxEfficiencyAM15flip = []
finalDiffAM15flip = []
term1_1 = np.zeros([egcount])
term1_2 = np.zeros([egcount])
term1_1AM15 = np.zeros([egcount])
term1_2AM15 = np.zeros([egcount])
term2_1 = np.zeros([egcount])
term2_2 = np.zeros([egcount])
term3_1 = np.zeros([egcount,vcount])
term3_2 = np.zeros([egcount,vcount])
term4_1 = np.zeros([egcount,vcount])
term4_2 = np.zeros([egcount,vcount])
if siliconTopLayer == 1:
term1_1flip = np.zeros([egcount])
term1_2flip = np.zeros([egcount])
term1_1AM15flip = np.zeros([egcount])
term1_2AM15flip = np.zeros([egcount])
term2_1flip = np.zeros([egcount])
term2_2flip = np.zeros([egcount])
term3_1flip = np.zeros([egcount,vcount])
term3_2flip = np.zeros([egcount,vcount])
term4_1flip = np.zeros([egcount,vcount])
term4_2flip = np.zeros([egcount,vcount])
max_meg_layer2 = meg_cap
for i in range(egcount):
max_si_meg = eg1[i]/eg2
max_si_meg = max_si_meg.astype(int)
if max_si_meg > meg_cap:
max_si_meg = meg_cap
max_bottom_meg_flip = eg1flip/eg2flip[i]
max_bottom_meg_flip = max_bottom_meg_flip.astype(int)
if max_bottom_meg_flip > meg_cap:
max_bottom_meg_flip = meg_cap
if Joules == 1:
v1 = np.linspace(0,eg1[i]-0.1,vcount)
if siliconTopLayer == 1:
v2flip = np.linspace(0,eg2flip[i]-0.1,vcount)
if eV == 1:
v1 = np.linspace(0,eg1[i]-0.1,vcount)/q
if siliconTopLayer == 1:
v2flip = np.linspace(0,eg2flip[i]-0.1,vcount)/q
if blackbody == 1:
index1 = []
index2 = []
efficiency = []
diff = np.zeros([vcount,vcount])
if siliconTopLayer == 1:
index1flip = []
index2flip = []
efficiencyflip = []
diff_flip = np.zeros([vcount,vcount])
if am1pt5 == 1:
index1AM15 = []
index2AM15 = []
efficiencyAM15 = []
diffAM15 = np.zeros([vcount,vcount])
if siliconTopLayer == 1:
index1AM15flip = []
index2AM15flip = []
efficiencyAM15flip = []
diffAM15flip = np.zeros([vcount,vcount])
for u in range(max_meg_layer2):
if u+1 < max_meg_layer2:
if blackbody == 1:
term1_1[i] += (u+1)*qg*f*C*Q((u+1)*eg1[i],(u+2)*eg1[i],Tsun)
if siliconTopLayer == 1:
term1_1flip[i] += (u+1)*qg*f*C*Q((u+1)*eg1flip,(u+2)*eg1flip,Tsun)
if am1pt5 == 1:
term1_1AM15[i] += (u+1)*q*realsolar(wavelength,radiance,(u+1)*eg1[i],(u+2)*eg1[i])
if siliconTopLayer == 1:
term1_1AM15flip[i] += (u+1)*q*realsolar(wavelength,radiance,(u+1)*eg1flip,(u+2)*eg1flip)
term2_1[i] += (u+1)*qg*(1-f*C)*Q((u+1)*eg1[i],(u+2)*eg1[i],Tc)
term3_1[i] += (u+1)*qg*Q_V2((u+1)*eg1[i],(u+2)*eg1[i],v1)
term4_1[i] += 0.5*(u+1)*qg*Q_V2((u+1)*eg1[i],(u+2)*eg1[i],v2)
if siliconTopLayer == 1:
term2_1flip[i] += (u+1)*qg*(1-f*C)*Q((u+1)*eg1flip,(u+2)*eg1flip,Tc)
term3_1flip[i] += (u+1)*qg*Q_V2((u+1)*eg1flip,(u+2)*eg1flip,v1flip)
term4_1flip[i] += 0.5*(u+1)*qg*Q_V2((u+1)*eg1flip,(u+2)*eg1flip,v2flip)
elif u+1 == max_meg_layer2:
if blackbody == 1:
term1_1[i] += (u+1)*qg*f*C*Q((u+1)*eg1[i],np.inf,Tsun)
if siliconTopLayer == 1:
term1_1flip[i] += (u+1)*qg*f*C*Q((u+1)*eg1flip,np.inf,Tsun)
if am1pt5 == 1:
term1_1AM15[i] += (u+1)*q*realsolar(wavelength,radiance,(u+1)*eg1[i],np.inf)
if siliconTopLayer == 1:
term1_1AM15flip[i] += (u+1)*q*realsolar(wavelength,radiance,(u+1)*eg1flip,np.inf)
term2_1[i] += (u+1)*qg*(1-f*C)*Q((u+1)*eg1[i],np.inf,Tc)
term3_1[i] += (u+1)*qg*Q_V2((u+1)*eg1[i],np.inf,v1)
term4_1[i] += 0.5*(u+1)*qg*Q_V2((u+1)*eg1[i],np.inf,v2)
if siliconTopLayer == 1:
term2_1flip[i] += (u+1)*qg*(1-f*C)*Q((u+1)*eg1flip,np.inf,Tc)
term3_1flip[i] += (u+1)*qg*Q_V2((u+1)*eg1flip,np.inf,v1flip)
term4_1flip[i] += 0.5*(u+1)*qg*Q_V2((u+1)*eg1flip,np.inf,v2flip)
for r in range(max_si_meg):
if r+1 < max_si_meg:
if blackbody == 1:
term1_2[i] += (r+1)*qg*f*C*Q((r+1)*eg2,(r+2)*eg2,Tsun)
if am1pt5 == 1:
term1_2AM15[i] += (r+1)*q*realsolar(wavelength,radiance,(r+1)*eg2,(r+2)*eg2)
term2_2[i] += (r+1)*qg*(1-f*C)*Q((r+1)*eg2,(r+2)*eg2,Tc)
term3_2[i] += (r+1)*qg*Q_V2((r+1)*eg2,(r+2)*eg2,v2)
elif r+1 == max_si_meg:
if blackbody == 1:
term1_2[i] += (r+1)*qg*f*C*Q((r+1)*eg2,eg1[i],Tsun)
if am1pt5 == 1:
term1_2AM15[i] += (r+1)*q*realsolar(wavelength,radiance,(r+1)*eg2,eg1[i])
term2_2[i] += (r+1)*qg*(1-f*C)*Q((r+1)*eg2,eg1[i],Tc)
term3_2[i] += (r+1)*qg*Q_V2((r+1)*eg2,np.inf,v2)
term4_2[i] = 0.5*term3_1[i]
if siliconTopLayer == 1:
for r in range(max_bottom_meg_flip):
if r+1 < max_bottom_meg_flip:
if blackbody == 1:
term1_2flip[i] += (r+1)*qg*f*C*Q((r+1)*eg2flip[i],(r+2)*eg2flip[i],Tsun)
if am1pt5 == 1:
term1_2AM15flip[i] += (r+1)*q*realsolar(wavelength,radiance,(r+1)*eg2flip[i],(r+2)*eg2flip[i])
term2_2flip[i] += (r+1)*qg*(1-f*C)*Q((r+1)*eg2flip[i],(r+2)*eg2flip[i],Tc)
term3_2flip[i] += (r+1)*qg*Q_V2((r+1)*eg2flip[i],(r+2)*eg2flip[i],v2flip)
elif r+1 == max_bottom_meg_flip:
if blackbody == 1:
term1_2flip[i] += (r+1)*qg*f*C*Q((r+1)*eg2flip[i],eg1flip,Tsun)
if am1pt5 == 1:
term1_2AM15flip[i] += (r+1)*q*realsolar(wavelength,radiance,(r+1)*eg2flip[i],eg1flip)
term2_2flip[i] += (r+1)*qg*(1-f*C)*Q((r+1)*eg2flip[i],eg1flip,Tc)
term3_2flip[i] += (r+1)*qg*Q_V2((r+1)*eg2flip[i],np.inf,v2flip)
term4_2flip[i] = 0.5*term3_1flip[i]
threshold = 1e-3
threshold2 = 1e-2
if blackbody == 1:
print('SILICON AS THE BOTTOM LAYER:')
for u in range(vcount):
j1[u] = term1_1[i]+term2_1[i]-term3_1[i,u]+term4_1[i]
j2[u] = term1_2[i]+term2_2[i]-term3_2[i]+term4_2[i,u]
if siliconTopLayer == 1:
j1flip[u] = term1_1flip[i]+term2_1flip[i]-term3_1flip[i,u]+term4_1flip[i]
j2flip[u] = term1_2flip[i]+term2_2flip[i]-term3_2flip[i]+term4_2flip[i,u]
diff = np.abs(j1 - j2)
diff[j1 <= 0] = np.nan
diff[j2 <= 0] = np.nan
diffTemp = diff[np.isnan(diff) == False]
bestDiffs = diffTemp[diffTemp<threshold]
while len(bestDiffs) < 10 or len(bestDiffs) > 200:
if len(bestDiffs) < 10:
print('Not enough pairs found. Increasing threshold.')
threshold *= 2
bestDiffs = diffTemp[diffTemp<threshold]
elif len(bestDiffs) > 200:
print('Too many pairs found. Decreasing threshold.')
threshold *= 0.5
bestDiffs = diffTemp[diffTemp<threshold]
for l in range(len(bestDiffs)):
index1temp, index2temp = np.where(diff == bestDiffs[l])
index1.append(index1temp[0])
index2.append(index2temp[0])
print("Success!")
print("indices for ", eg1[i], ' eV are ', index1[0], ' and ', index2[0])
print(" ")
print('Minimum difference between the currents for bandgap ', eg1[i], ' eV is ', "{:.2f}".format(np.min(diff[np.isnan(diff) == False])), ' for voltages V1 = ', "{:.2f}".format(q*v1[index1[0]]), ' and V2 = ', "{:.2f}".format(q*v2[index2[0]]))
print('Currents are J1 = ', "{:.2f}".format(j1[index1[0],index2[0]]), ' and J2 = ', "{:.2f}".format(j2[index1[0],index2[0]]))
print(" ")
if siliconTopLayer == 1:
print('SILICON AS THE TOP LAYER:')
diff_flip = np.abs(j1flip - j2flip)
diff_flip[j1flip <= 0] = np.nan
diff_flip[j2flip <= 0] = np.nan
diffTempflip = diff_flip[np.isnan(diff_flip) == False]
bestDiffsflip = diffTempflip[diffTempflip<threshold2]
while len(bestDiffsflip) < 10 or len(bestDiffsflip) > 200:
if len(bestDiffsflip) < 10:
print('Not enough pairs found. Increasing threshold.')
threshold2 *= 2
bestDiffsflip = diffTempflip[diffTempflip<threshold2]
elif len(bestDiffsflip) > 200:
print('Too many pairs found. Decreasing threshold.')
threshold2 *= 0.6
bestDiffsflip = diffTempflip[diffTempflip<threshold2]
for l in range(len(bestDiffsflip)):
index1_2temp, index2_2temp = np.where(diff_flip == bestDiffsflip[l])
index1flip.append(index1_2temp[0])
index2flip.append(index2_2temp[0])
print("Success!")
print(" ")
print("indices for ", eg2flip[i], ' eV are ', index1flip[0], ' and ', index2flip[0])
print('Minimum difference between the currents for bandgap ', eg2flip[i], ' eV is ', np.min(diff_flip[np.isnan(diff_flip) == False]), ' for voltages V1 = ', q*v1flip[index1flip[0]], ' and V2 = ', q*v2flip[index2flip[0]])
print('Currents are J1 = ', j1flip[index1flip[0],index2flip[0]], ' and J2 = ', j2flip[index1flip[0],index2flip[0]])
print(" ")
if am1pt5 == 1:
print('SILICON AS THE BOTTOM LAYER:')
for u in range(vcount):
j1AM15[u] = term1_1AM15[i]+term2_1[i]-term3_1[i,u]+term4_1[i]
j2AM15[u] = term1_2AM15[i]+term2_2[i]-term3_2[i]+term4_2[i,u]
if siliconTopLayer == 1:
j1AM15flip[u] = term1_1AM15flip[i]+term2_1flip[i]-term3_1flip[i,u]+term4_1flip[i]
j2AM15flip[u] = term1_2AM15flip[i]+term2_2flip[i]-term3_2flip[i]+term4_2flip[i,u]
diffAM15 = np.abs(j1AM15 - j2AM15)
diffAM15[j1AM15 <= 0] = np.nan
diffAM15[j2AM15 <= 0] = np.nan
diffTemp = diffAM15[np.isnan(diffAM15) == False]
bestDiffs = diffTemp[diffTemp<threshold]
count = 0
low = 10
high = 200
while (len(bestDiffs) < low or len(bestDiffs) > high):
while count < 20:
if len(bestDiffs) < low:
print('Not enough pairs found. Increasing threshold.')
threshold *= 2
bestDiffs = diffTemp[diffTemp<threshold]
elif len(bestDiffs) > high:
print('Too many pairs found. Decreasing threshold.')
threshold *= 0.6
bestDiffs = diffTemp[diffTemp<threshold]
count += 1
low = int(low/2)
high *= 2
for l in range(len(bestDiffs)):
index1temp, index2temp = np.where(diffAM15 == bestDiffs[l])
index1AM15.append(index1temp[0])
index2AM15.append(index2temp[0])
print("Success!")
print("indices for ", "{:.3f}".format(eg1[i]), ' eV are ', index1AM15[0], ' and ', index2AM15[0])
print(" ")
print('Minimum difference between the currents for bandgap ', "{:.3f}".format(eg1[i]), ' eV is ', np.min(diffAM15[np.isnan(diffAM15) == False]), ' for voltages V1 = ', "{:.2f}".format(q*v1[index1AM15[0]]), ' and V2 = ', "{:.2f}".format(q*v2[index2AM15[0]]))
print('Currents are J1 = ', "{:.4f}".format(j1AM15[index1AM15[0],index2AM15[0]]), ' and J2 = ', "{:.4f}".format(j2AM15[index1AM15[0],index2AM15[0]]))
print(" ")
if siliconTopLayer == 1:
print('SILICON AS THE TOP LAYER:')
diffAM15flip = np.abs(j1AM15flip - j2AM15flip)
diffAM15flip[j1AM15flip <= 0] = np.nan
diffAM15flip[j2AM15flip <= 0] = np.nan
diffTempflip = diffAM15flip[np.isnan(diffAM15flip) == False]
bestDiffsflip = diffTempflip[diffTempflip<threshold2]
low = 10
high = 200
max = 1000
count = 0
while (len(bestDiffsflip) < low or len(bestDiffsflip) > high) and count < 20:
count += 1
if len(bestDiffsflip) < low:
print('Not enough pairs found. Increasing threshold.')
threshold2 *= 1.5
bestDiffsflip = diffTempflip[diffTempflip<threshold2]
if len(bestDiffsflip) > high:
print('Too many pairs found. Attempting to lower threshold.')
threshold2 *= 0.8
bestDiffsflip = diffTempflip[diffTempflip<threshold2]
count += 1
print(len(bestDiffsflip))
if len(bestDiffsflip) != 0 and len(bestDiffsflip) < high:
for l in range(len(bestDiffsflip)):
index1tempflip, index2tempflip = np.where(diffAM15flip == bestDiffsflip[l])
index1AM15flip.append(index1tempflip[0])
index2AM15flip.append(index2tempflip[0])
if len(bestDiffsflip) > high or len(bestDiffsflip) == 0:
print('Decreasing the threshold is not working. Selecting the minimum non-zero difference.')
diffTempflip[diffTempflip == 0] = np.max(diffTempflip)
bestDiffsflip = diffTempflip[np.argmin(diffTempflip)]
index1tempflip, index2tempflip = np.where(diffAM15flip == bestDiffsflip)
index1AM15flip.append(index1tempflip[0])
index2AM15flip.append(index2tempflip[0])
print("Success!")
print(" ")
print("indices for ", "{:.3f}".format(eg2flip[i]), ' eV are ', index1AM15flip[0], ' and ', index2AM15flip[0])
print('Minimum difference between the currents for bandgap ', "{:.3f}".format(eg2flip[i]), ' eV is ', np.min(diffAM15flip[np.isnan(diffAM15flip) == False]), ' for voltages V1 = ', "{:.2f}".format(q*v1flip[index1AM15flip[0]]), ' and V2 = ', "{:.2f}".format(q*v2flip[index2AM15flip[0]]))
print('Currents are J1 = ', "{:.4f}".format(j1AM15flip[index1AM15flip[0],index2AM15flip[0]]), ' and J2 = ', "{:.3f}".format(j2AM15flip[index1AM15flip[0],index2AM15flip[0]]))
print(" ")
##### This next part calculates the efficiency for each of the pairs of
##### current densities that passes the prior threshold criteria. The
##### average of the current densities and sum of the voltages are used
##### to calculate the produced power. The efficiency and efficiencyAM15
##### lists are appended with the efficiencies for all of the pairs of
##### current densities. The best of these is stored in the maxEfficiencyMJ
##### and maxEfficiencyMJAM15 arrays and the efficiency and efficiencyAM15
##### arrays will be cleared for the next bandgap energy value.
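##### Illustrative sketch added for clarity (not called anywhere in this
##### script): the threshold-adaptation pattern used by the pair searches in
##### this section - widen the acceptance threshold when too few
##### current-matched pairs pass, tighten it when too many pass. The helper
##### name, bounds and factors are assumptions for illustration only.
def _adapt_threshold_sketch(n_pairs, threshold, low=10, high=200):
    """Return an updated threshold given how many pairs passed the last test."""
    if n_pairs < low:
        return threshold*2.0
    if n_pairs > high:
        return threshold*0.5
    return threshold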
if blackbody == 1:
for b in range(len(index1)):
j1temp = j1[index1[b],index2[b]]
j2temp = j2[index1[b],index2[b]]
jtemp = 0.5*(np.abs(j1temp)+np.abs(j2temp))
v1temp = v1[index1[b]]
v2temp = v2[index2[b]]
vtemp = v1temp + v2temp
efficiency.append(jtemp*vtemp/P_in)
maxEfficiency.append(np.max(np.asarray(efficiency)))
if siliconTopLayer == 1:
for b in range(len(index1flip)):
j1tempflip = j1flip[index1flip[b],index2flip[b]]
j2tempflip = j2flip[index1flip[b],index2flip[b]]
jtempflip = 0.5*(np.abs(j1tempflip)+np.abs(j2tempflip))
v1tempflip = v1flip[index1flip[b]]
v2tempflip = v2flip[index2flip[b]]
vtempflip = v1tempflip + v2tempflip
efficiencyflip.append(jtempflip*vtempflip/P_in)
maxEfficiencyflip.append(np.max(np.asarray(efficiencyflip)))
if am1pt5 == 1:
for b in range(len(index1AM15)):
j1tempAM15 = j1AM15[index1AM15[b],index2AM15[b]]
j2tempAM15 = j2AM15[index1AM15[b],index2AM15[b]]
jtempAM15 = 0.5*(np.abs(j1tempAM15)+np.abs(j2tempAM15))
v1tempAM15 = v1[index1AM15[b]]
v2tempAM15 = v2[index2AM15[b]]
vtempAM15 = v1tempAM15 + v2tempAM15
efficiencyAM15.append(jtempAM15*vtempAM15/P_in)
maxEfficiencyAM15.append(np.max(np.asarray(efficiencyAM15)))
if siliconTopLayer == 1:
j1tempAM15flip = np.zeros([len(index1AM15flip)])
j2tempAM15flip = np.zeros([len(index1AM15flip)])
v1tempAM15flip = np.zeros([len(index1AM15flip)])
v2tempAM15flip = np.zeros([len(index1AM15flip)])
efficiencyAM15flip = np.zeros([len(index1AM15flip)])
for b in range(len(index1AM15flip)):
j1tempAM15flip[b] = j1AM15flip[index1AM15flip[b],index2AM15flip[b]]
j2tempAM15flip[b] = j2AM15flip[index1AM15flip[b],index2AM15flip[b]]
v1tempAM15flip[b] = v1flip[index1AM15flip[b]]
v2tempAM15flip[b] = v2flip[index2AM15flip[b]]
jtempAM15flip = 0.5*(np.abs(j1tempAM15flip)+np.abs(j2tempAM15flip))
vtempAM15flip = v1tempAM15flip + v2tempAM15flip
efficiencyAM15flip = jtempAM15flip*vtempAM15flip/P_in
maxEfficiencyAM15flip.append(np.max(np.asarray(efficiencyAM15flip)))
plt.figure(fignum)
fignum += 1
if blackbody == 1:
plt.plot(eg1,maxEfficiency)
if am1pt5 == 1:
plt.plot(eg1,maxEfficiencyAM15,linewidth=3,color='black')
if siliconTopLayer == 1:
plt.plot(eg2flip,maxEfficiencyAM15flip,linewidth=3,color='blue')
# plt.title('Two-Layer Tandem Solar Cell with Silicon Base Layer')
plt.xlabel('Bandgap Energy of Top Layer (eV)')
plt.ylabel('Efficiency')
plt.xlim(0.2,3.2)
plt.ylim(0,0.49)
print('Finished with the Multi-Junction!')
plt.show()
| UTF-8 | Python | false | false | 62,121 | py | 4 | sq.py | 3 | 0.530078 | 0.480514 | 0 | 1,356 | 44.811947 | 301 |
gisat/puma | 17,712,445,139,599 | cea4e22957a4af97f5f39ec42e2683458fa05e98 | 611652db543d7a2fe7403a10bf214022d67c88d6 | /scripts/mtile.py | 95ad93391f00bc1fc65bf6bb667f7d4810e054a0 | []
| no_license | https://github.com/gisat/puma | 33345275b0cd6b2d311bdc640aafd2a0dfdb011b | 319b1183eae47f34fdd98f7f7225f6209ae7ddbb | refs/heads/dev | 2022-06-21T11:06:53.891984 | 2019-03-15T13:40:26 | 2019-03-15T13:40:26 | 11,924,172 | 2 | 1 | null | false | 2022-05-20T20:51:38 | 2013-08-06T13:04:13 | 2020-03-30T04:22:17 | 2022-05-20T20:51:38 | 138,510 | 4 | 0 | 32 | JavaScript | false | false | #!/usr/bin/env python
# -------------------------
# vytvori z rastru dlazdoce
# o velikosti ts_x a ts_y
#
# 2010-08-03 Vaclav Vobora, G2EUR
# -------------------------
import os
import re
import sys
try:
from osgeo.gdalconst import *
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
except ImportError:
from gdalconst import *
import gdal
import ogr
import osr
# -------------------------
def Usage():
name = os.path.basename(sys.argv[0])
print 'Usage: %s [-of format] [-co NAME=VALUE]* [-ts ts_x ts_y]' % name
print '\t\t [-o dst_dir] [src_filename]'
print ''
sys.exit(1)
# -------------------------
# --------------
# Mainline
# --------------
co = []
ts_x = 2500
ts_y = 2000
format = 'GTiff'
src_filename = None
dst_dir = None
i = 1
while (i < len(sys.argv)):
if (sys.argv[i] == '-of'):
format = (sys.argv[i+1])
i = i + 1
elif (sys.argv[i] == '-co'):
co.append(sys.argv[i+1])
i = i + 1
elif (sys.argv[i] == '-ts'):
ts_x = float(sys.argv[i+1])
ts_y = float(sys.argv[i+2])
i = i + 2
elif (sys.argv[i] == '-o'):
dst_dir = sys.argv[i+1]
i = i + 1
elif (src_filename is None):
src_filename = sys.argv[i]
else:
Usage()
i = i + 1
# end while
if (src_filename is None):
Usage()
if (dst_dir is None):
Usage()
print 'create option: %s' % co
print 'format: %s' % format
print 'ts: %s,%s' % (ts_x, ts_x)
print 'src_filename: %s' % src_filename
print 'dst_dir: %s' % dst_dir
# print '---'
ext = {'GTiff':'.tif', 'PCIDSK':'.pix', 'HFA':'.img', 'VRT':'.vrt', 'AAIGrig':'.asc'}
src_ds = gdal.Open(src_filename, GA_ReadOnly)
if (src_ds is None):
print 'Could not open file!'
sys.exit(1)
# print src_ds.RasterXSize
# print src_ds.RasterYSize
xpos = 0
ypos = 0
blockno = 0
blocksizex = ts_x
blocksizey = ts_y
xdim = src_ds.RasterXSize
ydim = src_ds.RasterYSize
while (ypos <= ydim):
while (xpos <= xdim):
# print xpos, ypos, blockno
# print src_ds.RasterXSize
# print src_ds.RasterYSize
_blocksizex = blocksizex
if (xpos + blocksizex) >= xdim:
_blocksizex = (xdim - xpos)
_blocksizey = blocksizey
if (ypos + blocksizey) >= ydim:
_blocksizey = (ydim - ypos)
if ((_blocksizex != 0) and (_blocksizey != 0)):
# dst_filename = os.path.join(dst_dir, os.path.splitext(os.path.basename(src_filename))[0] + '_' + str(blockno) + ext[format])
dst_filename = os.path.join(dst_dir, '%s_%.6d%s' % (os.path.splitext(os.path.basename(src_filename))[0], blockno, ext[format]))
if co:
cmd = 'gdal_translate -of %s %s -srcwin %s %s %s %s %s %s' % (format, '-co ' + ' -co '.join(co), xpos, ypos, _blocksizex, _blocksizey, src_filename, dst_filename)
else:
cmd = 'gdal_translate -of %s -srcwin %s %s %s %s %s %s' % (format, xpos, ypos, _blocksizex, _blocksizey, src_filename, dst_filename)
# end if
# testovaci prikaz
# cmd = 'gdal_translate -srcwin %s %s %s %s %s %s' % (xpos, ypos, _blocksizex, _blocksizey, src_filename, dst_filename)
print cmd
os.system(cmd)
# vytvoreni pyramid, pouze pro GTiff
# cmd = 'gdaladdo --config HFA_USE_RRD YES --config USE_RRD YES %s 2 4 8' % (dst_filename)
# print cmd
# os.system(cmd)
# end if
# print '# ---'
blockno = blockno + 1
xpos = xpos + blocksizex
# end while
ypos = ypos + blocksizey
xpos = 0
# end while
src_ds = None
| UTF-8 | Python | false | false | 3,738 | py | 498 | mtile.py | 430 | 0.521937 | 0.510166 | 0 | 159 | 22.509434 | 178 |
qiushye/traffic-data-process | 163,208,762,573 | 1939e565477cbc858ed259e4060321e4689062a6 | 86387b266cc19f8c9e367fde1d370271e75e18fc | /data_padding.py | ad4edbeefb31bafdd383cbfce66a3c30074d03e2 | []
| no_license | https://github.com/qiushye/traffic-data-process | 6a1b012e233b9773846a9f6bb66c4aa8d1d28e5c | f30e161845e716a148f31a215451e63f5e27d190 | refs/heads/master | 2020-02-29T13:54:26.867142 | 2017-07-16T08:14:55 | 2017-07-16T08:14:55 | 89,364,865 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 17 08:48:41 2017
@author: qiushye
"""
'''--这里采用的是一天48个时段--'''
import math
import pylab as pl
import copy
import csv
def init_day_data(period_num):
#day_data:date,day_velocity,weekday,period_usable
day_data = []
date = ''
day_velocity = {}
for i in range(period_num):
day_velocity[i] = -1
weekday = -1
period_usable = []
day_data.append(date)
day_data.append(day_velocity)
day_data.append(weekday)
day_data.append(period_usable)
return day_data
def get_week_data(period_num,data_dir,file_name):
week_data = {}
for i in range(7):
week_data[str(i)] = []
day_data = init_day_data(period_num)
with open(data_dir+file_name,'r') as f:
lines = f.readlines()
temp = lines[0].strip().split(',')
for line in lines:
row = line.strip().split(',')
if row[7] == temp[7]:
day_data[1][int(row[9])] = round(float(row[6]),2)
day_data[3].append(int(row[9]))
else:
day_data[0] = temp[7]
day_data[2] = temp[8]
week_data[day_data[2]].append(day_data)
temp = row
day_data = init_day_data(period_num)
if line == lines[-1]:
day_data[0] = temp[7]
day_data[2] = temp[8]
week_data[day_data[2]].append(day_data)
return week_data
def cal_sim(list1,list2):
#余弦相似性
sum1 = 0
sum2 = 0
sum3 = 0
for i in range(len(list1)):
if list1[i] != -1 and list2[i] != -1:
sum1 += list1[i] * list2[i]
sum2 += list1[i] ** 2
sum3 += list2[i] ** 2
if list1 != [] or list2 != []:
#print sum2,sum3
sim = sum1/(math.sqrt(sum2)*math.sqrt(sum3))
else:
sim = 0
return sim
def data_padding(week_data,period_num):
padded_data = {}
for k in xrange(7):
'''period_common = []
for period in range(period_num):
period_common.append(period)
period_common = set(period_common)'''
#period_total = set()
temp_data = week_data[str(k)]
sim_dic = {}
#for day_data_ in temp_data:
#period_common = period_common&set(day_data_[3])
#period_total = period_total|set(day_data_[3])
#print period_total
for i in range(len(temp_data)):
#max_sim = 0
sim_dic[i] = []
for j in range(len(temp_data)):
if i != j:
period_common = set(temp_data[i][3])&set(temp_data[j][3])
data_period_i = [temp_data[i][1][p] for p in period_common]
data_period_j = [temp_data[j][1][p] for p in period_common]
#print data_period_i
#print data_period_j
sim = cal_sim(data_period_i,data_period_j)
sim_dic[i].append([j,round(sim,2)])
sorted(sim_dic[i],key = lambda l : l[-1])[::-1]
if k == 1:
print sim_dic
for i in range(len(temp_data)):
for set_ in sim_dic[i]:
#print period
#print temp_data[i][1]
period_total = set()
period_total = set(temp_data[i][3])|set(temp_data[set_[0]][3])
for period in period_total:
if temp_data[i][1][period] == -1 and temp_data[set_[0]][1][period] != -1:
#直接填充?
#print str(k) +'--padding'
temp_data[i][1][period] = temp_data[set_[0]][1][period]
padded_data[str(k)] = temp_data
return padded_data
period_num = 49
data_dir = '/home/qiushye/road_id_division2_2/'
file_name = '59566302803.txt'
week_data = get_week_data(period_num,data_dir,file_name)
week_data_ori = copy.deepcopy(week_data)
for j in range(7):
print 'Day' + str(j)
for i in xrange(len(week_data[str(j)])):
print len([a for a in week_data[str(j)][i][1].values() if a != -1])
period_v_1 = {}
period_v_0 = {}
period_v_2 = {}
sorted(period_v_0.items(), key = lambda item:item[0])
padded_data = data_padding(week_data,period_num)
for period in xrange(period_num):
if week_data_ori['5'][1][1][period] != -1:
period_v_1[period] = week_data_ori['5'][1][1][period]
key_1 = sorted(period_v_1.keys())
period_v_1_ = [period_v_1[key] for key in key_1]
if week_data_ori['5'][0][1][period] != -1:
period_v_0[period] = week_data_ori['5'][0][1][period]
key_0 = sorted(period_v_0.keys())
period_v_0_ = [period_v_0[key] for key in key_0]
if padded_data['5'][1][1][period] != -1:
period_v_2[period] = padded_data['5'][1][1][period]
key_2 = sorted(period_v_2.keys())
period_v_2_ = [period_v_2[key] for key in key_2]
with open('/home/qiushye/velocity_compare_5.csv','wb') as csvfile:
spamwriter = csv.writer(csvfile,dialect='excel')
spamwriter.writerow(key_1)
spamwriter.writerow(period_v_1_)
spamwriter.writerow(key_0)
spamwriter.writerow(period_v_0_)
spamwriter.writerow(key_2[0:16])
spamwriter.writerow(period_v_2_[0:16])
spamwriter.writerow(key_2[16:])
spamwriter.writerow(period_v_2_[16:])
for key in sorted(period_v_1.keys()):
print str(key)+':'+str(period_v_1[key])
print '----'
for key in sorted(week_data_ori['5'][4][1].keys()):
if week_data_ori['5'][3][1][key] != -1:
print str(key)+':'+str(week_data_ori['5'][3][1][key])
#print week_data['1'][1][1]
x = []
for period in range(period_num):
x.append(period)
for i in xrange(len(week_data_ori['5'])):
day_data_last = week_data_ori['5'][i]
day_data_next = padded_data['5'][i]
#x1 = []
y1 = []
#x2 = []
y2 = []
n1 = 0
n2 = 0
for period in range(period_num):
if day_data_last[1][period] != -1:
#x1.append(period)
y1.append(day_data_last[1][period])
n1 += 1
else:
y1.append(-1)
if day_data_next[1][period] != -1:
#x2.append(period)
y2.append(day_data_next[1][period])
n2 += 1
else:
y2.append(-1)
print n1,n2
pl.plot(x,y1,'bo')
pl.plot(x,y2,'y^')
pl.title('data compare in ' + week_data['5'][i][0] )
pl.show() | UTF-8 | Python | false | false | 6,493 | py | 18 | data_padding.py | 17 | 0.518364 | 0.483651 | 0 | 194 | 32.268041 | 93 |
k0staa/JupyterNotebookForStock-RNN | 13,975,823,613,638 | 90f9ac98ceca66d2ac1208b5e959947e2376a8ad | 62ef57ebd90cab900e5d99364f8076c67eee8186 | /notebooks/stock_rnn/scripts/train_model.py | 9c0587c766b1e61d16f5e29fc3f383b7e4a2ee45 | []
| no_license | https://github.com/k0staa/JupyterNotebookForStock-RNN | 3db5920b4e3f56cb4989f3e27fd7297db8504c6c | 599802259066aa4c8f71ec498f599e5964c75331 | refs/heads/master | 2020-03-10T23:05:15.029169 | 2018-04-15T17:18:26 | 2018-04-15T17:18:26 | 129,633,358 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Run the following command to check Tensorboard:
$ tensorboard --logdir ./_logs
"""
import json
import os
import sys; sys.path.append("..")
import tensorflow as tf
from .build_graph import build_lstm_graph_with_config
from .config import DEFAULT_CONFIG, MODEL_DIR
from ..data_model import StockDataSet
def load_data(stock_name, input_size, num_steps):
stock_dataset = StockDataSet(stock_name, input_size=input_size, num_steps=num_steps,
test_ratio=0.1, close_price_only=True)
print ("Train data size:", len(stock_dataset.train_X))
print ("Test data size:", len(stock_dataset.test_X))
return stock_dataset
def _compute_learning_rates(config=DEFAULT_CONFIG):
learning_rates_to_use = [
config.init_learning_rate * (
config.learning_rate_decay ** max(float(i + 1 - config.init_epoch), 0.0)
) for i in range(config.max_epoch)
]
print( "Middle learning rate:", learning_rates_to_use[len(learning_rates_to_use) // 2])
return learning_rates_to_use
def train_lstm_graph(stock_name, lstm_graph, config=DEFAULT_CONFIG):
"""
stock_name (str)
lstm_graph (tf.Graph)
"""
stock_dataset = load_data(stock_name,
input_size=config.input_size,
num_steps=config.num_steps)
final_prediction = []
final_loss = None
graph_name = "%s_lr%.2f_lr_decay%.3f_lstm%d_step%d_input%d_batch%d_epoch%d" % (
stock_name,
config.init_learning_rate, config.learning_rate_decay,
config.lstm_size, config.num_steps,
config.input_size, config.batch_size, config.max_epoch)
print ("Graph Name:", graph_name)
learning_rates_to_use = _compute_learning_rates(config)
with tf.Session(graph=lstm_graph) as sess:
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter('_logs/' + graph_name, sess.graph)
writer.add_graph(sess.graph)
graph = tf.get_default_graph()
tf.global_variables_initializer().run()
inputs = graph.get_tensor_by_name('inputs:0')
targets = graph.get_tensor_by_name('targets:0')
learning_rate = graph.get_tensor_by_name('learning_rate:0')
test_data_feed = {
inputs: stock_dataset.test_X,
targets: stock_dataset.test_y,
learning_rate: 0.0
}
loss = graph.get_tensor_by_name('train/loss_mse:0')
minimize = graph.get_operation_by_name('train/loss_mse_adam_minimize')
prediction = graph.get_tensor_by_name('output_layer/add:0')
for epoch_step in range(config.max_epoch):
current_lr = learning_rates_to_use[epoch_step]
for batch_X, batch_y in stock_dataset.generate_one_epoch(config.batch_size):
train_data_feed = {
inputs: batch_X,
targets: batch_y,
learning_rate: current_lr
}
train_loss, _ = sess.run([loss, minimize], train_data_feed)
if epoch_step % 10 == 0:
test_loss, _pred, _summary = sess.run([loss, prediction, merged_summary], test_data_feed)
assert len(_pred) == len(stock_dataset.test_y)
print ("Epoch %d [%f]:" % (epoch_step, current_lr), test_loss)
if epoch_step % 50 == 0:
print ("Predictions:", [(
map(lambda x: round(x, 4), _pred[-j]),
map(lambda x: round(x, 4), stock_dataset.test_y[-j])
) for j in range(5)])
writer.add_summary(_summary, global_step=epoch_step)
print ("Final Results:")
final_prediction, final_loss = sess.run([prediction, loss], test_data_feed)
print (final_prediction, final_loss)
graph_saver_dir = os.path.join(MODEL_DIR, graph_name)
if not os.path.exists(graph_saver_dir):
os.mkdir(graph_saver_dir)
saver = tf.train.Saver()
saver.save(sess, os.path.join(
graph_saver_dir, "stock_rnn_model_%s.ckpt" % graph_name), global_step=epoch_step)
with open("final_predictions.{}.json".format(graph_name), 'w') as fout:
fout.write(json.dumps(final_prediction.tolist()))
def main(config=DEFAULT_CONFIG):
lstm_graph = build_lstm_graph_with_config(config=config)
train_lstm_graph('SP500', lstm_graph, config=config)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 4,475 | py | 7 | train_model.py | 1 | 0.596201 | 0.590168 | 0 | 121 | 35.983471 | 105 |
kedder/kdrvario | 2,851,858,322,691 | 59c6e4a592d25529f983487a41b2e46fd1f95f2c | fee42e456fa7e181f095bc4d63b085b67e1a1e22 | /monitor/stats.py | b21fb3f5c382521af371c35480726524a3201bf0 | []
| no_license | https://github.com/kedder/kdrvario | 531e2ea23e60dccfc55fe01d547fd26fa719cefd | 30fd7ddc8f4f268f825a24ab70f7316c5be61750 | refs/heads/master | 2020-03-30T03:31:06.174722 | 2012-10-09T18:59:16 | 2012-10-09T18:59:16 | 2,585,948 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import math
from kdrvmon.hardware import Hardware, SerialDataFeed, FileDataFeed
from kdrvmon.vario import Vario
def avg(data):
return sum(data) / len(data)
def rms(data, value):
return math.sqrt(avg([(x - value)**2 for x in data]))
def main():
log = sys.argv[1]
print "Analizing", log
feed = FileDataFeed(log)
feed.realtime = False
feed.autorewind = False
feed.open()
hw = Hardware(feed)
data = []
while True:
t, d = hw.read_serial()
if t == None:
break
if t == "pressure":
data.append(float(d))
print "Records read: %s" % len(data)
mean = avg(data)
print " Average:", mean, "pa"
print " RMS:", rms(data, mean), "pa"
# convert to altitude
vario = Vario()
data = [vario.pressure_to_alt(x) for x in data]
mean = avg(data)
print " Average:", mean, "m"
print " RMS:", rms(data, mean), "m"
main()
| UTF-8 | Python | false | false | 952 | py | 29 | stats.py | 23 | 0.57563 | 0.573529 | 0 | 47 | 19.255319 | 67 |
SrujanAakurathi/30-Days-of-Code-Hackerrank | 15,702,400,458,425 | 043d41ed76829c08bd7184ba1c2e951c9257734f | e02d3747bb60734d70fc56273627d357566d4fec | /Recursion.py | 12385343a13cb2929c8465503ac2809bcd9f8d9d | []
| no_license | https://github.com/SrujanAakurathi/30-Days-of-Code-Hackerrank | acc17ffaa4d4e7e5e8408be713943889521e459f | 93beeed186b48d36f37899e8b25ec3f4d721342a | refs/heads/master | 2021-01-22T18:32:54.818953 | 2017-03-15T15:58:59 | 2017-03-15T15:58:59 | 85,088,954 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def memoize(f):
c = {}
def fact(*args):
if args in c:
return c[args]
else:
c[args] = f(*args)
return c[args]
return fact
def factorial(n):
if ((n == 0) or (n == 1)):
return 1
else:
return n * factorial(n - 1)
print(factorial(int(input()))) | UTF-8 | Python | false | false | 333 | py | 11 | Recursion.py | 11 | 0.45045 | 0.438438 | 0 | 21 | 14.904762 | 35 |
zeidspex/f2019-dm | 13,572,096,681,237 | 95911dc72b25befdd6727b7c6e2f1dea2d8c13bc | ab300f7f3c330e7c3301afdd0955a3d0f87c2613 | /classification.py | d535667fdf9f992dcb5ea23ad0afcab09faa338e | []
| no_license | https://github.com/zeidspex/f2019-dm | 7ebcb11e7cf46976ff0eca7825ce83fc025113fa | 14f98b5c1f97e43a239293d1200d205e05c2cb6e | refs/heads/master | 2020-09-02T17:09:55.265986 | 2019-11-28T07:29:33 | 2019-11-28T07:29:33 | 219,265,981 | 0 | 0 | null | true | 2019-11-03T07:26:59 | 2019-11-03T07:26:58 | 2019-11-02T03:53:53 | 2019-11-02T03:53:51 | 511 | 0 | 0 | 0 | null | false | false | import io
import os
import pickle
import tarfile
import keras as ks
import numpy as np
import pandas as pd
import sklearn.cluster as cluster
import sklearn.metrics as metrics
class Classifier:
"""
A classifier based on K-means clustering and previously created
mappings between cluster labels and true labels
"""
def __init__(self, embedding_model, kmeans):
"""
:param embedding_model: Keras model for converting images to embeddings
:param kmeans: K-means model
"""
self.embedding_model = embedding_model
self.kmeans = kmeans
def predict(self, x):
"""
:param x: features (images)
:return: predicted classes
"""
z = self.embedding_model.predict(x)
yp = np.array(self.kmeans.predict(z))
return yp
def cluster_data(z_train, centroids):
"""
:param z_train: training data embeddings
:param centroids: initial centroids of the cluster
:return: predicted labels for training data (yp_train)
"""
return cluster.KMeans(
n_clusters=10, max_iter=1, n_jobs=-1, n_init=1, init=centroids
).fit(z_train)
def create_model(autoencoder, embedding_layer, x_train, y_train, sample_size):
"""
:param autoencoder: trained autoencoder model
:param embedding_layer: index of embedding layer
:param x_train: training features
:param y_train: training labels
:param sample_size: sample size for cluster labeling
:return: a classifier
"""
# Create embedding model
embedding_model = ks.models.Model(
inputs=autoencoder.inputs, outputs=autoencoder.layers[embedding_layer].output
)
# Train K-means model
z_train = embedding_model.predict(x_train)
centroids = np.array([
np.mean(
z_train[np.argwhere(y_train == i)].reshape(-1, z_train.shape[1])[0:sample_size],
axis=0
)
for i in range(10)
])
kmeans = cluster_data(z_train, centroids)
# Create classifier from embeddings model and K-means model and return it
return Classifier(embedding_model, kmeans)
def load_model(model_path):
"""
:param model_path: classifier path
:return: load classifier from hard drive
"""
with tarfile.open(model_path, mode='r') as in_file:
with open('embeddings.h5', 'wb') as out_file:
out_file.write(in_file.extractfile('embeddings.h5').read())
embeddings = ks.models.load_model('embeddings.h5')
os.remove('embeddings.h5')
kmeans = pickle.loads(in_file.extractfile('kmeans.pkl').read())
return Classifier(embeddings, kmeans)
def save_model(model, model_path):
"""
:param model: model to save
:param model_path: output path
:return: save classifier to the hard drive
"""
ks.models.save_model(model.embedding_model, model_path)
with open(model_path, 'rb') as in_file:
embedding_model = in_file.read()
with tarfile.open(model_path, mode='w') as out_file:
names = ['embeddings.h5', 'kmeans.pkl']
objects = [
embedding_model,
pickle.dumps(model.kmeans),
]
for name, data in zip(names, objects):
info = tarfile.TarInfo(name)
info.size = len(data)
out_file.addfile(info, io.BytesIO(data))
def test_model(clf, x_test, y_test, class_names, out_path=None):
"""
:param clf: classifier to test
:param x_test: features
:param y_test: labels
:param class_names: class names
:param out_path: path to save the CSV to (including file name)
:return: None
"""
yp_test = clf.predict(x_test)
precision = metrics.precision_score(y_test, yp_test, average=None)
recall = metrics.recall_score(y_test, yp_test, average=None)
f1 = metrics.f1_score(y_test, yp_test, average=None)
pd.set_option('display.max_columns', 10)
data = np.array(list(zip(precision, recall, f1)))
data = pd.DataFrame(
data.T, columns=class_names, index=['Precision', 'Recall', 'F1 Score']).round(2)
print(data.loc['Precision'].mean())
print(data.loc['Recall'].mean())
if out_path:
data.to_csv(out_path)
print(data)
| UTF-8 | Python | false | false | 4,220 | py | 17 | classification.py | 13 | 0.63673 | 0.63128 | 0 | 141 | 28.929078 | 92 |
vdhadda2/quiz | 15,753,940,083,856 | 373b2a263939654d432240a60d726118c12b1873 | 28e1cadf4f88115f0c0fe8d20973f11f260c7230 | /quiz.py | 0d33d61b585e5081a437eadf70485110feb87fd3 | []
| no_license | https://github.com/vdhadda2/quiz | 73e7737b4666ae08f84bdfc91eeb441956c36480 | c3218c5a3dc994282a8a71aef62088fa2080547c | refs/heads/master | 2021-01-23T00:09:51.746420 | 2017-03-21T13:52:47 | 2017-03-21T13:52:47 | 85,704,926 | 0 | 0 | null | true | 2017-03-21T13:26:20 | 2017-03-21T13:26:20 | 2017-03-21T13:10:50 | 2017-03-21T13:10:50 | 0 | 0 | 0 | 0 | null | null | null | """
-----------------------------------------------------------------------
Question 1.
Given two int values, return True if one is negative and one is
positive. Except if the parameter "negative" is True, then return True
only if both are negative.
-----------------------------------------------------------------------
"""
def pos_neg(a, b, negative):
pass
public boolean posNeg(int a, int b,boolean negative) {
if (negative && a < 0 && b < 0) {
return true;
}
else if (!negative && ((a < 0 && b > 0) || (a > 0 && b < 0))) {
return true;
}
return false;
}
# Expected outputs:
print(pos_neg(1, -1, False))
# True
print(pos_neg(-1, 1, False))
# True
print(pos_neg(-4, -5, True))
# True
print(pos_neg(-2, -5, False))
# False
print(pos_neg(1, 2, False))
# False
"""
-----------------------------------------------------------------------
Question 2.
A year with 366 days is called a leap year. Leap years are necessary to
keep the calendar synchronized with the sun because the earth revolves
around the sun once every 365.25 days. Actually, that figure is not
entirely precise, and for all dates after 1582 the Gregorian correction
applies. Usually years that are divisible by 4 are leap years, for
example 1996. However, years that are divisible by 100 (for example,
1900) are not leap years, but years that are divisible by 400 are leap
years (for example, 2000).
-----------------------------------------------------------------------
"""
# Pseudocode answer:
#   welcome the user
#   prompt for input of INPUT_YEAR (type = int)
#   store the input in the constant INPUT_YEAR
#   if INPUT_YEAR < 1582:
#       output "I cannot count that far back. I can only evaluate years after 1582."
#   else if (INPUT_YEAR % 4 == 0 and INPUT_YEAR % 100 > 0) or (INPUT_YEAR % 400 == 0):
#       output "INPUT_YEAR is a leap year"
#   else:
#       output "INPUT_YEAR is not a leap year"


def leap_year(year):
    if year < 1582:
        raise ValueError("can only evaluate years after 1582")
    return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
# When you've completed your function, uncomment the
# following lines and run this file to test!
# print(leap_year(1900))
# print(leap_year(2016))
# print(leap_year(2017))
# print(leap_year(2000))
"""
-----------------------------------------------------------------------
Question 3:
Write a function with loops that computes the sum of all squares between
1 and n (inclusive).
-----------------------------------------------------------------------
"""
# Original attempt, written in Java (note: reassigning n to n * n inside the loop
# corrupts the loop counter, so this version does not compute the sum of squares):
# public class SumSquares {
#     public static void main(String[] args) {
#         Scanner reader = new Scanner(System.in);
#         int n = 1;
#         int sum = 0;
#         while (n <= 100) {
#             n = (n * n);
#             n++;
#             sum = (sum + n);
#         }
#         System.out.println(sum);
#     }
# }


def sum_squares(n):
    total = 0
    for i in range(1, n + 1):
        total += i * i
    return total
# When you've completed your function, uncomment the
# following lines and run this file to test!
# print(sum_squares(1))
# print(sum_squares(100))
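# (expected output: 1 for sum_squares(1) and 338350 for sum_squares(100))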
| UTF-8 | Python | false | false | 2,965 | py | 1 | quiz.py | 1 | 0.527487 | 0.495784 | 0 | 114 | 24.008772 | 90 |
sreekanthpv/mypythonworks | 14,568,529,069,848 | 119196649e9a3fc21ded40f6a36a3d025391e36f | ddcc93b5b59e1a0d36a5b2ce6b55f13e7239e0f3 | /advanced_python_test/q11.py | eae8dd7702181d75c60f6991fcbcdb9fda12d9cc | []
| no_license | https://github.com/sreekanthpv/mypythonworks | a449d8a0d0c9dc1973e03ef627c54dfc1b151fb1 | c141ab99627bf7a082621329dfb2546da651bd6b | refs/heads/master | 2023-07-10T13:12:15.011257 | 2021-08-01T14:40:15 | 2021-08-01T14:40:15 | 385,128,323 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 11. Write a Python program to find the sequences of one upper case letter followed by lower case letters?
import re
x='[A-Z]{1}[a-z]+'
a=input('enter string')
v=re.fullmatch(x,a)
if v is not None:
print('valid')
else:
print('invalid') | UTF-8 | Python | false | false | 245 | py | 218 | q11.py | 198 | 0.685714 | 0.673469 | 0 | 10 | 23.6 | 107 |
ORNL-CEES/thermochimica | 15,736,760,175,553 | 316ff92bfeb434c91d029d1fd83d0e24f070b535 | 0ca9620ce0091d826e5cc76bd3c2c6b441445373 | /python/binaryPhaseDiagramGUI.py | b7af42f153a6676dce4e97ed5f80b42db6a6481b | [
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
]
| permissive | https://github.com/ORNL-CEES/thermochimica | 42b8b5c7ffcceebc37acf4f146290ada93d098e1 | 7cda2eaae5cf73b67c76a3e7602df0f83d8a5611 | refs/heads/master | 2023-08-08T20:36:20.457000 | 2023-07-21T20:55:32 | 2023-07-21T20:55:32 | 105,085,437 | 48 | 28 | BSD-3-Clause | false | 2023-07-21T20:55:34 | 2017-09-28T01:02:02 | 2023-03-29T07:49:05 | 2023-07-21T20:55:32 | 12,083 | 36 | 21 | 6 | Fortran | false | false | import binaryPhaseDiagramFunctions
import PySimpleGUI as sg
import os
import sys
import pickle
import copy
import matplotlib.pyplot as plt
import numpy as np
import thermoToolsGUI
class CalculationWindow:
def __init__(self, parent, datafile, nElements, elements, active):
self.parent = parent
self.datafile = datafile
self.nElements = nElements
self.elements = elements
self.makeLayout()
self.sgw = sg.Window(f'Phase Diagram Setup: {os.path.basename(self.datafile)}', self.layout, location = [400,0], finalize=True)
windowList.append(self)
self.children = []
self.calculation = binaryPhaseDiagramFunctions.diagram(self.datafile, True, True)
self.macro = []
self.macroSaveName = 'macroPhaseDiagram.py'
def close(self):
for child in self.children:
child.close()
for fig in self.calculation.figureList:
plt.close(fig=fig)
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Exit':
self.close()
elif event =='Run':
cancelRun = False
grid_density = 10
try:
tempstep = int(values['-grid_density-'])
if tempstep >= 0:
grid_density = tempstep
except:
pass
if grid_density > 200:
cancelRun = True
confirmLayout = [[sg.Text('The selected calculation is large and may take some time.')],[sg.Button('Continue'), sg.Button('Cancel')]]
confirmWindow = sg.Window('Large calculation confirmation', confirmLayout, location = [400,0], finalize=True, keep_on_top = True)
while True:
event, values = confirmWindow.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
break
elif event == 'Continue':
cancelRun = False
break
confirmWindow.close()
self.calculation.makeBackup()
pressure = 1
try:
tempPress = float(values['-pressure-'])
if 1e-6 < tempPress < 1e6:
pressure = float(values['-pressure-'])
except:
pass
tunit = values['-tunit-']
punit = values['-punit-']
tlo = 300
try:
templo = float(values['-temperature-'])
if 295 <= templo <= 6000:
tlo = templo
except:
pass
thi = 1000
try:
temphi = float(values['-endtemperature-'])
if 295 <= temphi <= 6000:
thi = temphi
except:
pass
el1 = values['-el1-']
el2 = values['-el2-']
try:
if (str(el1) == str(el2)) or (float(tlo) == float(thi)):
cancelRun = True
repeatLayout = [[sg.Text('Values cannot be equal.')],[sg.Button('Cancel')]]
repeatWindow = sg.Window('Repeat value notification', repeatLayout, location = [400,0], finalize=True, keep_on_top = True)
while True:
event, values = repeatWindow.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
break
repeatWindow.close()
return
except ValueError:
errorLayout = [[sg.Text('Invalid value detected.')],[sg.Button('Cancel')]]
errorWindow = sg.Window('Invalid value notification', errorLayout, location = [400,0], finalize=True, keep_on_top = True)
while True:
event, values = errorWindow.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
break
errorWindow.close()
return
if not cancelRun:
self.calculation.run(grid_density,grid_density,pressure,tunit,punit,0,1,tlo,thi,el1,el2,'moles',fuzzy=values["-fuzzy-"])
self.calculation.makePlot()
self.sgw.Element('Refine').Update(disabled = False)
self.sgw.Element('Auto Refine').Update(disabled = False)
self.sgw.Element('Auto Smoothen').Update(disabled = False)
self.sgw.Element('Add Label').Update(disabled = False)
self.sgw.Element('Auto Label').Update(disabled = False)
self.sgw.Element('Plot').Update(disabled = False)
self.sgw.Element('Undo').Update(disabled = False)
self.sgw.Element('Inspect').Update(disabled = False)
self.sgw.Element('Export Diagram Data').Update(disabled = False)
self.sgw.Element('Export Plot').Update(disabled = False)
self.macro.append(f'macroPD.run({grid_density},{grid_density},{pressure},"{tunit}","{punit}",{0},{1},{tlo},{thi},"{el1}","{el2}","moles",fuzzy={values["-fuzzy-"]})')
elif event =='Refine':
refineWindow = RefineWindow(self)
self.children.append(refineWindow)
elif event =='Auto Refine':
self.calculation.makeBackup()
self.calculation.refinery()
self.calculation.makePlot()
self.macro.append('macroPD.makeBackup()')
self.macro.append('macroPD.refinery()')
elif event =='Auto Smoothen':
self.calculation.makeBackup()
self.sgw.Element('Undo').Update(disabled = False)
self.calculation.autoSmooth()
self.calculation.makePlot()
self.macro.append('macroPD.makeBackup()')
self.macro.append('macroPD.autoSmooth()')
elif event =='Add Label':
labelWindow = LabelWindow(self)
self.children.append(labelWindow)
elif event =='Auto Label':
self.calculation.makeBackup()
self.calculation.autoLabel()
self.calculation.makePlot()
self.sgw.Element('Remove Label').Update(disabled = False)
self.macro.append('macroPD.makeBackup()')
self.macro.append('macroPD.autoLabel()')
elif event =='Remove Label':
removeWindow = RemoveWindow(self)
self.children.append(removeWindow)
elif event =='Plot':
self.calculation.makePlot()
elif event =='Export Plot':
exportStatus = self.calculation.exportPlot()
if exportStatus:
errorLayout = [[sg.Text('The export failed, try changing plot settings.')],[sg.Button('Continue'), sg.Button('Cancel')]]
errorWindow = sg.Window('Plot export failed', errorLayout, location = [400,0], finalize=True, keep_on_top = True)
while True:
event, values = errorWindow.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Continue':
break
errorWindow.close()
else:
self.macro.append(f'macroPD.makePlot()')
self.macro.append(f'macroPD.exportPlot()')
elif event =='Plot Settings':
settingsWindow = SettingsWindow(self)
self.children.append(settingsWindow)
elif event =='Undo':
for fig in self.calculation.figureList:
plt.close(fig=fig)
backup = copy.deepcopy(self.calculation.backup)
self.calculation = self.calculation.backup
self.calculation.backup = backup
self.macro.append('backup = copy.deepcopy(macroPD.backup)')
self.macro.append('macroPD = macroPD.backup')
self.macro.append('macroPD.backup = backup')
self.calculation.makePlot()
self.sgw.Element('Refine').Update(disabled = False)
self.sgw.Element('Auto Refine').Update(disabled = False)
self.sgw.Element('Auto Smoothen').Update(disabled = False)
self.sgw.Element('Add Label').Update(disabled = False)
self.sgw.Element('Auto Label').Update(disabled = False)
self.sgw.Element('Plot').Update(disabled = False)
if len(self.calculation.labels) > 0:
self.sgw.Element('Remove Label').Update(disabled = False)
elif event =='Add Data':
self.calculation.makeBackup()
addDataWindow = thermoToolsGUI.PhaseDiagramAddDataWindow(self,windowList)
self.children.append(addDataWindow)
elif event =='Inspect':
self.calculation.makeBackup()
inspectWindow = InspectWindow(self)
self.children.append(inspectWindow)
elif event =='Export Diagram Data':
saveDataWindow = SaveDataWindow(self)
self.children.append(saveDataWindow)
elif event =='Load Diagram':
self.calculation.makeBackup()
loadDataWindow = LoadDataWindow(self)
self.children.append(loadDataWindow)
elif event =='Clear Macro':
self.macro = []
elif event =='Export Macro':
with open('python/' + self.macroSaveName, 'w') as f:
f.write('import binaryPhaseDiagramFunctions\n')
f.write('import copy\n')
f.write(f'macroPD = binaryPhaseDiagramFunctions.diagram("{self.datafile}", False, False)\n')
for command in self.macro:
f.write(f'{command}\n')
f.write('macroPD.makePlot()\n')
elif event =='Run Macro':
if 'macroPhaseDiagram' in sys.modules:
del sys.modules['macroPhaseDiagram']
import macroPhaseDiagram
self.calculation = macroPhaseDiagram.macroPD
self.calculation.active = True
self.calculation.interactivePlot = True
elif event =='Macro Settings':
macroSettingsWindow = thermoToolsGUI.PhaseDiagramMacroSettingsWindow(self,windowList)
self.children.append(macroSettingsWindow)
def makeLayout(self):
elSelectLayout = [sg.Column([[sg.Text('Element 1')],[sg.Combo(self.elements[:self.nElements],default_value=self.elements[0],key='-el1-')]],vertical_alignment='t'),
sg.Column([[sg.Text('Element 2')],[sg.Combo(self.elements[:self.nElements],default_value=self.elements[1],key='-el2-')]],vertical_alignment='t')]
# xLayout = [sg.Column([[sg.Text('Start Element 2 Concentration')],[sg.Input(key='-xlo-',size=(thermoToolsGUI.inputSize,1))],
# [sg.Text('Concentration unit')],[sg.Combo(['mole fraction'],default_value='mole fraction',key='-munit-')]],vertical_alignment='t'),
# sg.Column([[sg.Text('End Element 2 Concentration')],[sg.Input(key='-xhi-',size=(thermoToolsGUI.inputSize,1))]],vertical_alignment='t'),
# sg.Column([[sg.Text('# of steps')],[sg.Input(key='-nxstep-',size=(8,1))]],vertical_alignment='t')]
tempLayout = [sg.Column([[sg.Text('Minimum Temperature')],[sg.Input(key='-temperature-',size=(thermoToolsGUI.inputSize,1))],
[sg.Text('Temperature unit')],[sg.Combo(['K', 'C', 'F'],default_value='K',key='-tunit-')]],vertical_alignment='t'),
sg.Column([[sg.Text('Maximum Temperature')],[sg.Input(key='-endtemperature-',size=(thermoToolsGUI.inputSize,1))]],vertical_alignment='t')]
presLayout = [sg.Column([[sg.Text('Pressure')],[sg.Input(key='-pressure-',size=(thermoToolsGUI.inputSize,1))],
[sg.Text('Pressure unit')],[sg.Combo(['atm', 'Pa', 'bar'],default_value='atm',key='-punit-')]],vertical_alignment='t')]
densityLayout = [sg.Column([[sg.Text('Initial grid density')],[sg.Input(key='-grid_density-',size=(8,1))],
[sg.Checkbox('Use fuzzy stoichiometry',key='-fuzzy-')]],vertical_alignment='t')]
buttonLayout = [
sg.Column([[sg.Button('Run', size = thermoToolsGUI.buttonSize)],
[sg.Button('Undo', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Exit(size = thermoToolsGUI.buttonSize)],
[sg.Button('Add Data', size = thermoToolsGUI.buttonSize)],
[sg.Button('Macro Settings', size = thermoToolsGUI.buttonSize)]],vertical_alignment='t'),
sg.Column([[sg.Button('Refine', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Auto Refine', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Auto Smoothen', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Inspect', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Run Macro', size = thermoToolsGUI.buttonSize)]],vertical_alignment='t'),
sg.Column([[sg.Button('Add Label', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Auto Label', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Remove Label', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Load Diagram', size = thermoToolsGUI.buttonSize)],
[sg.Button('Export Macro', size = thermoToolsGUI.buttonSize)]],vertical_alignment='t'),
sg.Column([[sg.Button('Plot', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Export Plot', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Plot Settings', size = thermoToolsGUI.buttonSize)],
[sg.Button('Export Diagram Data', disabled = True, size = thermoToolsGUI.buttonSize)],
[sg.Button('Clear Macro', size = thermoToolsGUI.buttonSize)]],vertical_alignment='t')
]
self.layout = [elSelectLayout,tempLayout,presLayout,densityLayout,buttonLayout]
class RefineWindow:
def __init__(self, parent):
self.parent = parent
windowList.append(self)
xRefLayout = [sg.Column([[sg.Text('Start Concentration')],[sg.Input(key='-xlor-',size=(thermoToolsGUI.inputSize,1))]],vertical_alignment='t'),
sg.Column([[sg.Text('End Concentration')],[sg.Input(key='-xhir-',size=(thermoToolsGUI.inputSize,1))]],vertical_alignment='t'),
sg.Column([[sg.Text('# of steps')],[sg.Input(key='-nxstepr-',size=(8,1))]],vertical_alignment='t')]
tempRefLayout = [sg.Column([[sg.Text('Minimum Temperature')],[sg.Input(key='-temperaturer-',size=(thermoToolsGUI.inputSize,1))]],vertical_alignment='t'),
sg.Column([[sg.Text('Maximum Temperature')],[sg.Input(key='-endtemperaturer-',size=(thermoToolsGUI.inputSize,1))]],vertical_alignment='t'),
sg.Column([[sg.Text('# of steps',key='-tsteplabel-')],[sg.Input(key='-ntstepr-',size=(8,1))]],vertical_alignment='t')]
refineLayout = [xRefLayout,tempRefLayout,[sg.Button('Refine'), sg.Button('Cancel')]]
self.sgw = sg.Window('Phase diagram refinement', refineLayout, location = [400,0], finalize=True)
self.children = []
def close(self):
for child in self.children:
child.close()
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
self.close()
elif event =='Refine':
cancelRun = False
ntstep = 10
try:
tempstep = int(values['-ntstepr-'])
if tempstep >= 0:
ntstep = tempstep
except:
pass
nxstep = 10
try:
tempstep = int(values['-nxstepr-'])
if tempstep >= 0:
nxstep = tempstep
except:
pass
if (float(ntstep) * float(nxstep)) > 50000:
cancelRun = True
confirmLayout = [[sg.Text('The selected calculation is large and may take some time.')],[sg.Button('Continue'), sg.Button('Cancel')]]
confirmWindow = sg.Window('Large calculation confirmation', confirmLayout, location = [400,0], finalize=True, keep_on_top = True)
while True:
event, values = confirmWindow.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
break
elif event == 'Continue':
cancelRun = False
break
confirmWindow.close()
xlo = 0
try:
templo = float(values['-xlor-'])
if 0 <= templo <= 1:
xlo = templo
except:
pass
xhi = 1
try:
temphi = float(values['-xhir-'])
if 0 <= temphi <= 1:
xhi = temphi
except:
pass
tlo = 300
try:
templo = float(values['-temperaturer-'])
if 295 <= templo <= 6000:
tlo = templo
except:
pass
thi = 1000
try:
temphi = float(values['-endtemperaturer-'])
if 295 <= temphi <= 6000:
thi = temphi
except:
pass
if not cancelRun:
self.parent.calculation.makeBackup()
self.parent.sgw.Element('Undo').Update(disabled = False)
self.parent.calculation.writeInputFile(xlo,xhi,nxstep,tlo,thi,ntstep)
self.parent.calculation.runCalc()
self.parent.calculation.makePlot()
self.parent.macro.append('macroPD.makeBackup()')
self.parent.macro.append(f'macroPD.writeInputFile({xlo},{xhi},{nxstep},{tlo},{thi},{ntstep})')
self.parent.macro.append('macroPD.runCalc()')
class LabelWindow:
def __init__(self, parent):
self.parent = parent
windowList.append(self)
xLabLayout = [[sg.Text('Element 2 Concentration')],[sg.Input(key='-xlab-',size=(thermoToolsGUI.inputSize,1))]]
tLabLayout = [[sg.Text('Temperature')],[sg.Input(key='-tlab-',size=(thermoToolsGUI.inputSize,1))]]
labelLayout = [xLabLayout,tLabLayout,[sg.Button('Add Label'), sg.Button('Cancel')]]
self.sgw = sg.Window('Add phase label', labelLayout, location = [400,0], finalize=True)
self.children = []
def close(self):
for child in self.children:
child.close()
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
self.close()
elif event =='Add Label':
try:
try:
xlab = float(values['-xlab-'])
except ValueError:
num, den = values['-xlab-'].split('/')
xlab = float(num)/float(den)
tlab = float(values['-tlab-'])
if (0 <= xlab <= 1) and (295 <= tlab <= 6000):
self.parent.calculation.makeBackup()
self.parent.sgw.Element('Undo').Update(disabled = False)
self.parent.calculation.addLabel(xlab,tlab)
self.parent.calculation.makePlot()
self.parent.sgw.Element('Remove Label').Update(disabled = False)
self.parent.macro.append('macroPD.makeBackup()')
self.parent.macro.append(f'macroPD.addLabel({xlab},{tlab})')
except:
pass
class RemoveWindow:
def __init__(self, parent):
self.parent = parent
windowList.append(self)
headingsLayout = [[sg.Text('Label Text', size = [55,1],justification='left'),
sg.Text('Concentration',size = [15,1],justification='center'),
sg.Text('Temperature', size = [15,1],justification='center'),
sg.Text('Remove Label?',size = [15,1])]]
labelListLayout = []
for i in range(len(self.parent.calculation.labels)):
labelListLayout.append([[sg.Text(self.parent.calculation.labels[i][1],size = [55,1],justification='left'),
sg.Text("{:.3f}".format(float(self.parent.calculation.labels[i][0][0])),size = [15,1],justification='center'),
sg.Text("{:.0f}".format(float(self.parent.calculation.labels[i][0][1])),size = [15,1],justification='center'),
sg.Checkbox('',key='-removeLabel'+str(i)+'-',pad=[[40,0],[0,0]])]])
removeLayout = [headingsLayout,labelListLayout,[sg.Button('Remove Label(s)'), sg.Button('Cancel')]]
self.sgw = sg.Window('Remove phase label', removeLayout, location = [400,0], finalize=True)
self.children = []
def close(self):
for child in self.children:
child.close()
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
self.close()
if event == 'Remove Label(s)':
self.parent.calculation.makeBackup()
self.parent.macro.append('macroPD.makeBackup()')
self.parent.sgw.Element('Undo').Update(disabled = False)
tempLength = len(self.parent.calculation.labels)
for i in reversed(range(tempLength)):
try:
if values['-removeLabel'+str(i)+'-']:
del self.parent.calculation.labels[i]
self.parent.macro.append(f'del macroPD.labels[{i}]')
except KeyError:
# If a new label was created since this window was opened, this will occur
continue
if len(self.parent.calculation.labels) == 0:
self.parent.sgw.Element('Remove Label').Update(disabled = True)
self.parent.calculation.makePlot()
self.close()
class SettingsWindow:
def __init__(self, parent):
self.parent = parent
windowList.append(self)
if self.parent.calculation.plotMarker == '-':
line = True
point = False
both = False
elif self.parent.calculation.plotMarker == '.':
line = False
point = True
both = False
else:
line = False
point = False
both = True
if self.parent.calculation.plotColor == 'colorful':
colorful = True
bland = False
else:
colorful = False
bland = True
if self.parent.calculation.experimentColor == 'colorful':
expcolorful = True
expbland = False
else:
expcolorful = False
expbland = True
settingsLayout = [[sg.Text('Marker Style:')],
[sg.Radio('Lines', 'mstyle', default=line, enable_events=True, key='-mline-')],
[sg.Radio('Points','mstyle', default=point, enable_events=True, key='-mpoint-')],
[sg.Radio('Both', 'mstyle', default=both, enable_events=True, key='-mboth-')],
[sg.Text('Plot Colors:')],
[sg.Radio('Colorful', 'mcolor', default=colorful, enable_events=True, key='-mcolorful-')],
[sg.Radio('Black', 'mcolor', default=bland, enable_events=True, key='-mbland-')],
[sg.Text('Experimental Data Colors:')],
[sg.Radio('Colorful', 'mexpcolor', default=expcolorful, enable_events=True, key='-mexpcolorful-')],
[sg.Radio('Black', 'mexpcolor', default=expbland, enable_events=True, key='-mexpbland-')],
[sg.Text('Show:')],
[sg.Checkbox('Experimental Data', default=self.parent.calculation.showExperiment, key='-showExperiment-'),
sg.Checkbox('Loaded Diagram', default=self.parent.calculation.showLoaded, key='-showLoaded-')],
[sg.Text('Auto-Label Settings:')],
[sg.Checkbox('1-Phase Regions', default=self.parent.calculation.label1phase, key='-label1phase-'),
sg.Checkbox('2-Phase Regions', default=self.parent.calculation.label2phase, key='-label2phase-')],
[sg.Text('Export Filename'),sg.Input(key='-filename-',size=(thermoToolsGUI.inputSize,1))],
[sg.Text('Export Format'),sg.Combo(['png', 'pdf', 'ps', 'eps', 'svg'],default_value='png',key='-format-')],
[sg.Text('Export DPI'),sg.Input(key='-dpi-',size=(thermoToolsGUI.inputSize,1))],
[sg.Button('Accept')]]
self.sgw = sg.Window('Plot Settings', settingsLayout, location = [400,0], finalize=True)
self.children = []
def close(self):
# Log settings in macro before closing
self.parent.macro.append(f'macroPD.plotMarker = "{self.parent.calculation.plotMarker}"')
self.parent.macro.append(f'macroPD.plotColor = "{self.parent.calculation.plotColor}"')
self.parent.macro.append(f'macroPD.experimentColor = "{self.parent.calculation.experimentColor}"')
self.parent.macro.append(f'macroPD.showExperiment = {self.parent.calculation.showExperiment}')
self.parent.macro.append(f'macroPD.exportFileName = "{self.parent.calculation.exportFileName}"')
self.parent.macro.append(f'macroPD.exportFormat = "{self.parent.calculation.exportFormat}"')
self.parent.macro.append(f'macroPD.exportDPI = {self.parent.calculation.exportDPI}')
self.parent.macro.append(f'macroPD.showLoaded = {self.parent.calculation.showLoaded}')
self.parent.macro.append(f'macroPD.label1phase = {self.parent.calculation.label1phase}')
self.parent.macro.append(f'macroPD.label2phase = {self.parent.calculation.label2phase}')
for child in self.children:
child.close()
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED:
self.close()
elif event == '-mline-':
self.parent.calculation.plotMarker = '-'
elif event =='-mpoint-':
self.parent.calculation.plotMarker = '.'
elif event =='-mboth-':
self.parent.calculation.plotMarker = '.-'
elif event =='-mcolorful-':
self.parent.calculation.plotColor = 'colorful'
elif event =='-mbland-':
self.parent.calculation.plotColor = 'bland'
elif event =='-mexpcolorful-':
self.parent.calculation.experimentColor = 'colorful'
elif event =='-mexpbland-':
self.parent.calculation.experimentColor = 'bland'
elif event =='Accept':
self.parent.calculation.showExperiment = values['-showExperiment-']
self.parent.calculation.showLoaded = values['-showLoaded-']
self.parent.calculation.label1phase = values['-label1phase-']
self.parent.calculation.label2phase = values['-label2phase-']
try:
if str(values['-filename-']) != '':
self.parent.calculation.exportFileName = str(values['-filename-'])
except:
pass
self.parent.calculation.exportFormat = values['-format-']
try:
tempDPI = int(values['-dpi-'])
                if 0 < tempDPI < 10000:
                    self.parent.calculation.exportDPI = tempDPI
except:
pass
self.parent.calculation.makePlot()
self.close()
class InspectWindow:
def __init__(self,parent):
self.parent = parent
windowList.append(self)
dataColumn = [
[sg.Text('Data Points')],
[sg.Listbox(values=[], enable_events=True, size=(30, 50), key='-dataList-')]
]
outputColumn = [
[sg.Text('Calculation Details')],
[sg.Multiline(key='-details-', size=(50,10), no_scrollbar=True)],
[sg.Text(key = '-status-')],
[sg.Button('Toggle Active/Suppressed Status', disabled = True)],
[sg.Text('Filter points', font='underline')],
[sg.Text('Temperature Range:')],
[sg.Input(key='-tfilterlow-',size=(thermoToolsGUI.inputSize,1)),sg.Input(key='-tfilterhi-',size=(thermoToolsGUI.inputSize,1))],
[sg.Text(f'{self.parent.calculation.el2} Concentration Range:')],
[sg.Input(key='-xfilterlow-',size=(thermoToolsGUI.inputSize,1)),sg.Input(key='-xfilterhi-',size=(thermoToolsGUI.inputSize,1))],
[sg.Text('Contains Phases:')],
[sg.Combo(['']+self.parent.calculation.phases, key = '-pfilter1-'),sg.Combo(['']+self.parent.calculation.phases, key = '-pfilter2-')],
[sg.Text('Active/Suppressed Status:')],
[sg.Combo(['','Active','Suppressed'], key = '-activefilter-')],
[sg.Button('Apply Filter')]
]
self.data = [[i, f'{self.parent.calculation.ts[i]:6.2f} K {self.parent.calculation.x1[i]:4.3f} {self.parent.calculation.x2[i]:4.3f}'] for i in range(len(self.parent.calculation.ts))]
self.sgw = sg.Window('Data inspection',
[[sg.Pane([
sg.Column(dataColumn, element_justification='l', expand_x=True, expand_y=True),
sg.Column(outputColumn, element_justification='c', expand_x=True, expand_y=True)
], orientation='h', k='-PANE-')]],
location = [0,0], finalize=True)
self.sgw['-dataList-'].update(self.data)
self.children = []
self.index = -1
def close(self):
for child in self.children:
child.close()
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Exit':
self.close()
elif event == '-dataList-':
self.index = self.parent.calculation.pointIndex[values['-dataList-'][0][0]]
self.sgw['-details-'].update(self.parent.calculation.pointDetails[self.index])
self.sgw['Toggle Active/Suppressed Status'].update(disabled = False)
self.sgw['-status-'].update(f'{"Suppressed" if self.parent.calculation.suppressed[self.index] else "Active"}')
elif event == 'Toggle Active/Suppressed Status':
if self.index >= 0:
self.parent.calculation.suppressed[self.index] = not(self.parent.calculation.suppressed[self.index])
self.parent.macro.append(f'macroPD.suppressed[{self.index}] = not(macroPD.suppressed[{self.index}])')
self.sgw['-status-'].update(f'{"Suppressed" if self.parent.calculation.suppressed[self.index] else "Active"}')
elif event == 'Apply Filter':
tlo = -np.Inf
thi = np.Inf
xlo = -np.Inf
xhi = np.Inf
try:
tlo = float(values['-tfilterlow-'])
except:
pass
try:
thi = float(values['-tfilterhi-'])
except:
pass
try:
xlo = float(values['-xfilterlow-'])
except:
pass
try:
xhi = float(values['-xfilterhi-'])
except:
pass
self.data = []
for i in range(len(self.parent.calculation.ts)):
# Check temperature
tfilt = tlo <= self.parent.calculation.ts[i] and thi >= self.parent.calculation.ts[i]
# Check concentration
xfilt = (xlo <= self.parent.calculation.x1[i] and xhi >= self.parent.calculation.x1[i]) or (xlo <= self.parent.calculation.x2[i] and xhi >= self.parent.calculation.x2[i])
# Check phases present
pfilt = (values['-pfilter1-'] == '' or values['-pfilter1-'] == self.parent.calculation.p1[i] or values['-pfilter1-'] == self.parent.calculation.p2[i]) and (values['-pfilter2-'] == '' or values['-pfilter2-'] == self.parent.calculation.p1[i] or values['-pfilter2-'] == self.parent.calculation.p2[i])
# Check active/suppressed status
afilt = (values['-activefilter-'] == '') or ((values['-activefilter-'] == 'Suppressed') == self.parent.calculation.suppressed[self.parent.calculation.pointIndex[i]])
# If all filters pass, add to display list
if tfilt and xfilt and pfilt and afilt:
self.data.append([i, f'{self.parent.calculation.ts[i]:6.2f} K {self.parent.calculation.x1[i]:4.3f} {self.parent.calculation.x2[i]:4.3f}'])
self.sgw['-dataList-'].update(self.data)
class SaveData(object):
def __init__(self,ts,x1,x2,boundaries,phases,b,x0data,x1data,mint,maxt):
self.ts = ts
self.x1 = x1
self.x2 = x2
self.boundaries = boundaries
self.phases = phases
self.b = b
self.x0data = x0data
self.x1data = x1data
self.mint = mint
self.maxt = maxt
class SaveDataWindow:
def __init__(self, parent):
self.parent = parent
windowList.append(self)
self.children = []
layout = [[sg.Input(key='-saveName-',size=(thermoToolsGUI.inputSize,1)), sg.Text('.pkl')],
[sg.Button('Save'), sg.Button('Cancel')]]
self.sgw = sg.Window('Save Diagram Data', layout, location = [400,0], finalize=True)
def close(self):
for child in self.children:
child.close()
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
self.close()
elif event =='Save':
try:
tempName = str(values['-saveName-'])
if not tempName == '':
self.parent.calculation.saveDataName = tempName
except:
pass
saveData = SaveData(self.parent.calculation.ts,
self.parent.calculation.x1,
self.parent.calculation.x2,
self.parent.calculation.boundaries,
self.parent.calculation.phases,
self.parent.calculation.b,
self.parent.calculation.x0data,
self.parent.calculation.x1data,
self.parent.calculation.mint,
self.parent.calculation.maxt)
with open('outputs/'+self.parent.calculation.saveDataName+'.pkl','wb') as outp:
pickle.dump(saveData, outp, pickle.HIGHEST_PROTOCOL)
self.close()
class LoadDataWindow:
def __init__(self,parent):
self.parent = parent
windowList.append(self)
file_list_column = [
[
sg.Text("Phase Diagram Data Folder"),
sg.In(size=(25, 1), enable_events=True, key="-FOLDER-"),
sg.FolderBrowse(),
],
[
sg.Listbox(
values=[], enable_events=True, size=(40, 20), key="-FILE LIST-"
)
],
]
self.folder = os.getcwd() + '/outputs'
try:
file_list = os.listdir(self.folder)
except:
file_list = []
fnames = [
f
for f in file_list
if os.path.isfile(os.path.join(self.folder, f))
and f.lower().endswith((".pkl"))
]
fnames = sorted(fnames, key=str.lower)
self.sgw = sg.Window('Phase diagram data selection', file_list_column, location = [0,0], finalize=True)
self.sgw["-FILE LIST-"].update(fnames)
self.children = []
def close(self):
for child in self.children:
child.close()
self.sgw.close()
if self in windowList:
windowList.remove(self)
def read(self):
event, values = self.sgw.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Exit':
self.close()
elif event == "-FOLDER-":
self.folder = values["-FOLDER-"]
try:
file_list = os.listdir(self.folder)
except:
file_list = []
fnames = [
f
for f in file_list
if os.path.isfile(os.path.join(self.folder, f))
and f.lower().endswith((".pkl"))
]
fnames = sorted(fnames, key=str.lower)
self.sgw["-FILE LIST-"].update(fnames)
elif event == "-FILE LIST-": # A file was chosen from the listbox
filename = values["-FILE LIST-"][0]
datafile = os.path.join(self.folder, filename)
with open(datafile, 'rb') as inp:
self.parent.calculation.loadedDiagram = pickle.load(inp)
self.parent.calculation.loaded = True
self.close()
if not(os.path.isfile('bin/InputScriptMode')):
errorLayout = [[sg.Text('No Thermochimica executable available.')],
[sg.Text('Either Thermochimica has not been built (run make),')],
[sg.Text('or this script was not executed from Thermochimica root directory.')],
[sg.Button('Exit')]]
errorWindow = sg.Window('Thermochimica Error Message', errorLayout, location = [0,0], finalize=True, keep_on_top = True)
while True:
event, values = errorWindow.read(timeout=thermoToolsGUI.timeout)
if event == sg.WIN_CLOSED or event == 'Exit':
break
errorWindow.close()
sys.exit()
windowList = []
dataWindow = thermoToolsGUI.DataWindow(windowList,CalculationWindow,thermoToolsGUI.DatFileParse)
while len(windowList) > 0:
for window in windowList:
window.read()
| UTF-8 | Python | false | false | 39,689 | py | 388 | binaryPhaseDiagramGUI.py | 21 | 0.545995 | 0.538361 | 0 | 754 | 51.637931 | 313 |
robsondejesus1996/Python-Graph-Matplotlib | 17,721,035,082,806 | 1ceb1abe332d58d0ac68829da2c35729858bb82c | 846e346449118574838ea8bfb574907b0d8f089f | /Projeto Gráficos/graficos/teoriaAprendizagem.py | afdb6a140b84d584dd8599707f6d22652f25e384 | []
| no_license | https://github.com/robsondejesus1996/Python-Graph-Matplotlib | 08987b6267c227895e3893d4556de99aca350594 | 87d17166d6e8fe6d536bf6bff721ca2e4def02df | refs/heads/master | 2020-06-18T17:39:10.079229 | 2019-07-11T14:42:18 | 2019-07-11T14:42:18 | 196,384,507 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
N = 18
# Citation counts for the combined ('crazy') graph (currently unused)
#Zero = (10,4,5,5,9,21,8)
#Um = (1,6,2,2,3,1,0)
# Cumulative citation counts per year for the graph below
ABP = (0,0,0,0,0,0,0,0,0,0,0,0,0,2,5,6,7,7)
C = (1,1,1,1,1,1,2,2,3,9,9,9,9,12,12,15,17,18)
CT = (0,0,1,1,4,5,9,11,12,23,26,35,43,53,73,91,100,109)
#DC =(0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,2)
#DD = (0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,3)
DT = (0,0,0,0,0,0,0,0,0,1,4,8,8,12,15,15,15,15)
#ID = (0,0,0,0,0,0,0,0,0,0,0,1,1,1,3,3,3,4)
LB = (0,0,1,1,1,1,1,3,3,4,7,8,12,19,24,27,30,36)
nenhuma = (0,0,0,1,1,1,1,1,2,2,3,3,4,4,4,6,7,7)
PC = (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,9)
PS = (0,1,1,1,1,1,1,1,1,1,1,2,2,3,3,3,3,3)
#RF = (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,2)
#TL = (0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,2,2,3)
ind = np.arange(N) # the x locations for the groups
width = 0.6 # the width of the bars
# Define the plot style (marker, line style, colour, label) for each series
plt.plot(CT,marker='o',linestyle='-',color='#D8BFD8',label='Construtivismo (CT)')
plt.plot(LB,marker='*',linestyle='-',color='#4B0082',label='Experimental (LB)')
plt.plot(C,marker='*',linestyle='-',color='#DC143C',label='Construcionismo (C)')
plt.plot(DT,marker='X',linestyle='-',color='#F0E68C',label='Demonstrativo (DT)')
plt.plot(PC,marker='s',linestyle='-',color='#FFA500',label='Pensamento computacional (PC)')
plt.plot(nenhuma,marker='o',linestyle='-',color='red',label='Nenhuma')
#plt.plot(ID,marker='.',linestyle='-',color='#FF1493',label='Indutivo (ID)')
#plt.plot(TL,marker='.',linestyle='-',color='#B0E0E6',label='Teaching Learning (TL)')
plt.plot(ABP,marker='.',linestyle='-',color='#000000',label='Abordagem Baseada em Problemas (ABP)')
#plt.plot(DC,marker='s',linestyle='-',color='#000080',label=' Directiva (DC)')
#plt.plot(DD,marker='+',linestyle='-',color='#006400',label='Dedutiva (DD)')
plt.plot(PS,marker='+',linestyle='-',color='#7FFFD4',label='Solução de Problemas (PS)')
#plt.plot(RF,marker='X',linestyle='-',color='#8B4513',label='Reflexivo (RF)')
plt.legend()
plt.xticks(ind, ('2001','2002', '2003', '2004', '2005', '2006', '2007','2008','2009','2010','2011','2012','2013','2014','2015','2016','2017','2018'))
plt.xticks(rotation=90)
#plt.yscale('log')
#plt.show()
plt.savefig('teoriaAprendizagem.pdf')
#plt.close() | UTF-8 | Python | false | false | 2,283 | py | 18 | teoriaAprendizagem.py | 18 | 0.628245 | 0.450066 | 0 | 55 | 40.345455 | 149 |
Thib-G/cs50project3 | 1,941,325,265,061 | a8c49096b4a416b4a36415a060893fbdf59b6161 | dbfc0e0fc2067bf21269dd0a83a529bae238a011 | /orders/views.py | bec5ab11225b6914ce9c0f87217418326d36c3d3 | []
| no_license | https://github.com/Thib-G/cs50project3 | 351cd117752beeeddf57a63f36be6fc1dfae2243 | cf1b2a0efb520805ac77e3b157c85ff7ea374821 | refs/heads/master | 2021-06-21T05:32:27.493974 | 2020-12-21T16:36:33 | 2020-12-21T16:36:33 | 144,454,723 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.decorators.http import require_http_methods
from django.views import generic
from .models import Category, Pricing, Cart, CartItem
# Create your views here.
class IndexView(generic.ListView):
template_name = 'orders/index.html'
context_object_name = 'categories'
def get_queryset(self):
"""Return the categories sorted by order_nr"""
return Category.objects.order_by('order_nr')
class CartView(generic.DetailView):
model = Cart
def get_object(self):
        if 'cart_id' not in self.request.session:
return None
cart_id = self.request.session['cart_id']
return Cart.objects.get(pk=cart_id)
@require_http_methods(["POST"])
def add_to_cart(request):
pricing_id = int(request.POST['pricing_id'])
pricing_item = Pricing.objects.get(pk=pricing_id)
    if 'cart_id' in request.session:
cart_id = request.session['cart_id']
cart = Cart.objects.get(pk=cart_id)
cart.cartitem_set.add(CartItem(pricing_item=pricing_item), bulk=False)
cart.save()
else:
cart = Cart()
cart.save()
cart.cartitem_set.add(CartItem(pricing_item=pricing_item), bulk=False) # pylint: disable=E1101
cart.save()
request.session['cart_id'] = cart.pk
return HttpResponseRedirect(reverse('orders:cart'))
@require_http_methods(["POST"])
def delete_cart(request):
cart_id = request.session['cart_id']
cart = Cart.objects.get(pk=cart_id)
cart.is_deleted = True
cart.save()
del request.session['cart_id']
return HttpResponseRedirect(reverse('orders:cart'))
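

# URLconf sketch implied by the reverse('orders:cart') calls above (illustrative
# only; the actual routes live in the app's urls.py, and only the 'cart' name is
# referenced in this file, so the other names are assumptions):
#
#   app_name = 'orders'
#   urlpatterns = [
#       path('', views.IndexView.as_view(), name='index'),
#       path('cart/', views.CartView.as_view(), name='cart'),
#       path('add-to-cart/', views.add_to_cart, name='add_to_cart'),
#       path('delete-cart/', views.delete_cart, name='delete_cart'),
#   ]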
| UTF-8 | Python | false | false | 1,713 | py | 15 | views.py | 10 | 0.672504 | 0.670169 | 0 | 56 | 29.589286 | 102 |
ecoon/watershed-workflow | 11,201,274,712,406 | d647bd7aefb53ae782a14d110e94c3d4c3d325e9 | 96c9db91b4296ea8957b037e234e3230ed67067f | /watershed_workflow/sources/manager_soilgrids_2017.py | bdb285974eaa577f8c67b49671859e35a577ff97 | [
"BSD-3-Clause"
]
| permissive | https://github.com/ecoon/watershed-workflow | 8458b1a3ea52c3dcec7746b265c88016fd380968 | 148540168f43049618360907ce2ea648f62b9d8b | refs/heads/master | 2022-10-04T04:48:19.476978 | 2022-09-19T13:15:10 | 2022-09-19T13:15:10 | 211,087,561 | 34 | 15 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Manager for downloading SoilGrids250m-2017 products."""
import os, sys
import logging
import numpy as np
import shapely
import rasterio
import rasterio.mask
import watershed_workflow.sources.utils as source_utils
import watershed_workflow.sources.names
import watershed_workflow.warp
from watershed_workflow.sources.manager_raster import FileManagerRaster
class FileManagerSoilGrids2017:
"""SoilGrids 250m (2017) datasets.
SoilGrids 2017 maintains, to date, the only complete
characterization of all soil properties needed for a hydrologic
model. The resolution is decent, and the accuracy is ok, but most
importantly it is complete.
.. [SoilGrids2017] https://www.isric.org/explore/soilgrids/faq-soilgrids-2017
.. [hengl2014soilgrids] Hengl, Tomislav, et al. "SoilGrids1km—global soil information based on automated mapping." PloS one 9.8 (2014): e105992.
.. [hengl2017soilgrids] Hengl, Tomislav, et al. "SoilGrids250m: Global gridded soil information based on machine learning." PLoS one 12.2 (2017): e0169748.
See the above link for a complete listing of potential variable
names; included here are a subset used by this code. That said,
any 2017 filename can be used with this source manager.
.. list-table::
:widths: 25 25 75
* - name
- units
- description
* - BDTICM
- :math:`cm`
- Absolute depth to continuous, unfractured bedrock.
* - BLDFIE
- :math:`kg m^-3`
- Bulk density of fine earth
* - CLYPPT
- :math:`%`
- percent clay
* - SLTPPT
- :math:`%`
- percent silt
* - SNDPPT
- :math:`%`
- percent sand
* - WWP
- :math:`%`
- Soil water capacity % at wilting point
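
    Example (a sketch only; ``shply`` and ``crs`` are assumed to be a shape and
    its CRS obtained elsewhere in the workflow)::

        mgr = FileManagerSoilGrids2017()
        profile, clay = mgr.get_raster(shply, crs, 'CLYPPT', layer=1)
        profile, dtb = mgr.get_depth_to_bedrock(shply, crs)
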
"""
URL = "https://files.isric.org/soilgrids/former/2017-03-10/data/"
DEPTHS = [0, 0.05, 0.15, 0.3, 0.6, 1.0, 2.0]
def __init__(self, variant=None):
if variant == 'US':
self.name = 'SoilGrids2017_US'
self.names = watershed_workflow.sources.names.Names(
self.name, 'soil_structure', self.name, '{variable}_M_{soillevel}250m_ll_us.tif')
else:
self.name = 'SoilGrids2017'
self.names = watershed_workflow.sources.names.Names(
self.name, 'soil_structure', self.name, '{variable}_M_{soillevel}250m_ll.tif')
def get_raster(self, shply, crs, variable, layer=None, force_download=False):
"""Download and read a raster for this shape, clipping to the shape.
Parameters
----------
shply : fiona or shapely shape or bounds
Shape to provide bounds of the raster.
crs : CRS
CRS of the shape.
        variable : str
            SoilGrids 2017 variable name, e.g. 'BDTICM', 'CLYPPT', or 'BLDFIE'.
        layer : int, optional
            Soil layer index (1-7) for depth-resolved variables; None for
            variables without a depth dimension (e.g. 'BDTICM').
        force_download : bool, optional
            Download or re-download the file if true.
Returns
-------
profile : rasterio profile
Profile of the raster.
raster : np.ndarray
Array containing the elevation data.
Note that the raster provided is in SoilGrids native CRS
(which is in the rasterio profile), not the shape's CRS.
"""
# download (or hopefully don't) the file
filename, profile = self._download(variable, layer)
logging.info(f"CRS: {profile['crs']}")
# load the raster
logging.info(f"filename: {filename}")
manager = FileManagerRaster(filename)
return manager.get_raster(shply, crs)
def get_depth_to_bedrock(self, shply, crs, force_download=False):
return self.get_raster(shply, crs, 'BDTICM', None, force_download)
def get_soil_texture(self, shply, crs, layer, force_download=False):
rasters = []
if layer == -1:
layer = 7
for i, variable in enumerate(['SNDPPT', 'SLTPPT', 'CLYPPT']):
prof, raster = self.get_raster(shply, crs, variable, layer, force_download)
rasters.append(raster)
rasters = np.array(rasters)
return prof, rasters
def get_all_soil_texture(self, shply, crs, force_download=False):
rasters = []
for layer in range(1, 8):
prof, raster = self.get_soil_texture(shply, crs, layer, force_download)
rasters.append(raster)
rasters = np.array(rasters)
return prof, rasters
def get_bulk_density(self, shply, crs, layer, force_download=False):
if layer == -1:
layer = 7
return self.get_raster(shply, crs, 'BLDFIE', layer, force_download)
def get_all_bulk_density(self, shply, crs, force_download=False):
rasters = []
for layer in range(1, 8):
prof, raster = self.get_bulk_density(shply, crs, layer, force_download)
rasters.append(raster)
rasters = np.array(rasters)
return prof, rasters
def get_layer7(self, shply, crs, force_download=False):
data = dict()
prof, data['bulk density [kg m^-3]'] = self.get_bulk_density(shply, crs, -1, force_download)
_, data['texture [%]'] = self.get_soil_texture(shply, crs, -1, force_download)
_, data['depth to bedrock [cm]'] = self.get_depth_to_bedrock(shply, crs, force_download)
return prof, data
def get_all(self, shply, crs, force_download=False):
data = dict()
prof, data['bulk density [kg m^-3]'] = self.get_all_bulk_density(shply, crs, force_download)
_, data['texture [%]'] = self.get_all_soil_texture(shply, crs, force_download)
_, data['depth to bedrock [cm]'] = self.get_depth_to_bedrock(shply, crs, force_download)
return prof, data
def _download(self, variable, layer=None, force=False):
"""Downloads individual files via direct download."""
os.makedirs(self.names.folder_name(), exist_ok=True)
if layer is None:
soillevel = ''
else:
soillevel = f'sl{layer}_'
filename = self.names.file_name(variable=variable, soillevel=soillevel)
# download file
filename_base = self.names.file_name_base(variable=variable, soillevel=soillevel)
url = self.URL + filename_base
source_utils.download(url, filename, force)
# return raster profile
with rasterio.open(filename, 'r') as fid:
profile = fid.profile
return filename, profile
| UTF-8 | Python | false | false | 6,412 | py | 135 | manager_soilgrids_2017.py | 80 | 0.614197 | 0.594072 | 0 | 170 | 36.705882 | 159 |
mrs-eload/bybyepp-test | 309,237,695,480 | b33ba2b002461789eeddbf80eb60000fc295853a | 0bcc82e17bdab466caf1a83683537a4b178c9250 | /app/test/fixtures.py | 8e9ed09c86ff66e1a02b53e82573c98ad2d1dec4 | []
| no_license | https://github.com/mrs-eload/bybyepp-test | 2f83d42e8274d056ae96fbd06e76f0f9d1a42750 | 594f1dfdb152ee573adcac189f9571ed296a66af | refs/heads/master | 2023-01-03T11:37:39.024937 | 2020-10-21T15:27:58 | 2020-10-21T15:27:58 | 281,674,651 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import json
import random
import logging
from app.models.Structure import Structure
from app.models.Ligand import Ligand
from app.services.RedisService import redis_service
logger = logging.getLogger("uvicorn")
def load_fixtures():
logger.info("Loading structures fixtures ...")
codes = ["5dls", "1uyd", "2po6", "4bdj"]
redis = redis_service.connect("3decision_data",0)
json_data = {}
json_data['structures'] = {}
for code in codes:
        logger.info("Saving %s into redis", code)
structure = Structure()
structure.id = random.randint(1,100000)
structure.name = code + ' structure'
structure.external_code = code
json_data['structures'][code] = structure.json()
redis.hmset('structures', json_data['structures'])
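    # The stored hash can later be read back with redis.hgetall('structures');
    # each value is the JSON string produced by Structure.json().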
logger.info(type(json_data))
logger.info(json_data)
| UTF-8 | Python | false | false | 861 | py | 16 | fixtures.py | 14 | 0.670151 | 0.653891 | 0 | 31 | 26.774194 | 56 |
pynucastro/pynucastro | 1,683,627,204,682 | 686dab1702c925a75766f57287e1c5769b8b78f9 | c491226c6aaa5729cb4361897b3a92a304317343 | /examples/triple-alpha/triple-alpha-cxx.py | 98bfb2c7a9090872268daecffd9764759de8f4b1 | [
"BSD-3-Clause"
]
| permissive | https://github.com/pynucastro/pynucastro | 0a319fa23cf76faf0059876011251cc83263c29d | 0b07fc46b3f69cf2065164105f95d0ca854e1526 | refs/heads/main | 2023-08-31T06:58:42.274848 | 2023-08-24T18:55:36 | 2023-08-24T18:55:36 | 47,992,908 | 33 | 29 | BSD-3-Clause | false | 2023-09-12T16:46:08 | 2015-12-14T18:18:03 | 2023-08-21T21:11:06 | 2023-09-12T16:46:06 | 99,962 | 41 | 25 | 43 | Python | false | false | # triple-alpha rate module generator
from pynucastro.networks import AmrexAstroCxxNetwork
files = ["c12-gaa-he4-fy05",
"he4-aag-c12-fy05"]
triple_alpha_net = AmrexAstroCxxNetwork(files)
triple_alpha_net.write_network()
| UTF-8 | Python | false | false | 232 | py | 192 | triple-alpha-cxx.py | 99 | 0.75 | 0.706897 | 0 | 9 | 24.666667 | 52 |
Mbarley/python_mud | 11,905,649,344,695 | 1996697dd71e1b28fb12b9d4d5ddc2142c4bcc08 | 071a34eb5ba13f44f96eff6f44a9e13be209da29 | /Command/Drop.py | 72ecc11ffbb65b63090fdf3746b95c78ae960f75 | []
| no_license | https://github.com/Mbarley/python_mud | bc01d72b64e2313c871e24096670f1b8921d2f2a | 9eaa3ca3edcc9d39ccb2f4cf64fe1c78da3e783a | refs/heads/master | 2020-12-03T03:31:13.510092 | 2012-10-03T20:33:18 | 2012-10-03T20:33:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Event.Event import Event
from Command import Command
import Engine.ActorEngine
class Drop(Command):
def __init__(self):
Command.__init__(self)
def execute(self, source, args):
if args == None or len(args) == 0:
args = ['']
if len(args) == 1:
args.append('')
dropEvent = Event()
dropEvent.attributes['signature'] = 'actor_attempted_item_drop'
dropEvent.attributes['data']['itemName'] = args[0]
dropEvent.attributes['data']['args'] = args[1:]
dropEvent.attributes['data']['actor'] = source
Engine.ActorEngine.emitEvent(dropEvent) | UTF-8 | Python | false | false | 584 | py | 103 | Drop.py | 86 | 0.655822 | 0.648973 | 0 | 23 | 24.434783 | 67 |
khahux/PersonalWebsite | 8,048,768,737,113 | 58b494c9e3ce82eb868e47ab29f125085846ca81 | 5a72406a7a10f78b2b3fd8c7c8bddbf97d0b2cf5 | /create_tables.py | 189b5f733e053032caf1da0f82f1907eee283e12 | [
"MIT"
]
| permissive | https://github.com/khahux/PersonalWebsite | 4e34f5cad8b7f654679c1355c7ed9948eabc8f55 | 1d1d667b5490b2fa3d993f44cca072ae7f93c014 | refs/heads/master | 2016-03-22T09:01:40.819063 | 2015-06-06T22:17:08 | 2015-06-06T22:17:08 | 33,525,370 | 1 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
# email: khahux@163.com
from models.base import db
from models.user import User
from models.blog import Blog, BlogTags, BlogCategory
from models.tags import Tags
from models.category import Category
from models.note import Note
from models.word import Word
from models.record import AccessRecord
def create_tables():
tables = [User, Blog, BlogTags, BlogCategory,
Tags, Category, Note, Word, AccessRecord]
for table in tables:
if table.table_exists():
table.drop_table()
db.create_table(table)
if __name__ == '__main__':
create_tables() | UTF-8 | Python | false | false | 610 | py | 103 | create_tables.py | 42 | 0.695082 | 0.688525 | 0 | 24 | 24.458333 | 55 |
jihunchoi/skip-thought-pytorch | 10,411,000,773,768 | b5b94959b8e989de021f0b3ef986ab946c17bfff | 569d8444a16f1884bd78901f02bc2b2bfe8640fb | /models/decoders.py | fd7b42248e6bb0c5f9501462379a1c05f9efc79f | []
| no_license | https://github.com/jihunchoi/skip-thought-pytorch | 70ac311fcaafbef8b245678d498a0fb1eb0874d9 | fb6ccd7b75c8f1bae0e72e649b254a80551b543c | refs/heads/master | 2021-05-07T03:30:01.611375 | 2017-11-17T12:23:02 | 2017-11-17T12:23:02 | 110,930,709 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from torch import nn
from torch.autograd import Variable
from torch.nn import init
class RecurrentDecoder(nn.Module):
def __init__(self, rnn_type, num_words, word_dim, hidden_dim,
num_layers, dropout_prob):
super().__init__()
self.rnn_type = rnn_type
self.num_words = num_words
self.word_dim = word_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_prob = dropout_prob
self.dropout = nn.Dropout(dropout_prob)
self.word_embedding = nn.Embedding(num_embeddings=num_words,
embedding_dim=word_dim)
if rnn_type == 'gru':
self.rnn = nn.GRU(
input_size=word_dim, hidden_size=hidden_dim,
num_layers=num_layers, dropout=dropout_prob)
elif rnn_type == 'lstm':
self.rnn = nn.LSTM(
input_size=word_dim, hidden_size=hidden_dim,
num_layers=num_layers, dropout=dropout_prob)
else:
raise ValueError('Unknown RNN type!')
self.output_linear = nn.Linear(in_features=hidden_dim,
out_features=num_words)
self.reset_parameters()
def reset_parameters(self):
init.normal(self.word_embedding.weight.data, mean=0, std=0.01)
for i in range(self.num_layers):
weight_ih = getattr(self.rnn, f'weight_ih_l{i}')
weight_hh = getattr(self.rnn, f'weight_hh_l{i}')
bias_ih = getattr(self.rnn, f'bias_ih_l{i}')
bias_hh = getattr(self.rnn, f'bias_hh_l{i}')
init.orthogonal(weight_hh.data)
init.kaiming_normal(weight_ih.data)
init.constant(bias_ih.data, val=0)
init.constant(bias_hh.data, val=0)
if self.rnn_type == 'lstm': # Set initial forget bias to 1
bias_ih.data.chunk(4)[1].fill_(1)
init.kaiming_normal(self.output_linear.weight.data)
init.constant(self.output_linear.bias.data, val=0)
def forward(self, words, prev_state):
"""
Args:
words (Variable): A long variable of size
(length, batch_size) that contains indices of words.
prev_state (Variable): The previous state of the decoder.
Returns:
logits (Variable): A float variable containing unnormalized
log probabilities. It has the same size as words.
state (Variable): The current state of the decoder.
"""
words_emb = self.word_embedding(words)
words_emb = self.dropout(words_emb)
rnn_outputs, rnn_state = self.rnn(input=words_emb, hx=prev_state)
logits = self.output_linear(rnn_outputs)
return logits, rnn_state
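

# Minimal usage sketch (the hyperparameter values below are illustrative only):
#
#   import torch
#   decoder = RecurrentDecoder(rnn_type='gru', num_words=20000, word_dim=620,
#                              hidden_dim=2400, num_layers=1, dropout_prob=0.0)
#   words = Variable(torch.LongTensor(30, 16).random_(20000))  # (length, batch_size)
#   logits, state = decoder(words, prev_state=None)  # logits: (length, batch_size, num_words)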
| UTF-8 | Python | false | false | 2,801 | py | 11 | decoders.py | 10 | 0.576937 | 0.57301 | 0 | 68 | 40.191176 | 73 |
aldew5/CCC-Solutions | 16,303,695,902,383 | ec9370943d599e2e1de485ace422ab367df943ae | 66e81c9587dc17e7c99823e13dab1573aa76ebb0 | /2013/J3.py | 588db9bbb02ef1d00327c335b7ea267f18c26aa4 | []
| no_license | https://github.com/aldew5/CCC-Solutions | a104f2d0cf2f7c46718a0852be0ef3f40679b4a1 | b4c4565cd26d6ea830ddbf0f4563a824f25e4625 | refs/heads/master | 2022-04-12T09:15:31.846229 | 2020-04-04T15:11:54 | 2020-04-04T15:11:54 | 225,699,845 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # "From 1987 to 2013" J3 2013
# Alec Dewulf
# January 14, 2019
year = int(input())
numbers = []
double_num = 0
# infinite loop
while True:
year += 1
year = str(year)
# append to a list of new ints or mark it as a double
for i in year:
if i not in numbers:
numbers.append(i)
elif i in numbers:
double_num += 1
if double_num == 0:
# this is a perfect year
break
else:
# reset the variables
numbers = []
double_num = 0
# so year can be added onto
year = int(year)
# output results
print(year)
| UTF-8 | Python | false | false | 626 | py | 35 | J3.py | 34 | 0.538339 | 0.5 | 0 | 34 | 17.264706 | 57 |
Anton-L-GitHub/Learning | 8,693,013,813,271 | ad3e33bd05bb57d2858bddd54c9937ff0093166e | 85958019ffb79d74efd9198386ab149c1116dccd | /Python/1_PROJECTS/Python_bok/Prov (övningar)/Kap11/ovn11-5.py | c2ef3e9084ec94a4901a4531d2b1192c7e8fd1f7 | []
| no_license | https://github.com/Anton-L-GitHub/Learning | b70e5324c578a05bca5478bf8629d046ba30fabb | 7a685cc05590ca276acb3627eef3a2144d8b6c61 | refs/heads/master | 2023-08-13T21:33:40.154733 | 2021-09-15T21:32:51 | 2021-09-15T21:32:51 | 293,071,776 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import argv
assert len(argv) == 4
txt = argv[3]
with open(argv[1], 'r') as f1, open(argv[2], 'w') as f2:
for r in f1:
if r.find(txt) >= 0:
f2.write(r) | UTF-8 | Python | false | false | 183 | py | 219 | ovn11-5.py | 206 | 0.535519 | 0.486339 | 0 | 7 | 25.285714 | 56 |
slamj1/hostthedocs | 3,307,124,865,878 | 30969477c935e40c3a9dc0adb65695ddc402fe12 | 027d7cf528ade48c2feca29ff2e4de9fcc5bebc3 | /hostthedocs/util.py | 18a1205cf98c188a0f968d7a18846d0618975cbf | [
"MIT"
]
| permissive | https://github.com/slamj1/hostthedocs | d14550a9b73640f9ec1ee5ae73c20e7da2faa763 | e9910819e3c184e8f9de97a9c2317d8de453c0c9 | refs/heads/master | 2021-05-01T01:49:29.092942 | 2016-09-19T05:39:14 | 2016-09-19T05:39:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Provides utility methods.
"""
def get_filestream_from_request(request):
"""
Extract the file-stream of the first file object from
a :class:`werkzeug.wrappers.BaseRequest`.
:param werkzeug.wrappers.BaseRequest request: the `werkzeug` request
:return: the file-stream of the first file within the request
:raises ValueError: if no files exist within the request
"""
try:
return list(request.files.values())[0].stream
except IndexError:
raise ValueError('Request does not contain uploaded file')
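

# Usage sketch (inside a Flask view; the route and handler names are illustrative):
#
#   @app.route('/upload', methods=['POST'])
#   def upload():
#       try:
#           stream = get_filestream_from_request(request)
#       except ValueError:
#           abort(400, 'Request does not contain uploaded file')
#       ...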
| UTF-8 | Python | false | false | 552 | py | 2 | util.py | 2 | 0.701087 | 0.699275 | 0 | 18 | 29.666667 | 72 |
xyleroo/py-sma-modbus2 | 14,783,277,482,993 | 0faeb2e2140f45053cbacdff9e1af728fd14855f | 16474d4e0462607eb5c7247a942ec049bc697a2c | /format_unit.py | 113137d3e598404f8b2963cac3b8d1a7bf7cf3c0 | []
| no_license | https://github.com/xyleroo/py-sma-modbus2 | 4162f70144c9f8800dbeaf829d9912cd607be597 | 6d6727e6acba2c4f0ba75aa6401b685d90442708 | refs/heads/master | 2023-08-29T03:24:59.331904 | 2021-10-24T21:40:12 | 2021-10-24T21:40:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
SI_PREFIX = {
-18 : {'multiplier' : 10 ** 18, 'prefix' : 'a'},
-17 : {'multiplier' : 10 ** 18, 'prefix' : 'a'},
-16 : {'multiplier' : 10 ** 18, 'prefix' : 'a'},
-15 : {'multiplier' : 10 ** 15, 'prefix' : 'f'},
-14 : {'multiplier' : 10 ** 15, 'prefix' : 'f'},
-13 : {'multiplier' : 10 ** 15, 'prefix' : 'f'},
-12 : {'multiplier' : 10 ** 12, 'prefix' : 'p'},
-11 : {'multiplier' : 10 ** 12, 'prefix' : 'p'},
-10 : {'multiplier' : 10 ** 12, 'prefix' : 'p'},
-9 : {'multiplier' : 10 ** 9, 'prefix' : 'n'},
-8 : {'multiplier' : 10 ** 9, 'prefix' : 'n'},
-7 : {'multiplier' : 10 ** 9, 'prefix' : 'n'},
-6 : {'multiplier' : 10 ** 6, 'prefix' : 'µ'},
-5 : {'multiplier' : 10 ** 6, 'prefix' : 'µ'},
-4 : {'multiplier' : 10 ** 6, 'prefix' : 'µ'},
-3 : {'multiplier' : 10 ** 3, 'prefix' : 'm'},
-2 : {'multiplier' : 10 ** 3, 'prefix' : 'm'},
-1 : {'multiplier' : 10 ** 3, 'prefix' : 'm'},
0 : {'multiplier' : 1, 'prefix' : ''},
1 : {'multiplier' : 1, 'prefix' : ''},
2 : {'multiplier' : 1, 'prefix' : ''},
3 : {'multiplier' : 10 ** -3, 'prefix' : 'k'},
4 : {'multiplier' : 10 ** -3, 'prefix' : 'k'},
5 : {'multiplier' : 10 ** -3, 'prefix' : 'k'},
6 : {'multiplier' : 10 ** -6, 'prefix' : 'M'},
7 : {'multiplier' : 10 ** -6, 'prefix' : 'M'},
8 : {'multiplier' : 10 ** -6, 'prefix' : 'M'},
9 : {'multiplier' : 10 ** -9, 'prefix' : 'G'},
10 : {'multiplier' : 10 ** -9, 'prefix' : 'G'},
11 : {'multiplier' : 10 ** -9, 'prefix' : 'G'},
12 : {'multiplier' : 10 ** -12, 'prefix' : 'T'},
13 : {'multiplier' : 10 ** -12, 'prefix' : 'T'},
14 : {'multiplier' : 10 ** -12, 'prefix' : 'T'},
15 : {'multiplier' : 10 ** -15, 'prefix' : 'P'},
16 : {'multiplier' : 10 ** -15, 'prefix' : 'P'},
17 : {'multiplier' : 10 ** -15, 'prefix' : 'P'},
18 : {'multiplier' : 10 ** -18, 'prefix' : 'E'},
}
# some units are not useful with prefix!
NO_Format = {"%","°C","h","kWh","ms","MWh","s",""}
def convertNumberToNumberWithPrefix(number):
if number == 0:
return [number,'']
exponent = math.floor(math.log10(math.fabs( number)));
exponent = max(min(exponent,18),-18)
return [number * SI_PREFIX[exponent]['multiplier'], SI_PREFIX[exponent]['prefix']];
def formatWithPrefix(number, precision:int =0, unit=""):
if unit not in NO_Format:
n ,p = convertNumberToNumberWithPrefix(number)
else:
n=number
p=''
return f"{n:.{precision}f} {p}{unit}"
# tests
if __name__ == '__main__':
print(
formatWithPrefix(1.189404E+022),
formatWithPrefix(-4.07237500000000E+007),
formatWithPrefix(1.943596E-005,2,"F"),
formatWithPrefix(1),
formatWithPrefix(0.1),
formatWithPrefix(0.001,3,"A"),
formatWithPrefix(0.002),
formatWithPrefix(0.0011),
formatWithPrefix(0.000999,2),
formatWithPrefix(5),
formatWithPrefix(10),
formatWithPrefix(100),
formatWithPrefix(1000),
formatWithPrefix(0),
formatWithPrefix(-0.001),
formatWithPrefix(-0.0011),
formatWithPrefix(-0.000999,1,"V"),
formatWithPrefix(-10),
formatWithPrefix(-100),
formatWithPrefix(-1000)
) | UTF-8 | Python | false | false | 3,379 | py | 14 | format_unit.py | 8 | 0.48563 | 0.401185 | 0 | 84 | 38.202381 | 87 |
mystuart/CatStone-MyCRT | 4,148,938,420,671 | 61ae4257a1e8406076e9dbbb82de4e01f5709648 | 8f7c0bc64773146e81cc1a32c92ac6486a6f1b19 | /mycrt-backend/tests/replay/test_replay.py | 591fa878f49aa50a49cea199864cf86b295412f7 | [
"Apache-2.0"
]
| permissive | https://github.com/mystuart/CatStone-MyCRT | 9bb6f154ceafd14be4cf8a77081301075dab52b5 | 55a28221969dd0db49dd3b8007ccadf09c06202b | refs/heads/master | 2023-03-17T04:43:42.156889 | 2018-06-07T16:14:10 | 2018-06-07T16:14:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/../..'))
from tests.mocking.mockBoto import mockBoto
from tests.mocking.mockUser import MockUser
from src.replay.replay import get_transactions
class TestReplay(unittest.TestCase):
def setUp(self):
self.user = MockUser('my_access_key', 'my_secret_key')
def test_get_transactions(self):
testBoto = mockBoto(0)
response = get_transactions("test_alias", "test_bucket", self.user, testBoto)
expected = {'Error': {'Code': '400', 'Message': 'Generic Error'}}
self.assertEqual(response, expected)
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 687 | py | 75 | test_replay.py | 56 | 0.6754 | 0.669578 | 0 | 22 | 30.272727 | 85 |
firefly707/nebpyclient | 8,804,682,994,993 | 7d3b000525f929c86d35e9ab254be78bc6541f4e | aec330b4e49e2e689aec3c73ac98bde20f450069 | /nebpyclient/api/tokens.py | 663eb423c8c6dcc01dc9915b809ebb7c09348f93 | [
"MIT"
]
| permissive | https://github.com/firefly707/nebpyclient | 91afbebdc3213e1252884f59936c9b190072e6b9 | 8ea044096bd18aaccbfb81eca4e26ec29895a18c | refs/heads/master | 2023-07-12T11:22:56.401434 | 2021-08-31T23:42:34 | 2021-08-31T23:42:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# Copyright 2021 Nebulon, Inc.
# All Rights Reserved.
#
# DISCLAIMER: THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
# EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import requests
from .common import read_value
from .issues import Issues
TOKEN_TIMEOUT_SECONDS = 30
"""Timeout for token delivery"""
class MustSendTargetDNS:
"""Used in mutations for on-premises infrastructure via security triangle
Represents a definition of SPUs that a security token needs to be sent to.
"""
def __init__(
self,
response: dict
):
"""Constructs a new MustSendTargetDNS object
This constructor expects a ``dict`` object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__control_port_dns = read_value(
"controlPortDNS", response, str, True)
self.__data_port_dns = read_value(
"dataPortDNS", response, str, True)
@property
def control_port_dns(self) -> str:
"""The DNS name of the SPU's control port"""
return self.__control_port_dns
@property
def data_port_dns(self) -> [str]:
"""List of DNS names of the SPU's data ports"""
return self.__data_port_dns
@staticmethod
def fields():
return [
"controlPortDNS",
"dataPortDNS",
]
class TokenResponse:
"""Used in mutations for on-premises infrastructure via security triangle
Represents a response for a mutation that alters the customers'
on-premises infrastructure and requires the completion of the security
triangle.
"""
def __init__(
self,
response: dict
):
"""Constructs a new TokenResponse object
This constructor expects a ``dict`` object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__token = read_value(
"token", response, str, True)
self.__wait_on = read_value(
"waitOn", response, str, True)
self.__target_ips = read_value(
"targetIPs", response, str, True)
self.__data_target_ips = read_value(
"dataTargetIPs", response, str, False)
self.__must_send_target_dns = read_value(
"mustSendTargetDNS", response, MustSendTargetDNS, False)
self.__issues = read_value(
"issues", response, Issues, False)
@property
def token(self) -> str:
"""Token that needs to be delivered to on-premises SPUs"""
return self.__token
@property
def wait_on(self) -> str:
"""Unique identifier of the resource that is about to be created"""
return self.__wait_on
@property
def target_ips(self) -> [str]:
"""List of control IP addresses of SPUs involved in the mutation"""
return self.__target_ips
@property
def data_target_ips(self) -> [str]:
"""List of data IP addresses of SPUs involved in the mutation"""
return self.__data_target_ips
@property
def must_send_target_dns(self) -> [MustSendTargetDNS]:
"""List of data IP addresses of SPUs involved in the mutation"""
return self.__must_send_target_dns
@property
def issues(self) -> Issues:
"""List of errors and warnings associated with the mutation"""
return self.__issues
@staticmethod
def fields():
return [
"token",
"waitOn",
"targetIPs",
"dataTargetIPs",
"mustSendTargetDNS{%s}" % ",".join(MustSendTargetDNS.fields()),
"issues{%s}" % ",".join(Issues.fields()),
]
def _issue_one_token(
self,
ip: str
) -> any:
url = "https://%s" % ip
try:
response = requests.post(
url=url,
data=self.token,
timeout=TOKEN_TIMEOUT_SECONDS
)
if 200 <= response.status_code < 300:
response_text = response.text.strip()
if response_text == "OK" or response_text == "\"OK\"":
return True
return response.json()
# if we got here, there was an error
reason = response.text
except requests.exceptions.ConnectTimeout:
reason = "request timed out"
print("Failed to deliver token to %s: %s" % (ip, reason))
return False
def deliver_token(self) -> any:
"""Delivers the token to SPUs
For recipe engine v1 requests, a boolean value is returned that
indicates if the request was successful. For recipe v2 requests a dict
is returned that includes ``recipe_id_to_wait_on`` and
``pod_uuid_to_wait_on`` that can be used to query the status of the
recipe and its completion.
:raises Exception: When token delivery failed.
:returns any: The response received from nebulon ON through the proxy
services processing unit (SPU).
"""
# first to must_send_target_dns - need to send to all of them
if self.must_send_target_dns is not None:
for cur in self.must_send_target_dns:
# first send the token to the control port
if self._issue_one_token(cur.control_port_dns):
continue
# if this failed, send the token to the data ports
delivery_success = False
for dp in cur.data_port_dns:
if self._issue_one_token(dp):
delivery_success = True
break
if not delivery_success:
raise Exception("Unable to deliver token to mandatory SPUs")
# second, send the token to one of the remaining SPUs
ips = self.target_ips
if self.data_target_ips is not None:
ips = ips + self.data_target_ips
for ip in ips:
result = self._issue_one_token(ip)
if result:
return result
raise Exception("Unable to deliver token any SPU")
class PodTokenResponse:
"""Response from the server used for nPod related mutations
__This object is deprecated__
Represents a response for a nPod related mutation that alters the customers'
on-premises infrastructure and requires the completion of the security
triangle.
"""
def __init__(
self,
response: dict
):
"""Constructs a new PodTokenResponse object
This constructor expects a ``dict`` object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__token_resp = read_value(
"tokenResp", response, TokenResponse, False)
self.__issues_res = read_value(
"IssuesRes", response, Issues, False)
@property
def token(self) -> TokenResponse:
"""Token that needs to be delivered to on-premises SPUs"""
return self.__token_resp
@property
def issues(self) -> Issues:
"""List of errors and warnings associated with the mutation"""
return self.__issues_res
@staticmethod
def fields():
return [
"tokenResp{%s}" % ",".join(TokenResponse.fields()),
"IssuesRes{%s}" % ",".join(Issues.fields()),
]
| UTF-8 | Python | false | false | 8,443 | py | 44 | tokens.py | 35 | 0.597701 | 0.596042 | 0 | 263 | 31.087452 | 80 |
denisecase/chapstack | 13,907,104,152,283 | 347c64acd1ef5f0c47c912020e9231f7beefec59 | 12e61e591b886433ed7b9eff8d8e9745bffac199 | /scripts/09-files.py | 3eeb6c24473ec29c4afc76fef999feab9b1f5f1a | [
"MIT"
]
| permissive | https://github.com/denisecase/chapstack | 1776ca29c42103857c06160f4c8da03d9fe12875 | ffe006bbbd61853bc7142ae36ed0e09e8a7e9fc1 | refs/heads/main | 2023-01-20T13:09:07.466772 | 2020-11-29T03:31:38 | 2020-11-29T03:31:38 | 316,328,276 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | """09-files.py
This script will show how we can read files for processing with Python.
""" | UTF-8 | Python | false | false | 91 | py | 17 | 09-files.py | 10 | 0.736264 | 0.714286 | 0 | 4 | 22 | 71 |
joanvila/aioredlock | 3,685,081,974,406 | 8e9dd9caf36a7d7b60daba1666aed5878e4d1d2e | 47f45e1dae9addc03df9869a80bc02bdc2468187 | /examples/sentinel.py | e79fe23e740d55199d6f68d8ff21dffcb08c9ba0 | [
"MIT"
]
| permissive | https://github.com/joanvila/aioredlock | d54f6686646222246ef19818a789ca60d0299984 | 685d1b5b08113738083ac8a449cc56b5ce10cee2 | refs/heads/master | 2022-06-20T03:22:49.768666 | 2022-05-19T16:43:27 | 2022-05-19T16:43:27 | 80,053,206 | 271 | 57 | MIT | false | 2022-05-19T16:43:28 | 2017-01-25T20:10:37 | 2022-05-09T12:19:52 | 2022-05-19T16:43:27 | 247 | 233 | 42 | 7 | Python | false | false | """
This example script demonstrates how to use ``aioredlock`` with Sentinels_.
Sentinels are useful when you want to make sure that you are always hitting
the master redis instance in your cluster, even after failover.
In order to run this script, make sure to start the docker-compose setup.
.. code-block:: bash
docker-compose up -d
docker-compose logs -f sentinel # to follow the logs for the sentinel to see the failover
And then in another terminal run the following command to execute this script.
.. code-block:: bash
python -m examples.sentinel
.. note::
If you are running on a Mac, you will need to enable TunTap_ so that the
docker container ip addresses on the bridge are accessible from the mac
host.
.. note::
This example script requires that the ``example`` extras be installed.
.. code-block:: bash
pip install -e .[examples]
.. _Sentinels: https://redis.io/topics/sentinel
.. _TunTap: https://github.com/AlmirKadric-Published/docker-tuntap-osx
"""
import asyncio
import logging
import aiodocker
from aioredlock import Aioredlock, LockError, LockAcquiringError, Sentinel
async def get_container(name):
docker = aiodocker.Docker()
return await docker.containers.get(name)
async def get_container_ip(name, network=None):
container = await get_container(name)
return container['NetworkSettings']['Networks'][network or 'aioredlock_backend']['IPAddress']
async def lock_context():
sentinel_ip = await get_container_ip('aioredlock_sentinel_1')
lock_manager = Aioredlock([
Sentinel('redis://{0}:26379/0?master=leader'.format(sentinel_ip)),
Sentinel('redis://{0}:26379/1?master=leader'.format(sentinel_ip)),
Sentinel('redis://{0}:26379/2?master=leader'.format(sentinel_ip)),
Sentinel('redis://{0}:26379/3?master=leader'.format(sentinel_ip)),
])
if await lock_manager.is_locked("resource"):
print('The resource is already acquired')
try:
# if you dont set your lock's lock_timeout, its lifetime will be automatically extended
async with await lock_manager.lock("resource") as lock:
assert lock.valid is True
assert await lock_manager.is_locked("resource") is True
# pause leader to simulate a failing node and cause a failover
container = await get_container('aioredlock_leader_1')
await container.pause()
# Do your stuff having the lock
await asyncio.sleep(lock_manager.internal_lock_timeout * 2)
# lock manager will extend the lock automatically
assert await lock_manager.is_locked(lock)
# or you can extend your lock's lifetime manually
await lock.extend()
# Do more stuff having the lock and if you spend much more time than you expected, the lock might be freed
await container.unpause()
assert lock.valid is False # lock will be released by context manager
except LockAcquiringError:
print('Something happened during normal operation. We just log it.')
except LockError:
print('Something is really wrong and we prefer to raise the exception')
raise
assert lock.valid is False
assert await lock_manager.is_locked("resource") is False
await lock_manager.destroy()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
loop.run_until_complete(lock_context())
| UTF-8 | Python | false | false | 3,503 | py | 27 | sentinel.py | 18 | 0.687411 | 0.678561 | 0 | 102 | 33.343137 | 118 |
rocky1990/AStar-Pathfinder | 6,820,408,103,418 | fe6e2cdd332c25b9ee49daa8c13bfeccee96cf4a | 32b0f928d9174ef79843edc6c24a69bdd5399ae8 | /pathfinder/heap.py | 4c8f22e077d15b044079dbf8063daa1bc40fe6b3 | []
| no_license | https://github.com/rocky1990/AStar-Pathfinder | c13893daa87801045ee79c932c9c502a73b33c08 | 1affd8ebf57f2a49954c60d12fae41de244a89cf | refs/heads/master | 2021-01-20T04:25:02.052114 | 2011-02-13T13:33:05 | 2011-02-13T13:33:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import heapq
class Heap:
def __init__(self):
self.heap = []
self.set = {}
self.nodes = []
self.free_spaces = []
self.next_free = 0
def len(self):
return len(self.nodes)
def push(self, node):
if node.gridsquare in self.set:
index = self.set[node.gridsquare]
old_node = self.nodes[index]
if node.g_cost < old_node.g_cost:
self.nodes[index] = node
else:
if len(self.free_spaces) == 0:
index = self.next_free
self.next_free += 1
self.nodes.append(node)
else:
index = self.free_spaces.pop()
self.nodes[index] = node
heapq.heappush(self.heap, (node.cost, index))
self.set[node.gridsquare] = index
#removes and returns the item with the smallest key from the heap
def pop(self):
if self.len() <= 0:
return None
else:
(cost, index) = heapq.heappop(self.heap)
self.free_spaces.append(index)
return self.nodes[index]
| UTF-8 | Python | false | false | 1,164 | py | 7 | heap.py | 7 | 0.494845 | 0.491409 | 0 | 40 | 27.925 | 69 |
KatjaSchimmel/Robofont-helpers | 12,429,635,359,863 | 07c774662042c67fa11006373077632f8dc9a35b | 3eca9317d9ef47f1653c0c694c222a86978a9f6d | /draw_guideline_in_middle_of_stems.py | c62e50f08d041c73e508874c3c4208d53e702290 | []
| no_license | https://github.com/KatjaSchimmel/Robofont-helpers | 9a283ab749e4fd942eeca0c55bf1e9910f20e3d5 | 50ea1bdc9074d73851f0f83224fb1658315aa4ad | refs/heads/master | 2020-04-27T05:55:34.970752 | 2020-02-19T10:02:31 | 2020-02-19T10:02:31 | 174,094,505 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # select two points on a stem
# check direction of the contour (horizontal or vertical)
# draw a horizontal or vertical guideline in the middle between those points
font = CurrentFont()
glyph = CurrentGlyph()
points_selected = []
#find coordinates of selected points and add them to list
for contour in glyph:
for seg in contour:
for point in seg:
if point.selected:
points_selected.append((point.x, point.y))
if len(points_selected) == 0:
print('!!! no points selected !!!')
elif len(points_selected) == 1:
print('!!! select one more point !!!')
elif len(points_selected) >= 3:
print('!!! too many points selected !!!')
elif len(points_selected) == 2:
def middle(distance):
return distance / 2
def starting_point(coordinate):
if p1[coordinate] > p2[coordinate]:
sp = p2[coordinate]
elif p2[coordinate] > p1[coordinate]:
sp = p1[coordinate]
else: # points are at the same x value
sp = p2[coordinate]
return sp
def guide_position(p, middle):
return p + middle #adding x or y value of middle to the first point
#assigning coordinates to points
p1 = points_selected[0]
p2 = points_selected[1]
#defining the distance and middle between points
distance_x = abs(p2[0]-p1[0])
distance_y = abs(p2[1]-p1[1])
middle_x = middle(distance_x)
middle_y = middle(distance_y)
#x and y will be used as 'coordinate' and be placed in []
x = 0 # x = 0 because the x-value is the first value of p1 and p2
y = 1 # x = 1 because the y-value is the second value of p1 and p2
p_x = starting_point(x)
p_y = starting_point(y)
guide_x = guide_position(p_x, middle_x)
guide_y = guide_position(p_y, middle_y)
# check area around points (north, east, south, west) to decide contour direction
p1n = (p1[0], p1[1]+1)
p1e = (p1[0] +1, p1[1])
p1s = (p1[0], p1[1]-1)
p1w = (p1[0] -1, p1[1])
p2n = (p2[0], p2[1]+1)
p2e = (p2[0] +1, p2[1])
p2s = (p2[0], p2[1]-1)
p2w = (p2[0] -1, p2[1])
v_guidelines = [] #vertical guidelines
h_guidelines = [] #horizontal guidelines
#adding all existing guidelines to a list
for guidelines in glyph:
guidelines = list(glyph.guidelines)
for guideline in guidelines:
# split x and y coordinates of the guidelines and add them to the list of horizontal or vertical guidelines
v_guidelines.append(guideline.x)
h_guidelines.append(guideline.y)
# draw a horizontal or vertical guideline depending on which points are inside and outside the contour of the glyph
if any (guide_x in v_guidelines for guideline in guidelines): #check list of vertical guidelines if there is already a guideline at this position
print('gibtsscho')
elif glyph.pointInside((p1w)) == True and glyph.pointInside((p1e)) == False and glyph.pointInside((p2e)) == True and glyph.pointInside((p2w)) == False:
print(glyph.name, ', ', 'p1', p1,', p2', p2, ', vertical stem, ', 'width:', distance_x,', middle guide at x', guide_x)
glyph.appendGuideline((guide_x,-100), 90)
elif glyph.pointInside((p1e)) == True and glyph.pointInside((p1w)) == False and glyph.pointInside((p2w)) == True and glyph.pointInside((p2e)) == False:
print(glyph.name, ', ', 'p1', p1,', p2', p2,', vertical stem, ', 'width:', distance_x,', middle guide at x', guide_x)
glyph.appendGuideline((guide_x,-100), 90)
if any (guide_y in h_guidelines for guideline in guidelines): #check list of horizontal guidelines if there is already a guideline at this position
print('gibtsscho')
elif glyph.pointInside((p1n)) == True and glyph.pointInside((p1s)) == False and glyph.pointInside((p2s)) == True and glyph.pointInside((p2n)) == False:
print(glyph.name, ', ', 'p1', p1,', p2', p2, ', horizontal stem, ', 'width:', distance_y,', middle guide at y', guide_y)
glyph.appendGuideline((-100, guide_y), 0)
elif glyph.pointInside((p1s)) == True and glyph.pointInside((p1n)) == False and glyph.pointInside((p2n)) == True and glyph.pointInside((p2s)) == False:
print(glyph.name, ', ', 'p1', p1,', p2', p2, ', horizontal stem, ', 'width:', distance_y,', middle guide at y', guide_y)
glyph.appendGuideline((-100, guide_y), 0)
| UTF-8 | Python | false | false | 4,476 | py | 10 | draw_guideline_in_middle_of_stems.py | 9 | 0.619303 | 0.590259 | 0 | 98 | 44.663265 | 156 |
mlukewizard/3DSegmentation | 5,798,205,894,091 | 4a50a770917fb1065d842057c4cd937e04e28d20 | e4902b146a6d77026944d08c67e155690ac9f6ad | /predict3D.py | 18acdeb7b59c3c6dc7450ffb3504a810913dc40b | []
| no_license | https://github.com/mlukewizard/3DSegmentation | 61e589d579d6b00d983ed08338f4e7d9b0d6f7fa | 5665bc70579c0f288e68bd8b65177e98c177f0e7 | refs/heads/master | 2021-05-06T17:50:09.136491 | 2019-01-18T21:57:25 | 2019-01-18T21:57:25 | 111,852,420 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, ConvLSTM2D, TimeDistributed, Bidirectional
from keras.optimizers import Adam
from keras import losses
import numpy as np
import h5py
import matplotlib
import matplotlib.pyplot as plt
model_file = '/media/sf_sharedFolder/Models/23rdNov/weights.02-0.03.h5'
x = np.load('/media/sf_sharedFolder/npArrays/39894NS/3DAugment001-002PatientNS_Original.npy')
fig = plt.figure()
#model = load_model(model_file)
npImageArray = np.ndarray((1, 5, 256, 256, 1), dtype='float32')
for i in range(200, 400, 50):
x1 = x[i, 0, :, :, 0]
x2 = x[i, 1, :, :, 0]
x3 = x[i, 2, :, :, 0]
x4 = x[i, 3, :, :, 0]
x5 = x[i, 4, :, :, 0]
npImageArray[0, :, :, :, 0] = x[i, :, :, :, 0]
#y = model.predict(npImageArray)
#y1 = y[0, 2, :, :, 0]
#plt.subplot(121)
#plt.imshow(x3, cmap='gray')
#plt.subplot(122)
#plt.imshow(y1, cmap='gray')
#plt.show()
plt.subplot(151)
plt.imshow(x1, cmap='gray')
plt.subplot(152)
plt.imshow(x2, cmap='gray')
plt.subplot(153)
plt.imshow(x3, cmap='gray')
plt.subplot(154)
plt.imshow(x4, cmap='gray')
plt.subplot(155)
plt.imshow(x5, cmap='gray')
plt.show()
| UTF-8 | Python | false | false | 1,407 | py | 4 | predict3D.py | 3 | 0.617626 | 0.550817 | 0 | 48 | 27.3125 | 123 |
meejah/txtorcon | 5,214,090,310,530 | d076c6de2ca1a44c5ddd67defdc877fce16d69fe | 7341a050b77bba6fd1fe53e0caee6f5d491789ad | /test/test_router.py | 8faa5add821ca4096162b17f70d58fddfb9c139d | [
"MIT"
]
| permissive | https://github.com/meejah/txtorcon | 45b60a0b812cc8e647c00d47c4eedfaf59d6e2fd | 668d829fe061cbc90e74c494c708e59594599794 | refs/heads/main | 2023-08-31T08:31:35.772972 | 2023-08-30T17:36:46 | 2023-08-30T17:36:46 | 3,852,351 | 193 | 66 | MIT | false | 2023-08-30T17:36:47 | 2012-03-28T06:51:27 | 2023-08-24T23:47:23 | 2023-08-30T17:36:46 | 3,900 | 243 | 72 | 46 | Python | false | false | import json
from datetime import datetime
from mock import Mock
from twisted.trial import unittest
from twisted.internet import defer
from twisted.python.failure import Failure
from twisted.web.client import ResponseDone
from txtorcon.router import Router, hexIdFromHash, hashFromHexId
class FakeController(object):
def get_info_raw(self, i):
return defer.succeed('250-ip-to-country/something=XX\r\n250 OK')
class UtilityTests(unittest.TestCase):
def test_hex_converters(self):
self.assertEqual(
hexIdFromHash('AHhuQ8zFQJdT8l42Axxc6m6kNwI'),
'$00786E43CCC5409753F25E36031C5CEA6EA43702'
)
self.assertEqual(
hashFromHexId('$00786E43CCC5409753F25E36031C5CEA6EA43702'),
'AHhuQ8zFQJdT8l42Axxc6m6kNwI'
)
# should work with or without leading $
self.assertEqual(
hexIdFromHash(hashFromHexId('00786E43CCC5409753F25E36031C5CEA6EA43702')),
'$00786E43CCC5409753F25E36031C5CEA6EA43702'
)
class RouterTests(unittest.TestCase):
def test_ctor(self):
controller = object()
router = Router(controller)
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"77.183.225.114",
"24051", "24052")
self.assertEqual(
router.id_hex,
"$00786E43CCC5409753F25E36031C5CEA6EA43702"
)
# we assert this twice to cover the cached + uncached cases
self.assertTrue(isinstance(router.modified, datetime))
self.assertTrue(isinstance(router.modified, datetime))
self.assertEqual(router.policy, '')
def test_unique_name(self):
controller = object()
router = Router(controller)
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"77.183.225.114",
"24051", "24052")
self.assertEqual(
router.id_hex,
"$00786E43CCC5409753F25E36031C5CEA6EA43702"
)
self.assertEqual(
router.unique_name,
"$00786E43CCC5409753F25E36031C5CEA6EA43702"
)
router.flags = ['Named']
self.assertEqual(router.unique_name, "foo")
def test_flags(self):
controller = object()
router = Router(controller)
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"77.183.225.114",
"24051", "24052")
router.flags = "Exit Fast Named Running V2Dir Valid".split()
self.assertEqual(router.name_is_unique, True)
def test_flags_from_string(self):
controller = object()
router = Router(controller)
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"77.183.225.114",
"24051", "24052")
router.flags = "Exit Fast Named Running V2Dir Valid"
self.assertEqual(router.name_is_unique, True)
def test_policy_accept(self):
controller = object()
router = Router(controller)
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"77.183.225.114",
"24051", "24052")
router.policy = "accept 25,128-256".split()
self.assertTrue(router.accepts_port(25))
for x in range(128, 256):
self.assertTrue(router.accepts_port(x))
self.assertTrue(not router.accepts_port(26))
self.assertEqual(router.policy, 'accept 25,128-256')
def test_policy_reject(self):
controller = object()
router = Router(controller)
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"77.183.225.114",
"24051", "24052")
router.policy = "reject 500-600,655,7766".split()
for x in range(1, 500):
self.assertTrue(router.accepts_port(x))
for x in range(500, 601):
self.assertTrue(not router.accepts_port(x))
self.assertEqual(router.policy, 'reject 500-600,655,7766')
def test_countrycode(self):
class CountryCodeController(object):
def get_info_raw(self, i):
return defer.succeed(
'250-ip-to-country/127.1.2.3=ZZ\r\n250 OK'
)
controller = CountryCodeController()
router = Router(controller)
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"127.1.2.3",
"24051", "24052")
self.assertEqual(router.location.countrycode, 'ZZ')
@defer.inlineCallbacks
def test_get_location_private(self):
class CountryCodeController(object):
def get_info_raw(self, i):
return defer.succeed(
'250-ip-to-country/192.168.0.1=ZZ\r\n250 OK'
)
controller = CountryCodeController()
r = Router(controller)
r.update('routername', 'deadbeef', 'orhash', 'modified', '192.168.0.1', '', '')
loc0 = yield r.get_location()
loc1 = yield r.get_location()
self.assertEqual(loc0.countrycode, 'ZZ')
self.assertEqual(loc1.countrycode, 'ZZ')
@defer.inlineCallbacks
def test_get_location_something(self):
class CountryCodeController(object):
def get_info_raw(self, i):
return defer.succeed(
'250-ip-to-country/8.8.8.8=US\r\n250 OK'
)
controller = CountryCodeController()
r = Router(controller)
r.update('routername', 'deadbeef', 'orhash', 'modified', '8.8.8.8', '', '')
loc = yield r.get_location()
self.assertNotEqual(loc.countrycode, None)
@defer.inlineCallbacks
def test_get_location_unknown(self):
class CountryCodeController(object):
def get_info_raw(self, i):
raise RuntimeError("shouldn't happen")
controller = CountryCodeController()
r = Router(controller)
loc = yield r.get_location()
self.assertEqual(loc.countrycode, None)
def test_policy_error(self):
router = Router(object())
try:
router.policy = 'foo 123'
self.fail()
except Exception as e:
self.assertTrue("Don't understand" in str(e))
def test_policy_not_set_error(self):
router = Router(object())
try:
router.accepts_port(123)
self.fail()
except Exception as e:
self.assertTrue("policy" in str(e))
def test_repr(self):
router = Router(FakeController())
router.update("foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"1.2.3.4",
"24051", "24052")
router.flags = ['Named']
repr(router)
def test_repr_no_update(self):
router = Router(FakeController())
repr(router)
class OnionOOTests(unittest.TestCase):
def setUp(self):
self.router = Router(FakeController())
self.router.update(
"foo",
"AHhuQ8zFQJdT8l42Axxc6m6kNwI",
"MAANkj30tnFvmoh7FsjVFr+cmcs",
"2011-12-16 15:11:34",
"1.2.3.4",
"24051", "24052"
)
@defer.inlineCallbacks
def test_onionoo_get_fails(self):
agent = Mock()
resp = Mock()
resp.code = 500
agent.request = Mock(return_value=defer.succeed(resp))
with self.assertRaises(Exception) as ctx:
yield self.router.get_onionoo_details(agent)
self.assertTrue(
"Failed to lookup" in str(ctx.exception)
)
@defer.inlineCallbacks
def test_onionoo_success(self):
agent = Mock()
resp = Mock()
resp.code = 200
def feed_response(protocol):
config = {
"relays": [
{
"fingerprint": "00786E43CCC5409753F25E36031C5CEA6EA43702",
},
]
}
protocol.dataReceived(json.dumps(config).encode())
protocol.connectionLost(Failure(ResponseDone()))
resp.deliverBody = Mock(side_effect=feed_response)
agent.request = Mock(return_value=defer.succeed(resp))
data = yield self.router.get_onionoo_details(agent)
self.assertTrue('fingerprint' in data)
self.assertTrue(data['fingerprint'] == "00786E43CCC5409753F25E36031C5CEA6EA43702")
@defer.inlineCallbacks
def test_onionoo_too_many_answers(self):
agent = Mock()
resp = Mock()
resp.code = 200
def feed_response(protocol):
config = {
"relays": [
{
"fingerprint": "00786E43CCC5409753F25E36031C5CEA6EA43702",
},
{
"fingerprint": "boom",
}
]
}
protocol.dataReceived(json.dumps(config).encode())
protocol.connectionLost(Failure(ResponseDone()))
resp.deliverBody = Mock(side_effect=feed_response)
agent.request = Mock(return_value=defer.succeed(resp))
with self.assertRaises(Exception) as ctx:
yield self.router.get_onionoo_details(agent)
self.assertTrue(
"multiple relays for" in str(ctx.exception)
)
@defer.inlineCallbacks
def test_onionoo_wrong_fingerprint(self):
agent = Mock()
resp = Mock()
resp.code = 200
def feed_response(protocol):
config = {
"relays": [
{
"fingerprint": "boom",
},
]
}
protocol.dataReceived(json.dumps(config).encode())
protocol.connectionLost(Failure(ResponseDone()))
resp.deliverBody = Mock(side_effect=feed_response)
agent.request = Mock(return_value=defer.succeed(resp))
with self.assertRaises(Exception) as ctx:
yield self.router.get_onionoo_details(agent)
self.assertTrue(
" but got data for " in str(ctx.exception)
)
| UTF-8 | Python | false | false | 11,110 | py | 183 | test_router.py | 89 | 0.548155 | 0.475338 | 0 | 331 | 32.564955 | 90 |
hdedhiya/skyline | 3,324,304,692,394 | 2d9286f52114ea4efa7a49452e1e3dbd17874c52 | 5140125c9fc1d9bb468fa6c5b43da8206810ce55 | /src/test/python/visualize.py | 134d622bb513a119b846f6fa7d79cf1f668fbbec | []
| no_license | https://github.com/hdedhiya/skyline | 015c0bb70fc822210674ced5edac6585f21922d2 | 6ef78e288c704293b4b8b629a6cdeefc451d0082 | refs/heads/master | 2022-04-11T03:07:17.303214 | 2020-03-23T22:19:01 | 2020-03-23T22:19:01 | 247,558,175 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy as np
skyline = open('../../../target/skyline/skyline-1.txt', 'r')
points = open('../java/db/dataset1.txt', 'r')
modpoint = [34.78, 19.24] # Point inserted / deleted
p = []
for line in points:
if len(line) != 0:
tokens = line.strip().split(' ')
if len(tokens) == 2:
p.append((float(tokens[0]), float(tokens[1])))
points.close()
s = []
for line in skyline:
if len(line) != 0:
tokens = line.strip().split(' ')
if len(tokens) == 2:
s.append((float(tokens[0]), float(tokens[1])))
skyline.close()
# p = np.append(np.array(p), [modpoint], axis=0) # For insertion
p = np.array(p)
s = np.array(s)
plt.title("Deletion of (34.78, 19.24) Causing Change in Skyline")
plt.scatter(p[:,0], p[:,1])
plt.scatter(s[:,0], s[:,1], c='r')
plt.scatter(modpoint[0], modpoint[1], c='y', marker='x') # Show modified point
plt.show()
plt.close() | UTF-8 | Python | false | false | 935 | py | 6 | visualize.py | 4 | 0.589305 | 0.554011 | 0 | 35 | 25.742857 | 78 |
Hsintien-Ng/age_exp | 10,170,482,581,423 | 9c3993cdce699bf1b7ea5d2e01df2ded63f3d207 | 4e518c51762a95bf8633bf25115832209b77156a | /main.py | 71a61479fb95c3d55d6a8b6713ed6d7149f45a66 | []
| no_license | https://github.com/Hsintien-Ng/age_exp | b4c86bfe76035e335eae05b5cb6bfec3943a23cd | cfde29d44497203a6cb15756980fa9beb4fdfc7a | refs/heads/master | 2021-07-06T14:06:04.682024 | 2020-08-13T03:39:53 | 2020-08-13T03:39:53 | 153,274,842 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from dataset.MORPH import MORPH
from model.plain_cnn_asymmetry import PlainCNN_Asym
from model.plain_cnn_atrous import PlainCNN_Atrous
from model.plain_cnn_asymmetry import plain_parameters_func
from train.calculator.age_mae_calculator import AgeMAECalculator
from train.loss.KL_Divergence import KLDivLoss
from train.loss.focal import FocalLoss
from train.predictor.expection_predictor import ExpectionPredictor
from train.predictor.max_predictor import MaxPredictor
from train.config import Config
from train.trainer import Trainer
from utils.logger import Logger
from utils.model import load_pretrained_func
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def generate_alias(model_cls, task):
assert task == 'Age' or task == 'Exp'
model = model_cls.__name__
from datetime import datetime
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
alias = '{}_{}_{}'.format(task, model, current_time)
return alias
def generate_file_msg(sps, loss, predictor, acc_calc):
sp_descriptor = ''
for sp in sps:
sp_descriptor += '{}:{}\n'.format(sp, sps[sp])
loss_info = '{}:{}\n'.format('loss', loss.get_alias())
predictor_info = '{}:{}\n'.format('predictor', predictor.get_alias())
acc_calc_info = '{}:{}\n'.format('acc_calc', acc_calc.get_alias())
msg = '{}{}{}{}'.format(sp_descriptor, loss_info, predictor_info, acc_calc_info)
return msg
# dir define
index_dir = os.path.join('/', 'home', 'xintian', 'projects', 'age_exp', 'MORPH_Split')
pretrained_model_dir = os.path.join('/', 'home', 'xintian', 'projects', 'age_exp',
'models', 'Age_PlainCNN_Asym_Oct15_23-21-48', 'epoch_28.pkl')
# super param define
sps = {'epoch_num': 20, 'momentum': 0.9, 'weight_decay': 0.0002,
# 'learning_rates': [1e-2, 5e-3, 1e-3, 1e-4, 1e-5],
'learning_rates': [1e-3, 1e-4],
# 'decay_points': [10, 20, 30, 40],
'decay_points': [10],
'batch_size': 64, 'pretrain': True,
'pretrained_model_dir': pretrained_model_dir,
'load_function': load_pretrained_func, 'balance': False}
parameters_func = plain_parameters_func
gpu_id = [0]
# trainer component
#model_cls = PlainCNN_Atrous
model_cls = PlainCNN_Asym
loss = KLDivLoss()
predictor = ExpectionPredictor()
calculator = AgeMAECalculator()
print('dataset')
train_dataset = MORPH(data_dir=index_dir, mode='train', balance=sps['balance'])
print('train complete')
valid_dataset = MORPH(data_dir=index_dir, mode='valid', balance=False)
print('valid complete')
def run(model_cls, loss, predictor, acc_calc, train_dataset, valid_dataset, sps):
# log setting
alias = generate_alias(model_cls, task='Age')
msg = generate_file_msg(sps, loss, predictor, acc_calc)
tb_log_path = os.path.join('runs', alias)
save_dir = os.path.join('models', alias)
logger_alias = alias
config = Config(epoch_num=sps['epoch_num'], momentum=sps['momentum'], weight_decay=sps['weight_decay'],
learning_rates=sps['learning_rates'], decay_points=sps['decay_points'],
batch_size=sps['batch_size'], parameters_func=parameters_func,
tb_log_path=tb_log_path, save_dir=save_dir, pretrain=sps['pretrain'],
pretrained_model_dir=sps['pretrained_model_dir'],
load_function=sps['load_function'], logger_alias=logger_alias, gpu_id=gpu_id)
logger = Logger()
logger.open_file(os.path.join('log'), alias=alias, file_name=alias+'.txt', file_msg=msg)
trainer = Trainer(model_cls=model_cls, loss=loss, predictor=predictor, calculator=calculator,
train_dataset=train_dataset, val_dataset=valid_dataset, config=config, logger=logger)
trainer.train()
logger.close_file(alias)
if __name__ == '__main__':
run(model_cls, loss, predictor, calculator, train_dataset, valid_dataset, sps)
# loss = FocalLoss(2)
# run(model_cls, loss, predictor, calculator, train_dataset, val_dataset, sps)
# loss = FocalLoss(5)
# run(model_cls, loss, predictor, calculator, train_dataset, val_dataset, sps)
| UTF-8 | Python | false | false | 4,146 | py | 42,727 | main.py | 37 | 0.657501 | 0.645683 | 0 | 101 | 40.049505 | 107 |
reusee/defphp | 8,272,107,040,408 | f66984a0577084fec0276992a6ad3d1beb5e299c | c11cac44a0ce73d09eee422aa3911f2e58d97ab8 | /make_html_specification.py | 9602bb91c7547c21e92e47904faec84cf7352960 | []
| no_license | https://github.com/reusee/defphp | a756989491e10b3c67b03146ec3154439fff41a0 | 627d1ccf8db507c11d0d8c99554f0cad9f867150 | refs/heads/master | 2021-01-10T18:54:09.102883 | 2012-03-25T07:29:34 | 2012-03-25T07:29:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib2
import re
url = 'http://dev.w3.org/html5/spec/index.html#attributes-1'
content = urllib2.urlopen(url).read()
start = content.index('Attributes</h3>')
start = content.index('<tbody>', start)
end = content.index('</tbody>', start)
rows = content[start:end].split('<tr>')
attribute_dict = {}
for row in rows:
cols = row.split('<td>')
cols = [re.sub('<[^>]*>', '', col).strip() for col in cols]
cols = [x for x in cols if x != '']
if cols == []: continue
attribute = cols[0]
tags = [x.strip() for x in cols[1].split(';')]
if not attribute_dict.has_key(attribute):
attribute_dict[attribute] = []
attribute_dict[attribute] += tags
start = content.index('Elements</h3>')
start = content.index('<tbody>', start)
end = content.index('</tbody>', start)
rows = content[start:end].split('<tr>')
non_self_terminating_tags = []
self_terminating_tags = []
for row in rows:
cols = row.split('<td>')
cols = [re.sub('<[^>]*>', '', col).strip() for col in cols]
cols = [x for x in cols if x != '']
if cols == []: continue
elements = [x.strip() for x in cols[0].split(',')]
if cols[4] == 'empty':
self_terminating_tags += elements
else:
non_self_terminating_tags += elements
output_file = open('html_specification.py', 'w')
output_file.write('html_attributes = {\n')
for attribute in attribute_dict:
output_file.write(" '%s': [\n" % attribute)
for tag in attribute_dict[attribute]:
if tag == 'HTML elements': tag = '*'
output_file.write(" '%s',\n" % tag)
output_file.write(" ],\n")
output_file.write('}\n')
output_file.write('self_terminating_tags = [\n')
for element in self_terminating_tags:
output_file.write(" '%s',\n" % element)
output_file.write(']\n')
output_file.write('non_self_terminating_tags = [\n')
for element in non_self_terminating_tags:
output_file.write(" '%s',\n" % element)
output_file.write(']\n')
output_file.close()
| UTF-8 | Python | false | false | 1,912 | py | 34 | make_html_specification.py | 33 | 0.632322 | 0.626569 | 0 | 62 | 29.83871 | 61 |
gvwilson/tools | 3,470,333,612,197 | cc897488deb0d103f80e1b72f473a4b0760b3f6a | 391c2e8d839a9278d356c0fc21ecacffd939ed84 | /topen | f0d05cf0acce4a40d2163d262b362cbcd9264746 | []
| no_license | https://github.com/gvwilson/tools | 0ba19e4a56fa838ed71c93d9caeb2cd380bd015c | f43f8eb19109b5088eaccd46e8cad7bd6055f1db | refs/heads/master | 2019-07-15T04:48:37.078615 | 2019-01-19T19:10:01 | 2019-01-19T19:10:01 | 26,680,389 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import sys
import os
import re
pat = re.compile(r'(\(.\) )?(\d{4}-\d{2}-\d{2} ).*(http.?://[^ ]+).*')
target = int(sys.argv[1])
with open(os.path.join(os.environ['TODO_DIR'], 'todo.txt'), 'r') as reader:
for (i, line) in enumerate(reader.readlines()):
m = pat.search(line)
if m:
url = m.group(3)
if (i+1) == target:
os.system('open {}'.format(url))
| UTF-8 | Python | false | false | 434 | 35 | topen | 34 | 0.511521 | 0.497696 | 0 | 16 | 26.125 | 75 |
|
chrisritchie1994/Compendium_Deployment | 10,703,058,528,578 | a41587ee258f5d68665df90cde38f97541b786ce | 54b4f76c358a19a2fba43f0c49026f2c08e5eaa6 | /Compendium/compendium_app/migrations/0002_auto_20181118_1723.py | 8f2088aef3142d160f5fb43b2fd21e0d71aa5464 | []
| no_license | https://github.com/chrisritchie1994/Compendium_Deployment | 9a50de0b8b0576c34baeceace2acb296f6ac5af0 | 918cd5b2631c7b24ff6f250043387f74d66c2bef | refs/heads/master | 2020-04-14T13:11:47.109988 | 2019-01-03T14:25:22 | 2019-01-03T14:25:22 | 163,857,613 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1.3 on 2018-11-18 07:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('compendium_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Idea',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('entry', models.TextField()),
('idea', models.TextField()),
('application', models.TextField()),
],
),
migrations.RenameField(
model_name='journal',
old_name='id',
new_name='journal_id',
),
migrations.AddField(
model_name='idea',
name='journal_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='compendium_app.Journal'),
),
migrations.AddField(
model_name='idea',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| UTF-8 | Python | false | false | 1,248 | py | 33 | 0002_auto_20181118_1723.py | 21 | 0.564103 | 0.548878 | 0 | 40 | 30.2 | 110 |
google/skia | 6,665,789,267,961 | 824ace81e6fb5b42b45da522a64a1d57385aa2c5 | 0bcd128368e2de959ca648960ffd7944067fcf27 | /tools/skpbench/skiaperf.py | 23608a5875b09e58f80bc3f3623064dc1f129aa2 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/google/skia | ac6e39179cd33cf0c8a46d29c1a70bf78b4d74ee | bf6b239838d3eb56562fffd0856f4047867ae771 | refs/heads/main | 2023-08-31T21:03:04.620734 | 2023-08-31T18:24:15 | 2023-08-31T20:20:26 | 15,773,229 | 8,064 | 1,487 | BSD-3-Clause | false | 2023-09-11T13:42:07 | 2014-01-09T17:09:57 | 2023-09-11T01:21:26 | 2023-09-11T05:10:41 | 874,952 | 7,866 | 1,376 | 23 | C++ | false | false | #!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from _benchresult import BenchResult
from argparse import ArgumentParser
from collections import defaultdict
import json
import sys
__argparse = ArgumentParser(description="""
Formats skpbench.py outputs for Skia Perf.
""")
__argparse.add_argument('sources',
nargs='+', help="source files that contain skpbench results")
__argparse.add_argument('--properties',
nargs='*', help="space-separated key/value pairs identifying the run")
__argparse.add_argument('--key',
nargs='*', help="space-separated key/value pairs identifying the builder")
__argparse.add_argument('-o', '--outfile',
default='-', help="output file ('-' for stdout)")
FLAGS = __argparse.parse_args()
class JSONDict(dict):
"""Simple class for building a JSON dictionary
Returns another JSONDict upon accessing an undefined item. Does not allow an
item to change once it has been inserted.
"""
def __init__(self, key_value_pairs=None):
dict.__init__(self)
if not key_value_pairs:
return
if len(key_value_pairs) % 2:
raise Exception("uneven number of key/value arguments.")
for k,v in zip(key_value_pairs[::2], key_value_pairs[1::2]):
self[k] = v
def __getitem__(self, key):
if not key in self:
dict.__setitem__(self, key, JSONDict())
return dict.__getitem__(self, key)
def __setitem__(self, key, val):
if key in self:
raise Exception("%s: tried to set already-defined JSONDict item\n"
" old value: '%s'\n"
" new value: '%s'" % (key, self[key], val))
dict.__setitem__(self, key, val)
def emit(self, outfile):
json.dump(self, outfile, indent=4, separators=(',', ' : '), sort_keys=True)
print('', file=outfile)
def main():
data = JSONDict(
FLAGS.properties + \
['key', JSONDict(FLAGS.key + \
['bench_type', 'playback', \
'source_type', 'skp'])])
for src in FLAGS.sources:
with open(src, mode='r') as infile:
for line in infile:
match = BenchResult.match(line)
if not match:
continue
if match.sample_ms != 50:
raise Exception("%s: unexpected sample_ms != 50" % match.sample_ms)
for result in ('accum', 'median', 'min', 'max'):
data['results'][match.bench][match.config] \
['%s_%s_%s' % (result, match.clock, match.metric)] = \
getattr(match, result)
if FLAGS.outfile != '-':
with open(FLAGS.outfile, 'w+') as outfile:
data.emit(outfile)
else:
data.emit(sys.stdout)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,784 | py | 7,235 | skiaperf.py | 5,176 | 0.615302 | 0.610632 | 0 | 92 | 29.26087 | 79 |
bbaobelief/aio_proxy | 3,719,441,688,128 | 0e294749ab73f2cb495bfc14eea8faecaa2b7bb9 | 20243a459956b3736c510e48e5f0e751d3195e9f | /crawler/sp.py | 98652c149855854fa34ce6755068e1563328d295 | []
| no_license | https://github.com/bbaobelief/aio_proxy | 07a249067e292c75e79c01f0eef119c248e659c7 | 8f57477b4eff7de0985507e890a33f156a47afea | refs/heads/master | 2020-03-21T15:30:56.741143 | 2018-08-23T06:41:02 | 2018-08-23T06:41:02 | 138,716,918 | 0 | 0 | null | false | 2021-03-31T19:01:57 | 2018-06-26T09:39:55 | 2018-08-23T06:41:35 | 2021-03-31T19:01:56 | 3,698 | 0 | 0 | 4 | Python | false | false | import aiohttp
import asyncio
#proxy="http://180.97.193.58:3128"
proxy="http://114.250.25.19:80"
async def get():
async with aiohttp.ClientSession() as session:
async with session.get('https://www.baidu.com', timeout=20, proxy=proxy) as resp:
print(resp.status)
assert resp.status == 200
print(await resp.text())
# html = await resp.text(encoding='utf-8')
loop = asyncio.get_event_loop()
loop.run_until_complete(get())
| UTF-8 | Python | false | false | 452 | py | 11 | sp.py | 8 | 0.676991 | 0.606195 | 0 | 17 | 25.588235 | 85 |
iskracat/tlspu.cookiepolicy | 214,748,405,842 | 394da563b11e6e13e14c64f13bad36fa0060af36 | 2b02f940a59de3ea9ca8c420d76f5ff39f07ef80 | /tlspu/cookiepolicy/browser/viewlets.py | 18b2595ac8716b707d40e47c0cccae27ee5f8167 | []
| no_license | https://github.com/iskracat/tlspu.cookiepolicy | 0a2c5bdac34a257a2926123ead9441f67b4c63a6 | 5291b8f4259b8f83a4ac673c08404e12d58d53f4 | refs/heads/master | 2021-06-07T14:19:35.741418 | 2017-06-14T08:33:39 | 2017-06-14T08:33:39 | 31,893,483 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.CMFCore.utils import getToolByName
from plone.app.layout.viewlets import ViewletBase
from plone.registry.interfaces import IRegistry
from zope.component import getUtility
class CookiePolicyViewlet(ViewletBase):
enabled = False
title = ""
message = ""
render = ViewPageTemplateFile("templates/cookiepolicy.pt")
def __init__(self, context, request, view, manager):
super(CookiePolicyViewlet, self).__init__(context, request, view,
manager)
registry = getUtility(IRegistry)
enabled = registry.records.get('tlspu.cookiepolicy.TCP_enabled').value
title = registry.records.get('tlspu.cookiepolicy.TCP_title').value
message = registry.records.get('tlspu.cookiepolicy.TCP_message').value
self.enabled = enabled
self.title = title or "Cookie Warning"
self.message = (message or
"This site uses cookies but the owner has not explained why!")
def update(self):
return
| UTF-8 | Python | false | false | 1,113 | py | 7 | viewlets.py | 3 | 0.678347 | 0.678347 | 0 | 31 | 34.903226 | 78 |
langus0/imgrid | 16,269,336,165,454 | 9098a7d18012bf5d2305ba22976347add17f915e | 585699fb489bc420fcdc048d2ac15577a695a40d | /mesh-clusterer/clustering/mesh/equalwidth_mesh.py | ed0f86df91d20d5b428983e3e2be7bd6a3a11254 | []
| no_license | https://github.com/langus0/imgrid | fbcae3368cd3c15eecdb0d95ba8824df7232ceb7 | d12c2a8c34b494ef048528818e324f48997eac7e | refs/heads/master | 2021-01-10T00:04:03.894826 | 2017-09-21T11:35:06 | 2017-09-21T11:35:06 | 91,773,531 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
from clustering.mesh.mesh import Mesh
from clustering.cube import Cube
from clustering.cube_classifiers.probability_cube_classifier import ProbabilityCubeClassifier, ONLY_MAJORITY, \
MINORITY_CLASS
import numpy as np
import math
import itertools
__author__ = "Mateusz Lango, Dariusz Brzezinski"
class EqualWidthMesh(Mesh):
"""
"""
def __init__(self, data, k_bins, dim_mod="divide"):
num_attributes = len(data[0].data)
predefined_bins = False
if k_bins == "sqrt":
self.k = int(math.sqrt(len(data)))
elif k_bins == "1+3logn":
self.k = int(1 + 3.322 * math.log(len(data)))
elif k_bins == "5logn":
self.k = int(5 * math.log(len(data)))
elif k_bins == "IQR":
k_sum = 0
for attribute_idx in xrange(num_attributes):
attribute_data = []
for element in data:
attribute_data.append(element.data[attribute_idx])
iqr = np.subtract(*np.percentile(attribute_data, [75, 25]))
rng = np.subtract(*np.percentile(attribute_data, [100, 0]))
k_sum += rng / (2.64 * iqr * len(data) ** (-1. / 3.))
self.k = int(k_sum / (num_attributes * 1.0))
elif k_bins == "div10":
self.k = int(len(data) / 10)
else:
self.k = k_bins
predefined_bins = True
if dim_mod is not None and not predefined_bins:
if dim_mod == "root":
self.k = int(math.ceil(self.k ** (1. / num_attributes)))
elif dim_mod == "divide":
self.k = int(math.ceil(self.k / num_attributes))
self.create_mesh(data)
def get_num_dimensions(self):
return len(self.attr2intervals)
def get_cubes(self):
return set(self.mesh.itervalues())
def join_cubes(self, cube1, cube2):
cube1.join(cube2)
for coords in cube2.coordinates:
self.mesh[coords] = cube1
def get_cube(self, coordinates):
assert len(coordinates) == self.get_num_dimensions()
return self.mesh.get(tuple(coordinates), None)
def get_neighbours(self, cube):
for coord in cube.coordinates:
for i in xrange(len(coord)):
for modifier in [-1, +1]:
new_coord = list(coord)
new_coord[i] += modifier
new_coord = tuple(new_coord)
if new_coord in cube.coordinates:
continue
neighbour = self.get_cube(new_coord)
if neighbour is not None:
assert neighbour != cube
yield neighbour
def create_mesh(self, data):
self._attr2intervals(data)
self.mesh = {}
for coord in itertools.product(*([range(self.k)] * self.get_num_dimensions())):
coord = tuple(coord)
self.mesh[coord] = Cube(coord)
for element in data:
self.mesh[self.get_coordinates(element)].add_datapoint(element)
def _attr2intervals(self, data):
self.attr2intervals = []
for attribute_idx in xrange(len(data[0].data)):
min = np.min([i.data[attribute_idx] for i in data])
max = np.max([i.data[attribute_idx] for i in data])
self.attr2intervals.append(self._create_intervals(min, max))
def _create_intervals(self, min, max):
return np.linspace(min, max, num=self.k + 1, endpoint=True)
def get_coordinates(self, element):
coordinates = []
for attribute_idx, range in enumerate(self.attr2intervals):
value = element.data[attribute_idx]
for idx, start_of_interval in enumerate(range):
if value < start_of_interval:
assert idx != 0
coordinates.append(idx - 1)
break
if len(coordinates) == attribute_idx:
coordinates.append(self.k - 1)
return tuple(coordinates)
def convert_to_true_coordinates(self, coordinates):
result = []
for idx, coord in enumerate(coordinates):
result.append(self.attr2intervals[idx][coord])
return result
def get_coord_width(self, coordinates):
result = []
for idx in xrange(len(coordinates)):
result.append(self.attr2intervals[idx][1] - self.attr2intervals[idx][0])
return result
def convert_to_lists(self):
pred = list()
real = list()
for cube in self.get_cubes():
typ = ProbabilityCubeClassifier.class_of_cube(cube)[0]
if typ == ONLY_MAJORITY:
continue
for element in cube.data:
if element.clazz == MINORITY_CLASS:
pred.append(typ)
real.append(element.typeOfExampe)
return pred, real
def count_cubes(self):
return len(self.get_cubes())
def count_minority_cubes(self):
count = 0
for cube in self.get_cubes():
if cube.num_of_class_examples(MINORITY_CLASS) > 0:
count += 1
return count
def generate_labels(self, minority_only=False):
return self.generate_labels_for_evaluation(minority_only)[0]
def generate_labels_for_evaluation(self, minority_only=False):
labels = list()
true_labels = list()
for index, cube in enumerate(self.get_cubes()):
for element in cube.data:
if minority_only and element.clazz != MINORITY_CLASS:
continue
labels.append(index)
true_labels.append(element.clusterIdx)
return labels, true_labels
def count_examples_of_type(self, type_of_examples):
how_many = 0
for cube in self.get_cubes():
if ProbabilityCubeClassifier.class_of_cube(cube)[0] == type_of_examples:
for element in cube.data:
if element.clazz == MINORITY_CLASS:
how_many += 1
return how_many
| UTF-8 | Python | false | false | 6,149 | py | 103 | equalwidth_mesh.py | 22 | 0.554724 | 0.544316 | 0 | 172 | 34.75 | 111 |
GgnDpSngh/ERAN-VNN-COMP | 9,328,669,005,279 | b01bb81457deea219155945019d7603225e04b5b | 1e53c854bd6d6239c584db35c55d1fdcacc00638 | /tf_verify/krelu.py | 2a5f547ae8d5fee33d76930fd6d2390b4b1fb02d | [
"Apache-2.0"
]
| permissive | https://github.com/GgnDpSngh/ERAN-VNN-COMP | 785145f9d8edca74de7d1e54cf67f7423577a9fb | 98734af4d752346b68d3f7b68f75b5b44c605cd8 | refs/heads/master | 2022-12-13T01:40:54.076816 | 2020-08-27T18:12:06 | 2020-08-27T18:12:06 | 290,538,076 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from elina_scalar import *
from elina_dimension import *
from elina_linexpr0 import *
from elina_abstract0 import *
from fppoly import *
import numpy as np
import cdd
import time
import itertools
import multiprocessing
import math
from config import config
"""
From reference manual:
http://web.mit.edu/sage/export/cddlib-094b.dfsg/doc/cddlibman.ps
CDD H-representaion format:
each row represents b + Ax >= 0
example: 2*x_1 - 3*x_2 >= 1 translates to [-1, 2, -3]
CDD V-representaion format:
<code> x_1 x_2 ... x_d
code is 1 for extremal point, 0 for generating direction (rays)
example: extreme point (2, -1, 3) translates to [1, 2, -1, 3]
all polytopes generated here should be closed, hence code=1
"""
def generate_linexpr0(offset, varids, coeffs):
# returns ELINA expression, equivalent to sum_i(varids[i]*coeffs[i])
assert len(varids) == len(coeffs)
n = len(varids)
linexpr0 = elina_linexpr0_alloc(ElinaLinexprDiscr.ELINA_LINEXPR_SPARSE, n)
cst = pointer(linexpr0.contents.cst)
elina_scalar_set_double(cst.contents.val.scalar, 0)
for i, (x, coeffx) in enumerate(zip(varids, coeffs)):
linterm = pointer(linexpr0.contents.p.linterm[i])
linterm.contents.dim = ElinaDim(offset + x)
coeff = pointer(linterm.contents.coeff)
elina_scalar_set_double(coeff.contents.val.scalar, coeffx)
return linexpr0
class Krelu:
def __init__(self, cdd_hrepr):
start = time.time()
# krelu on variables in varsid
#self.varsid = varsid
self.k = len(cdd_hrepr[0])-1
self.cdd_hrepr = cdd_hrepr
#print("LENGTH ", len(cdd_hrepr[0]))
#cdd_hrepr = self.get_ineqs(varsid)
check_pt1 = time.time()
# We get orthant points using exact precision, because it allows to guarantee soundness of the algorithm.
cdd_hrepr = cdd.Matrix(cdd_hrepr, number_type='fraction')
cdd_hrepr.rep_type = cdd.RepType.INEQUALITY
pts = self.get_orthant_points(cdd_hrepr)
# Generate extremal points in the space of variables before and
# after relu
pts = [([1] + row + [x if x>0 else 0 for x in row]) for row in pts]
adjust_constraints_to_make_sound = False
# Floating point CDD is much faster then the precise CDD, however for some inputs it fails
# due to numerical errors. If that is the case we fall back to using precise CDD.
try:
cdd_vrepr = cdd.Matrix(pts, number_type='float')
cdd_vrepr.rep_type = cdd.RepType.GENERATOR
# Convert back to H-repr.
cons = cdd.Polyhedron(cdd_vrepr).get_inequalities()
adjust_constraints_to_make_sound = True
# I don't adjust linearities, so just setting lin_set to an empty set.
self.lin_set = frozenset([])
except:
cdd_vrepr = cdd.Matrix(pts, number_type='fraction')
cdd_vrepr.rep_type = cdd.RepType.GENERATOR
# Convert back to H-repr.
cons = cdd.Polyhedron(cdd_vrepr).get_inequalities()
self.lin_set = cons.lin_set
cons = np.asarray(cons, dtype=np.float64)
        # If floating point CDD was run, then we have to adjust constraints to make sure that they remain sound.
if adjust_constraints_to_make_sound:
pts = np.asarray(pts, dtype=np.float64)
cons_abs = np.abs(cons)
pts_abs = np.abs(pts)
cons_x_pts = np.matmul(cons, np.transpose(pts))
cons_x_pts_err = np.matmul(cons_abs, np.transpose(pts_abs))
            # Since we use double precision, the number of bits representing the fraction is 52.
            # I'll use a generous over-approximation by taking 2^-40 as the relative error coefficient.
rel_err = pow(2, -40)
cons_x_pts_err *= rel_err
cons_x_pts -= cons_x_pts_err
for ci in range(len(cons)):
min_val = np.min(cons_x_pts[ci, :])
if min_val < 0:
cons[ci, 0] -= min_val
# normalize constraints for numerical stability
# more info: http://files.gurobi.com/Numerics.pdf
absmax = np.absolute(cons).max(axis=1)
self.cons = cons/absmax[:, None]
end = time.time()
return
def get_orthant_points(self, cdd_hrepr):
        # Get points of polytope restricted to all possible orthants
pts = []
for polarity in itertools.product([-1, 1], repeat=self.k):
hrepr = cdd_hrepr.copy()
# add constraints to restrict to +ve/-ve half of variables
for i in range(self.k):
row = [0]*(self.k+1)
row[1+i] = polarity[i]
# row corresponds to the half-space x_i>=0 if polarity[i]==+1
hrepr.extend([row])
            # remove redundant constraints
hrepr.canonicalize()
# Convert to V-repr.
pts_new = cdd.Polyhedron(hrepr).get_generators()
assert all(row[0]==1 for row in pts_new)
for row in pts_new:
pts.append(list(row[1:]))
return pts
def make_krelu_obj(varsid):
return Krelu(varsid)
class Krelu_expr:
def __init__(self, expr, varsid, bound):
self.expr = expr
self.varsid = varsid
self.bound = bound
def get_ineqs_zono(varsid):
cdd_hrepr = []
# Get bounds on linear expressions over variables before relu
# Order of coefficients determined by logic here
for coeffs in itertools.product([-1, 0, 1], repeat=len(varsid)):
if all(c==0 for c in coeffs):
continue
linexpr0 = generate_linexpr0(Krelu.offset, varsid, coeffs)
element = elina_abstract0_assign_linexpr_array(Krelu.man,True,Krelu.element,Krelu.tdim,linexpr0,1,None)
bound_linexpr = elina_abstract0_bound_dimension(Krelu.man,Krelu.element,Krelu.offset+Krelu.length)
upper_bound = bound_linexpr.contents.sup.contents.val.dbl
cdd_hrepr.append([upper_bound] + [-c for c in coeffs])
return cdd_hrepr
def compute_bound(constraint, lbi, ubi, varsid, j, is_lower):
k = len(varsid)
divisor = -constraint[j+k+1]
actual_bound = constraint[0]/divisor
potential_improvement = 0
for l in range(k):
coeff = constraint[l+1]/divisor
if is_lower:
if coeff < 0:
actual_bound += coeff * ubi[varsid[l]]
elif coeff > 0:
actual_bound += coeff * lbi[varsid[l]]
else:
if coeff < 0:
actual_bound += coeff * lbi[varsid[l]]
elif coeff > 0:
actual_bound += coeff * ubi[varsid[l]]
potential_improvement += abs(coeff * (ubi[varsid[l]] - lbi[varsid[l]]))
if l==j:
continue
coeff = constraint[l+k+1]/divisor
if((is_lower and coeff<0) or ((not is_lower) and (coeff > 0))):
actual_bound += coeff * ubi[varsid[l]]
return actual_bound, potential_improvement
def calculate_nnz(constraint, k):
nnz = 0
for i in range(k):
if constraint[i+1] != 0:
nnz = nnz+1
return nnz
def compute_expr_bounds_from_candidates(krelu_inst, varsid, bound_expr, lbi, ubi, candidate_bounds, is_lower):
assert not is_lower
k = krelu_inst.k
cons = krelu_inst.cons
for j in range(k):
candidate_rows = candidate_bounds[j]
if is_lower:
best_bound = -math.inf
else:
best_bound = math.inf
best_index = -1
for i in range(len(candidate_rows)):
row_index = candidate_rows[i]
actual_bound, potential_improvement = compute_bound(cons[row_index], lbi, ubi, varsid, j, is_lower)
bound = actual_bound - potential_improvement / 2
nnz = calculate_nnz(cons[row_index], k)
if nnz < 2:
continue
if((is_lower and bound > best_bound) or ((not is_lower) and bound < best_bound)):
best_index = row_index
best_bound = bound
if best_index == -1:
continue
res = np.zeros(k+1)
best_row = cons[best_index]
divisor = -best_row[j+k+1]
assert divisor > 0
#if divisor == 0:
# print("ROW ",best_row)
# print("CONS ", cons, krelu_inst)
# print("CDD ", krelu_inst.cdd_hrepr)
# print("j ", j, "lb ", lbi[varsid[0]], lbi[varsid[1]], "ub ", ubi[varsid[0]], ubi[varsid[1]] )
# print("candidates ", len(candidate_rows))
res[0] = best_row[0]/divisor
for l in range(k):
res[l+1] = best_row[l+1]/divisor
if(l==j):
continue
coeff = best_row[l+k+1]/divisor
if((is_lower and coeff<0) or ((not is_lower) and (coeff > 0))):
res[0] = res[0] + coeff*ubi[varsid[l]]
print("res ", res, "best_row ", best_row,"j ", j)
if varsid[j] in bound_expr.keys():
current_bound = bound_expr[varsid[j]].bound
if (is_lower and best_bound > current_bound) or ((not is_lower) and best_bound < current_bound):
bound_expr[varsid[j]] = Krelu_expr(res, varsid, best_bound)
else:
bound_expr[varsid[j]] = Krelu_expr(res, varsid, best_bound)
def compute_expr_bounds(krelu_inst, varsid, lower_bound_expr, upper_bound_expr, lbi, ubi):
cons = krelu_inst.cons
nbrows = len(cons)
k = len(varsid)
candidate_lower_bounds = []
candidate_upper_bounds = []
for j in range(k):
candidate_lower_bounds.append([])
candidate_upper_bounds.append([])
lin_size = len(krelu_inst.lin_set)
new_cons = np.zeros((lin_size,2*k+1),dtype=np.float64)
lin_count = 0
for i in range(nbrows):
if i in krelu_inst.lin_set:
row = cons[i]
for j in range(2*k+1):
new_cons[lin_count][j] = -row[j]
lin_count = lin_count + 1
krelu_inst.cons = np.vstack([cons,new_cons])
cons = krelu_inst.cons
nbrows = len(cons)
for i in range(nbrows):
row = cons[i]
for j in range(k):
if row[j+k+1]<0:
candidate_upper_bounds[j].append(i)
elif row[j+k+1]>0:
candidate_lower_bounds[j].append(i)
#compute_expr_bounds_from_candidates(krelu_inst, varsid, lower_bound_expr, lbi, ubi, candidate_lower_bounds, True)
compute_expr_bounds_from_candidates(krelu_inst, varsid, upper_bound_expr, lbi, ubi, candidate_upper_bounds, False)
def get_sparse_cover_for_group_of_vars(vars):
"""Function is fast for len(vars) = 50 and becomes slow for len(vars) ~ 100."""
K = 3
assert len(vars) > K
sparsed_combs = []
for comb in itertools.combinations(vars, K):
add = True
for selected_comb in sparsed_combs:
if len(set(comb).intersection(set(selected_comb))) >= K - 1:
add = False
break
if add:
sparsed_combs.append(comb)
return sparsed_combs
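# Illustrative note: for vars = [0, 1, 2, 3, 4] the cover above keeps a triple only
# if it shares at most one variable with every previously selected triple, so it
# returns [(0, 1, 2), (0, 3, 4)] while e.g. (0, 1, 3) is skipped.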
def sparse_heuristic_with_cutoff(all_vars, areas):
assert len(all_vars) == len(areas)
K = 3
sparse_n = config.sparse_n
cutoff = 0.05
print("sparse n", sparse_n)
# Sort vars by descending area
all_vars = sorted(all_vars, key=lambda var: -areas[var])
vars_above_cutoff = [i for i in all_vars if areas[i] >= cutoff]
krelu_args = []
while len(vars_above_cutoff) > 0:
grouplen = min(sparse_n, len(vars_above_cutoff))
group = vars_above_cutoff[:grouplen]
vars_above_cutoff = vars_above_cutoff[grouplen:]
if grouplen <= K:
krelu_args.append(group)
else:
group_args = get_sparse_cover_for_group_of_vars(group)
for arg in group_args:
krelu_args.append(arg)
# Also just apply 1-relu for every var.
for var in all_vars:
krelu_args.append([var])
return krelu_args
def encode_kactivation_cons(nn, man, element, offset, layerno, length, lbi, ubi, relu_groups, need_pop, domain, activation_type):
import deepzono_nodes as dn
if(need_pop):
relu_groups.pop()
last_conv = -1
is_conv = False
for i in range(nn.numlayer):
if nn.layertypes[i] == 'Conv':
last_conv = i
is_conv = True
lbi = np.asarray(lbi, dtype=np.double)
ubi = np.asarray(ubi, dtype=np.double)
candidate_vars = [i for i in range(length) if lbi[i] < 0 and ubi[i] > 0]
candidate_vars_areas = {var: -lbi[var] * ubi[var] for var in candidate_vars}
# Sort vars by descending area
candidate_vars = sorted(candidate_vars, key=lambda var: -candidate_vars_areas[var])
# Use sparse heuristic to select args (uncomment to use)
krelu_args = sparse_heuristic_with_cutoff(candidate_vars, candidate_vars_areas)
relucons = []
#print("UBI ",ubi)
tdim = ElinaDim(offset+length)
if domain == 'refinezono':
element = dn.add_dimensions(man,element,offset+length,1)
#krelu_args = []
#if config.dyn_krelu and candidate_vars:
# limit3relucalls = 500
# firstk = math.sqrt(6*limit3relucalls/len(candidate_vars))
# firstk = int(min(firstk, len(candidate_vars)))
# if is_conv and layerno < last_conv:
# firstk = 1
# else:
# firstk = 5#int(max(1,firstk))
# print("firstk ",firstk)
# if firstk>3:
# while candidate_vars:
# headlen = min(firstk, len(candidate_vars))
# head = candidate_vars[:headlen]
# candidate_vars = candidate_vars[headlen:]
# if len(head)<=3:
# krelu_args.append(head)
# else:
# for arg in itertools.combinations(head, 3):
# krelu_args.append(arg)
#klist = ([3] if (config.use_3relu) else []) + ([2] if (config.use_2relu) else []) + [1]
#for k in klist:
# while len(candidate_vars) >= k:
# krelu_args.append(candidate_vars[:k])
# candidate_vars = candidate_vars[k:]
Krelu.man = man
Krelu.element = element
Krelu.tdim = tdim
Krelu.length = length
Krelu.layerno = layerno
Krelu.offset = offset
Krelu.domain = domain
start = time.time()
if domain == 'refinezono':
with multiprocessing.Pool(config.numproc) as pool:
cdd_hrepr_array = pool.map(get_ineqs_zono, krelu_args)
else:
# krelu_results = []
total_size = 0
for varsid in krelu_args:
size = 3**len(varsid) - 1
total_size = total_size + size
linexpr0 = elina_linexpr0_array_alloc(total_size)
i = 0
for varsid in krelu_args:
for coeffs in itertools.product([-1, 0, 1], repeat=len(varsid)):
if all(c==0 for c in coeffs):
continue
linexpr0[i] = generate_linexpr0(offset, varsid, coeffs)
i = i + 1
upper_bound = get_upper_bound_for_linexpr0(man,element,linexpr0, total_size, layerno)
i=0
cdd_hrepr_array = []
for varsid in krelu_args:
cdd_hrepr = []
for coeffs in itertools.product([-1, 0, 1], repeat=len(varsid)):
if all(c==0 for c in coeffs):
continue
cdd_hrepr.append([upper_bound[i]] + [-c for c in coeffs])
#print("UPPER BOUND ", upper_bound[i], "COEFF ", coeffs)
#if len(varsid)>1:
# print("LB ", lbi[varsid[0]],lbi[varsid[1]], "UB ", ubi[varsid[0]], ubi[varsid[1]])
i = i + 1
cdd_hrepr_array.append(cdd_hrepr)
with multiprocessing.Pool(config.numproc) as pool:
krelu_results = pool.map(make_krelu_obj, cdd_hrepr_array)
# krelu_results.append(make_krelu_obj(krelu_args[i]))
#bound_expr_list = []
gid = 0
lower_bound_expr = {}
upper_bound_expr = {}
for krelu_inst in krelu_results:
varsid = krelu_args[gid]
krelu_inst.varsid = varsid
# For now disabling since in the experiments updating expression bounds makes results worse.
# compute_expr_bounds(krelu_inst, varsid, lower_bound_expr, upper_bound_expr, lbi, ubi)
#print("VARSID ",varsid)
#bound_expr_list.append(Krelu_expr(lower_bound_expr, upper_bound_expr, varsid))
relucons.append(krelu_inst)
gid = gid+1
end = time.time()
if config.debug:
print('krelu time spent: ' + str(end-start))
if domain == 'refinezono':
element = dn.remove_dimensions(man,element,offset+length,1)
relu_groups.append(relucons)
return lower_bound_expr, upper_bound_expr
| UTF-8 | Python | false | false | 16,738 | py | 61 | krelu.py | 15 | 0.582985 | 0.571872 | 0 | 458 | 35.545852 | 129 |
victorviro/Text-classifier-from-BERT-embeddings | 15,427,522,564,666 | 4fad6d3a8c72ecf9829e90e2157e187333b6fefc | cc0959203937193253fded9af1cc5cef10076568 | /src/train.py | 03e4ae66695f0d9bac2bfc5f276cc7b0e2188659 | []
| no_license | https://github.com/victorviro/Text-classifier-from-BERT-embeddings | 1a6576b68e8f70d442951dcd03cceca910f54df9 | 41580df2caec3f3cac4525e9069d930d43f594f2 | refs/heads/master | 2023-02-27T07:54:38.410736 | 2021-01-30T10:49:39 | 2021-01-30T10:49:39 | 329,711,383 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import spacy_sentence_bert
from constants import DATASET_PATH, SPACY_MODEL_NAME, MODEL_PATH
from utils import get_sentence_embeddings
def train_model():
"""
Load the dataset, get the embeddings for the sentences in the
training dataset, train the model and save it.
"""
# Load dataset in pandas DataFrame
source_df = pd.read_csv(DATASET_PATH)
# Get the sentences and the target variable separately
texts = list(source_df["text"])
target_variable = source_df["class"]
# Split the dataset for training and test
train_texts, test_texts, y_train, y_test = train_test_split(
texts, target_variable,
test_size=0.3,
random_state=1)
    print(f'Number of sentences in the train dataset: {len(train_texts)}')
# Load the spaCy statistical model
print('Loading the spaCy statistical model...')
nlp = spacy_sentence_bert.load_model(SPACY_MODEL_NAME)
    print('Getting embeddings of the sentences in the training dataset')
X_train = get_sentence_embeddings(nlp, train_texts)
print('Obtained embeddings of the sentences')
    print(f'Length of the embeddings: {X_train[0].shape[0]}')
# Define the model
model = LogisticRegression(random_state=0, max_iter=1000)
# Train the model
print('Training the model')
model.fit(X_train, y_train)
# Save the model
with open(MODEL_PATH, 'wb') as f:
pickle.dump(model, f)
print(f'Model saved in {MODEL_PATH}')
if __name__ == "__main__":
train_model()
| UTF-8 | Python | false | false | 1,795 | py | 7 | train.py | 4 | 0.636212 | 0.630641 | 0 | 54 | 32.222222 | 75 |
yiulau/all_code | 9,715,216,035,477 | 17e43893a32b892a082e4015dcfaf0660db4bb36 | a77ad6a7d32478a3921a67ed339ee1d120853f37 | /abstract/T_diag_e.py | 0212afd9b063d63f857f5703591bc0f592ef5a9a | []
| no_license | https://github.com/yiulau/all_code | 5cf7804c5a77d197f6a979d6fafa036f95d16659 | 4630ea6efc630616e6f767807ebbd64c82852726 | refs/heads/master | 2020-03-18T05:19:50.586402 | 2018-08-03T09:14:59 | 2018-08-03T09:14:59 | 134,337,109 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from abstract.abstract_class_T import T
from abstract.abstract_class_point import point
import torch
class T_diag_e(T):
def __init__(self,metric,linkedV):
self.metric = metric
super(T_diag_e, self).__init__(linkedV)
def evaluate_scalar(self,q_point=None,p_point=None):
        if q_point is not None:
print("should not pass q_point for this metric")
pass
        if p_point is not None:
self.load_point(p_point)
output = 0
for i in range(len(self.list_var)):
output += (self.list_var[i].data * self.list_var[i].data/self.metric._var_list_tensor[i]).sum() * 0.5
return(output)
def dp(self,p_flattened_tensor):
out = 1/self.metric._flattened_var * p_flattened_tensor
return(out)
# def dtaudp(self,p=None):
# if p==None:
# for i in range(len(self.list_shapes)):
# self.gradient[i].copy_(self.metric_var_list[i] * self.p[i])
# else:
# for i in range(len(self.list_shapes)):
# self.gradient[i].copy_(self.metric_var_list[i] * p[i])
# return (self.gradient)
#
# def dtaudq(self):
# raise ValueError("should not call this function")
def generate_momentum(self,q):
out = point(list_tensor=self.list_tensor,pointtype="p",need_flatten=self.need_flatten)
out.flattened_tensor.copy_(torch.sqrt(self.metric._flattened_var) * torch.randn(self.dim))
out.load_flatten()
return(out) | UTF-8 | Python | false | false | 1,520 | py | 340 | T_diag_e.py | 320 | 0.594737 | 0.592105 | 0 | 39 | 38 | 113 |
iiibsceprana/pranavsai | 18,408,229,840,382 | d111d9d6ac3a7a4f25de11d5c46b98197ed5421b | 14f223f1855215f6cbeaba533bcfe26532161918 | /functions/funtest3.py | 956b97cedf42647a34bdcd0532a3dd82ad72ec7b | []
| no_license | https://github.com/iiibsceprana/pranavsai | 1026519a44eac429db8c4a6e3664277839d5dd52 | ffd8c937c50814676b0ee1eabdfd461087d52b96 | refs/heads/master | 2020-03-09T23:36:23.258180 | 2018-04-11T08:53:55 | 2018-04-11T08:53:55 | 129,061,808 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def printme( str="my name is pranav sai"):
"this prints a passed string into the function"
print(str)
printme()
printme("my name is sai pranav")
| UTF-8 | Python | false | false | 160 | py | 44 | funtest3.py | 44 | 0.66875 | 0.66875 | 0 | 7 | 21.714286 | 51 |
cynthiayu316/Tiny-Tuffys-Super-Mario-Project | 17,540,646,440,839 | a2784521e02322c2c10e3f3a50166aa2f84bcda0 | 6aa63fda98ba7c1d57324781651d782ba8aaab8e | /pipe.py | cb9c48577b4ac3f73f165d47417a3db575c514df | []
| no_license | https://github.com/cynthiayu316/Tiny-Tuffys-Super-Mario-Project | f8f602a2b20b90b58418988bf8cd50f24e52a3b6 | 1502509cca1c0e87cbf846a2a4eab214bfe249f2 | refs/heads/master | 2021-02-06T04:00:37.328753 | 2019-11-04T06:34:42 | 2019-11-04T06:34:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
from pygame.sprite import Sprite
class Pipe(Sprite):
small_pipe = pygame.image.load('images/small_pipe.png')
medium_pipe = pygame.image.load('images/medium_pipe.png')
large_pipe = pygame.image.load('images/large_pipe.png')
def __init__(self, pos, screen, size=0):
super(Pipe, self).__init__()
self.pos = pos
self.screen = screen
self.size = size
self.set_pipe_size()
# added
self.image, self.rect, self.x, self.y = self.set_pipe_size()
def update(self):
self.rect.x = self.x
self.rect.y = self.y
def set_pipe_size(self):
if self.size == 0:
self.image = self.small_pipe
if self.size == 1:
self.image = self.medium_pipe
if self.size == 2:
self.image = self.large_pipe
self.rect = self.image.get_rect()
self.rect.x = self.rect.width
self.rect.y = self.rect.height
self.x = float(self.rect.x)
self.y = float(self.rect.y)
# added
return self.image, self.rect, self.x, self.y
| UTF-8 | Python | false | false | 1,143 | py | 15 | pipe.py | 14 | 0.550306 | 0.546807 | 0 | 39 | 27.307692 | 68 |
Jackroll/aprendendopython | 4,492,535,826,821 | bc917072897aeecc12e49fdbc7c66aee1b72d344 | a1d5290470d5a8beb99846d62d8539a13021470e | /exercicios_udemy/Orientacao_Objetos/desafio_poo/main.py | 823ed48bb124f96e6dc038788294bbb6963dd913 | []
| no_license | https://github.com/Jackroll/aprendendopython | 26007465b42666f0a9aff43e8229b24aef418d4c | 9211612a1be8015bcf8d23d8cdfbd11d9df38135 | refs/heads/master | 2021-02-19T19:40:22.993033 | 2020-04-04T21:35:08 | 2020-04-04T21:35:08 | 245,319,139 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Banking system:
- Bank
- Clients
- accounts
Bank = [ [Clients ] ]
- Clients:
- must have an account (checking or savings)
- Withdraw
- Deposit
- Checking / Savings account:
- Extra limit (can be withdrawn, leaving a negative balance)
- Bank:
- clients
- accounts
"""
from Orientacao_Objetos.desafio_poo.cliente import Cliente
from Orientacao_Objetos.desafio_poo.conta import ContaCorrente, ContaPoupanca
from Orientacao_Objetos.desafio_poo.banco import Banco
# instantiate the Banco() class
banco = Banco()
# Create several clients using the Cliente() class
cliente01 = Cliente('Jacson', 33)
cliente02 = Cliente('Pedro', 54)
cliente03 = Cliente('Ivan ', 18)
# Create several accounts with the ContaPoupanca() and ContaCorrente() classes
conta01 = ContaPoupanca(568, 3035, 0)
conta02 = ContaCorrente(222, 3036, 0)
conta03 = ContaPoupanca(333, 3037, 0)
# Register the accounts in the bank through the cadastrar_conta method of the banco object
banco.cadastrar_conta(conta01)
banco.cadastrar_conta(conta02)
banco.cadastrar_conta(conta03)
# Register the clients in the bank through the cadastrar_cliente method of the banco object
banco.cadastrar_cliente(cliente01)
banco.cadastrar_cliente(cliente02)
banco.cadastrar_cliente(cliente03)
# Attach each created account to its specific client
cliente01.inserir_conta(conta01)
cliente02.inserir_conta(conta02)
cliente03.inserir_conta(conta03)
# Validation: check that the branch, account and client exist in the bank
if banco.validacao(cliente02):
cliente02.conta.deposito(500)
cliente02.conta.sacar(550)
else:
print('Não Autenticado')
if banco.validacao(cliente01):
cliente01.conta.deposito(500)
cliente01.conta.sacar(100)
else:
print('Não Autenticado') | UTF-8 | Python | false | false | 1,789 | py | 104 | main.py | 100 | 0.727784 | 0.677165 | 0 | 63 | 27.238095 | 78 |
sumeilan/lemon_testcase | 6,451,040,884,530 | 5fd4e8f492d3e62dfc874073534dd1fd01b55b7c | b7fd9500a776c0e92676e65b80dec0f28ebbd232 | /test2.py | 90453af1e3f27ca0dd828a7fc1164f9cad8d8853 | []
| no_license | https://github.com/sumeilan/lemon_testcase | b433a36f32010a06b1684f1e4494236b4e964ac0 | fda97abac0540cbdca52c1e0f848bf776f4182bd | refs/heads/master | 2022-12-09T22:07:30.845115 | 2019-10-19T13:59:34 | 2019-10-19T13:59:34 | 198,031,251 | 0 | 1 | null | false | 2022-11-25T22:51:19 | 2019-07-21T08:20:36 | 2019-10-19T14:01:38 | 2022-11-25T22:51:19 | 5,442 | 0 | 1 | 1 | Python | false | false | import hashlib
import hmac
import base64
import json
secretKey = 'SwYNTwt5qPABx29Atyi0'
body='{"page":"1","pageSize":"20","app_key":"lemondream"}'
bo={"page": "1", "pageSize": "20", "app_key": "lemondream"}
co = json.dumps(bo).replace(" ", "")  # convert bo into the same string form as body
boo={'page':'1','pageSize':'20','app_key':'lemondream'}
booo =json.dumps(boo).replace(" ", "")
body1 = base64.b64encode(hmac.new(str.encode(secretKey), str.encode(str(body)), digestmod=hashlib.sha256).digest())
co1 = base64.b64encode(hmac.new(str.encode(secretKey), str.encode(str(co)), digestmod=hashlib.sha256).digest())
bo1 = base64.b64encode(hmac.new(str.encode(secretKey), str.encode(str(bo)), digestmod=hashlib.sha256).digest())
boo1 = base64.b64encode(hmac.new(str.encode(secretKey), str.encode(str(boo)), digestmod=hashlib.sha256).digest())
booo1 = base64.b64encode(hmac.new(str.encode(secretKey), str.encode(str(booo)), digestmod=hashlib.sha256).digest())
print(body,type(body),body1)
print(co,type(co),co1)
print(bo,type(bo),bo1)
print(boo,type(boo),boo1)
print(booo,type(booo),booo1)
| UTF-8 | Python | false | false | 1,070 | py | 21 | test2.py | 17 | 0.712406 | 0.656015 | 0 | 28 | 36.892857 | 115 |
komkar123/Udacity_NanoDegree_DataEngineering | 558,345,777,486 | 2270b6b4e571fbc9d63e53094ddfb03bbe09673d | e052eb5507b15bd8ac35edc8d7473b0ce9413412 | /Airflow/plugins/operators/load_dimension.py | 5fe0fb9f675ee6cb47b867bdcd1aab15122a2997 | []
| no_license | https://github.com/komkar123/Udacity_NanoDegree_DataEngineering | 19ddea0478dbdae102a4fe67d2f9c5cdf1db8c61 | 3e4da07962b93566b7b7ab234114d1a24fcfd3cf | refs/heads/master | 2022-11-13T05:25:50.146729 | 2020-07-11T00:41:26 | 2020-07-11T00:41:26 | 262,697,913 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadDimensionOperator(BaseOperator):
ui_color = '#80BD9E'
insert_sql="""
Insert into {table}
{sql};
"""
@apply_defaults
def __init__(self,
redshift_conn_id,
sql,
table,
*args, **kwargs):
super(LoadDimensionOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id=redshift_conn_id
self.sql=sql
self.table=table
def execute(self, context):
postgres=PostgresHook(postgres_conn_id=self.redshift_conn_id)
insertsqlop=LoadDimensionOperator.insert_sql.format(
            table=self.table,
            sql=self.sql
)
postgres.run(insertsqlop)
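# Hypothetical DAG usage sketch (task name, connection id and SqlQueries helper are
# assumptions, not part of this file):
#   load_user_dim = LoadDimensionOperator(
#       task_id='load_user_dim_table',
#       redshift_conn_id='redshift',
#       table='users',
#       sql=SqlQueries.user_table_insert,
#       dag=dag,
#   )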
| UTF-8 | Python | false | false | 867 | py | 17 | load_dimension.py | 8 | 0.597463 | 0.594002 | 0 | 32 | 26.09375 | 69 |
gaz888/Python-mini-blog | 14,267,881,366,958 | b5dbb964b4ac018839dc825c12df9598174aee1b | 6f33f1425b0ef6888fa2001f0c31cb1ef14c91c7 | /entities/Post.py | 66631843824321e5650b2c7f6886b741c6216ec1 | []
| no_license | https://github.com/gaz888/Python-mini-blog | bac4fd6d860415c0cc02a7f0afe560fd508fd569 | 5d87652e7dbe0246331f14258d87eb7713c1fd7c | refs/heads/master | 2021-01-17T20:09:10.666803 | 2013-01-01T22:37:25 | 2013-01-01T22:37:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from google.appengine.ext import db
from google.appengine.api import memcache
import time
__author__ = 'ssav'
class Post(db.Model):
title = db.StringProperty(required=True)
content = db.TextProperty(required=True)
created = db.DateTimeProperty(auto_now_add=True)
ALL_POSTS = 'all_posts'
LAST_TIME_QUERIED_ALL_POSTS = 'last time queried all posts'
POST = 'post#:'
LAST_TIME_QUERIED_POST = 'last time queried post#'
@classmethod
def get_all_order_by_date(cls, update_cache = False):
all_posts = memcache.get(cls.ALL_POSTS)
if update_cache or all_posts is None:
all_posts = Post.gql('ORDER BY created DESC')
memcache.set(cls.ALL_POSTS, all_posts)
memcache.set(cls.LAST_TIME_QUERIED_ALL_POSTS, time.time())
return all_posts
@classmethod
def put_and_update_cache(cls, post):
post.put()
cls.get_all_order_by_date(True)
@classmethod
def get_by_id_and_update_cache(cls, id):
post = memcache.get(cls.POST + str(id))
if post is None:
post = Post.get_by_id(id)
memcache.set(cls.POST + str(id), post)
memcache.set(cls.LAST_TIME_QUERIED_POST + str(id), time.time())
return post
def as_dict(self):
time_fmt = '%c'
d = {'title': self.title,
'content': self.content,
'created': self.created.strftime(time_fmt)}
return d | UTF-8 | Python | false | false | 1,454 | py | 24 | Post.py | 14 | 0.610729 | 0.610729 | 0 | 47 | 29.957447 | 75 |
NikithaChandrashekar/guessing | 13,357,348,312,257 | c44ec8b580103b5f468bcb7e648124243f661406 | ec0cea13a51abae7b101d145de6c289649038a10 | /guessingGame.py | b025408961823ae73ecee5b8beeea8c7ebe6ecce | []
| no_license | https://github.com/NikithaChandrashekar/guessing | c62cd23a23fb56144c5a7810d77fe884d78e9f49 | d13c51aa8d0466c80713cf0069a668f20e5b614c | refs/heads/main | 2023-07-22T11:27:14.814106 | 2021-09-08T11:51:41 | 2021-09-08T11:51:41 | 404,328,005 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
name=input("Hi!Enter your name.")
print("ok"+name+",hi!")
age=input("Please tell me your age!")
print("hmm,okay !")
q1=input("guess the capital of india!")
if(q1=="Delhi"):
print("yes that is correct!")
else:
print("sorry, but that is wrong.")
| UTF-8 | Python | false | false | 254 | py | 2 | guessingGame.py | 1 | 0.641732 | 0.633858 | 0 | 9 | 27 | 39 |
rodrigolc/gowdock-landing | 8,899,172,257,627 | 23a7f9802b29caf9e00aade8a506bdd3d1324f09 | cd2b85de3990e7cc2ae66ba1610c07ef5c220098 | /landing_page/models.py | 2dda4ec5a04cb3f58f98af11bb97788e8ac45a3b | []
| no_license | https://github.com/rodrigolc/gowdock-landing | 1c635dc9e3c0789cc6e63da3bdfad636bec1f691 | 5397c8453b625467ed737083ab57d46ce892f02a | refs/heads/master | 2018-11-04T14:22:41.579353 | 2018-10-05T18:43:26 | 2018-10-05T18:43:26 | 146,206,638 | 0 | 0 | null | false | 2018-09-11T23:04:38 | 2018-08-26T18:37:57 | 2018-09-03T13:48:26 | 2018-09-11T23:04:38 | 2,994 | 0 | 0 | 0 | Python | false | null | from django.db import models
# Create your models here.
class CadastroLanding(models.Model):
nome = models.CharField(
max_length=120,
error_messages={
'blank': "Nome vazio"
}
)
email = models.EmailField(
unique=True,
error_messages={
'null': "Email nulo",
'blank': "Email vazio",
'invalid': "Email inválido",
'unique': "Email já cadastrado"
}
)
def __str__(self):
return "%s - %s" % (self.nome,self.email)
| UTF-8 | Python | false | false | 547 | py | 12 | models.py | 4 | 0.519266 | 0.513761 | 0 | 23 | 22.695652 | 49 |
ivanag01/Python-By-Example | 5,858,335,425,294 | f2e83d7060229496d8abd0ec9c36db3bc2ef9e83 | a6e5b996f70c36b1d258fe7d65446f8ced5f7f1b | /src/065.py | 85531eeb82e2e02dea41f9578ba52f09e3301739 | []
| no_license | https://github.com/ivanag01/Python-By-Example | 5e0087628d2f3641be9183d4c00c42b346c4704c | baeb6f129777da20e7da869fe778016d1403b44e | refs/heads/main | 2023-06-01T10:28:37.740258 | 2021-06-18T11:45:43 | 2021-06-18T11:45:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import turtle
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.penup()
turtle.forward(50)
turtle.pendown()
turtle.forward(75)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(75)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(75)
turtle.penup()
turtle.forward(50)
turtle.pendown()
turtle.forward(75)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(45)
turtle.left(180)
turtle.forward(45)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(75)
turtle.hideturtle()
turtle.exitonclick() | UTF-8 | Python | false | false | 572 | py | 57 | 065.py | 56 | 0.769231 | 0.678322 | 0 | 39 | 13.692308 | 20 |
gsakkas/seq2parse | 14,877,766,742,752 | 52acf77bc52a98f2486bb57d00586162691c417a | e547f7a92e7a1c1d79f8631f9e8ee8a93879a4eb | /src/transformer_classifier.py | 2037c9312c97e67af88b642a371573a980892ae4 | []
| no_license | https://github.com/gsakkas/seq2parse | 3c33ec7bc6cc6e4abd9e4981e53efdc173b7a7b9 | 7ae0681f1139cb873868727f035c1b7a369c3eb9 | refs/heads/main | 2023-04-09T12:29:37.902066 | 2023-01-18T21:32:12 | 2023-01-18T21:32:12 | 417,597,310 | 7 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
"""
## Implement a Transformer block as a layer
"""
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super(TransformerBlock, self).__init__()
self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
self.ffn = keras.Sequential(
[layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, inputs, training):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
"""
## Implement embedding layer
Two separate embedding layers, one for tokens, one for token index (positions).
"""
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super(TokenAndPositionEmbedding, self).__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = tf.shape(x)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
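# Shape note (for illustration): given an int token batch of shape (batch, maxlen),
# the layer above returns embeddings of shape (batch, maxlen, embed_dim), i.e. the
# token embedding plus a learned embedding of each position index 0..maxlen-1.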
"""
## Implement a Transformer classifier
"""
class TransformerClassifier(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, transformer_blks=1, dense_dims=[64,], vocab_size=500, maxlen=200, y_maxlen=2, active='softmax'):
self.embed_dim = embed_dim # Embedding size for each token
self.num_heads = num_heads # Number of attention heads
self.ff_dim = ff_dim # Hidden layer size in feed forward network inside transformer
self.vocab_size = vocab_size
self.maxlen = maxlen
self.y_maxlen = y_maxlen
self.activation = active
self.dense_dims = dense_dims
self.transformer_blks = transformer_blks
self.embedding_layer = TokenAndPositionEmbedding(self.maxlen, self.vocab_size, self.embed_dim)
self.transformer_blocks_list = [TransformerBlock(self.embed_dim, self.num_heads, self.ff_dim) for _ in range(self.transformer_blks)]
input = layers.Input(shape=(self.maxlen,))
x = self.embedding_layer(input)
for blk in self.transformer_blocks_list:
x = blk(x)
# x = layers.Flatten()(x)
x = layers.GlobalAveragePooling1D()(x)
for dense_layer in self.dense_dims:
x = layers.Dropout(0.2)(x)
x = layers.Dense(dense_layer, kernel_regularizer=regularizers.l2(0.005), activation="relu")(x)
x = layers.Dropout(0.2)(x)
outputs = layers.Dense(self.y_maxlen, activation=self.activation)(x)
self.model = keras.Model(inputs=input, outputs=outputs)
def compile(self, optimizer='adam', loss="sparse_categorical_crossentropy", metrics=["accuracy"]):
self.model.summary()
self.model.compile(optimizer, loss, metrics=metrics)
def fit(self, xs, ys, valid_data, batch_size=16, epochs=5, verbose=1):
return self.model.fit(xs, ys, batch_size=batch_size, epochs=epochs, validation_data=valid_data, verbose=verbose)
def predict(self, xs):
return self.model.predict(xs)
def save_weights(self, path):
return self.model.save_weights(path)
def load_weights(self, path):
return self.model.load_weights(path)
"""
## Implement an Augmented Transformer classifier
"""
class AugmentedTransformerClassifier(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, transformer_blks=1, dense_dims=[64,], vocab_size=500, maxlen=200, y_maxlen=2, next_tokens_len=59, active='softmax'):
self.embed_dim = embed_dim # Embedding size for each token
self.num_heads = num_heads # Number of attention heads
self.ff_dim = ff_dim # Hidden layer size in feed forward network inside transformer
self.vocab_size = vocab_size
self.maxlen = maxlen
self.y_maxlen = y_maxlen
self.next_tokens_len = next_tokens_len
self.activation = active
self.dense_dims = dense_dims
self.transformer_blks = transformer_blks
self.embedding_layer = TokenAndPositionEmbedding(self.maxlen, self.vocab_size, self.embed_dim)
self.transformer_blocks_list = [TransformerBlock(self.embed_dim, self.num_heads, self.ff_dim) for _ in range(self.transformer_blks)]
seq_input = layers.Input(shape=(self.maxlen,))
nexts_input = layers.Input(shape=(self.next_tokens_len,))
x = self.embedding_layer(seq_input)
for blk in self.transformer_blocks_list:
x = blk(x)
# x = layers.Flatten()(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Concatenate()([x, nexts_input])
for dense_layer in self.dense_dims:
x = layers.Dropout(0.2)(x)
x = layers.Dense(dense_layer, kernel_regularizer=regularizers.l2(0.005), activation="relu")(x)
x = layers.Dropout(0.2)(x)
outputs = layers.Dense(self.y_maxlen, activation=self.activation)(x)
self.model = keras.Model(inputs=[seq_input, nexts_input], outputs=outputs)
def compile(self, optimizer='adam', loss="sparse_categorical_crossentropy", metrics=["accuracy"]):
self.model.summary()
self.model.compile(optimizer, loss, metrics=metrics)
def fit(self, xs, ys, valid_data, batch_size=16, epochs=5, verbose=1):
return self.model.fit(xs, ys, batch_size=batch_size, epochs=epochs, validation_data=valid_data, verbose=verbose)
def predict(self, xs):
return self.model.predict(xs)
def save_weights(self, path):
return self.model.save_weights(path)
def load_weights(self, path):
return self.model.load_weights(path)
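# Illustrative usage sketch (x_seqs, x_nexts, ys and the validation arrays are
# hypothetical names): the augmented classifier takes two inputs, the token
# sequence and a fixed-size "next tokens" feature vector, so fit/predict are
# called with a list of arrays:
#   clf = AugmentedTransformerClassifier(embed_dim=32, num_heads=2, ff_dim=32,
#                                        vocab_size=500, maxlen=200,
#                                        y_maxlen=2, next_tokens_len=59)
#   clf.compile()
#   # x_seqs: (batch, 200) int array, x_nexts: (batch, 59) float array
#   clf.fit([x_seqs, x_nexts], ys, valid_data=([v_seqs, v_nexts], v_ys))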
if __name__ == "__main__":
"""
## Download and prepare dataset
"""
vocab_size = 10000 # Only consider the top 10k words
maxlen = 200 # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=maxlen)
"""
## Create classifier model using transformer layer
Transformer layer outputs one vector for each time step of our input sequence.
Here, we take the mean across all time steps and
use a feed forward network on top of it to classify text.
"""
embed_dim = 32 # Embedding size for each token
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feed forward network inside transformer
transformerClfr = TransformerClassifier(embed_dim, num_heads, ff_dim, 1, [32], vocab_size, maxlen)
"""
## Train and Evaluate
"""
transformerClfr.compile("adam", "sparse_categorical_crossentropy",
metrics=[keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy")])
history = transformerClfr.fit(x_train, y_train, (x_val, y_val))
| UTF-8 | Python | false | false | 7,814 | py | 386 | transformer_classifier.py | 227 | 0.665088 | 0.653187 | 0 | 181 | 42.171271 | 169 |
rluria14/maxarcat | 1,236,950,606,942 | 2522c1fab2b61ba6fa00ca7529399d748f370113 | b61c07b0e33f68a41ec184305d81769aa48345b0 | /maxarcat_client/test/test_stacitem_api.py | 10433ba66bc77c38729d344ae3b89d821ad0a73f | [
"MIT"
]
| permissive | https://github.com/rluria14/maxarcat | c5d6909d8fc74b4f49b9cec5816f935547c7e10b | dc4fd010fc65149c0b3421438b48f1af20887404 | refs/heads/master | 2023-06-09T16:58:58.559576 | 2021-07-01T01:08:53 | 2021-07-01T01:08:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
"""
Maxar Content API - Catalog
The Maxar Content Catalog API implements a STAC-compliant service for searching the Maxar content catalog. __The STAC specification is still under development. When version 1.0 of the STAC specification is released the Content Catalog API will be updated to reflect any changes, some of which will not be backward compatible with this current version.__ For information on STAC see [stacspec.org](https://stacspec.org) # noqa: E501
OpenAPI spec version: 0.9
Contact: DL-Content-Catalog@maxar.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import maxarcat_client
from maxarcat_client.api.stacitem_api import STACITEMApi # noqa: E501
from maxarcat_client.rest import ApiException
class TestSTACITEMApi(unittest.TestCase):
"""STACITEMApi unit test stubs"""
def setUp(self):
self.api = STACITEMApi() # noqa: E501
def tearDown(self):
pass
def test_get_item_stac(self):
"""Test case for get_item_stac
Get STAC item # noqa: E501
"""
pass
def test_get_items_stac(self):
"""Test case for get_items_stac
Search STAC items in a collection. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 1,372 | py | 36 | test_stacitem_api.py | 18 | 0.682216 | 0.667638 | 0 | 47 | 28.191489 | 440 |
thomasboevith/pyrad | 2,929,167,734,747 | 204281895e48adefb28724fa4bb73562ac0e37a6 | 77d36fec53d21f20e55c1860abe4680a78b5f383 | /utils_proj.py | 6b5550af422372e65c263fbf6c08cf382feaa079 | []
| no_license | https://github.com/thomasboevith/pyrad | dbc963bea038ae3dea37fc8a3b98856fdb5df3c5 | 249abbca681f2876ef893009540f07e884b038dd | refs/heads/master | 2019-04-05T00:55:04.080525 | 2016-09-06T08:36:09 | 2016-09-06T08:36:09 | 56,674,485 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import pyproj
import sys
"""Some predefined and standard projections"""
log = logging.getLogger(__name__)
def predefined_projections(projname):
if projname == "dmi_stere":
""" Stereographic projection (DMI standard projection) """
projstring = '+proj=stere +ellps=WGS84 +lat_0=56 +lon_0=10.5666' + \
' +lat_ts=56'
elif projname == "dmi_metar":
""" Stereographic projection (metar) """
projstring = '+proj=stere +ellps=WGS84 +x_0=450000 +y_0=350000' + \
' +lat_0=56 +lon_0=10.5666 +lat_ts=56'
elif projname == "gmaps":
"""Google Maps projection"""
projstring = '+proj=merc +lat_ts=0 +lon_0=0 +a=6378137.0 +b=6378137.0'
elif projname == "web_mercator":
projstring = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0' + \
' +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null' + \
' +wktext +no_defs' # https://en.wikipedia.org/wiki/Web_Mercator
else:
log.error('Projection not found: %s' % projname)
sys.exit(1)
return pyproj.Proj(projstring), projstring
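# Illustrative use (a sketch, not part of the original module): forward-project a
# lon/lat pair with the DMI stereographic projection and convert back.
#   proj, projstring = predefined_projections('dmi_stere')
#   x, y = proj(10.5666, 56.0)            # lon, lat -> projected metres
#   lon, lat = proj(x, y, inverse=True)   # projected metres -> lon, lat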
def predefined_areas(areaname, pixelsize):
"""Define upper left geographical coordinates and the extent (rows,cols)"""
if areaname == "dmi_stere":
numrows = int(432 * (2000. / pixelsize))
numcols = int(496 * (2000. / pixelsize))
upperleft_lon = 3.
upperleft_lat = 60.
proj, projstring = predefined_projections('dmi_stere')
upperleft_x, upperleft_y = proj(upperleft_lon, upperleft_lat)
lowerleft_lon, lowerleft_lat = proj(upperleft_x,
upperleft_y - (numrows * pixelsize),
inverse=True)
upperright_lon, upperright_lat = proj( \
upperleft_x + (numcols * pixelsize),
upperleft_y,
inverse=True)
elif areaname == "dmi_metar":
numrows = 846
numcols = 995
upperleft_x = 26785.817348
upperleft_y = 825106.248553
proj, projstring = predefined_projections('dmi_metar')
lowerleft_lon, lowerleft_lat = proj(upperleft_x,
upperleft_y - (numrows * pixelsize),
inverse=True)
upperright_lon, upperright_lat = proj( \
upperleft_x + (numcols * pixelsize),
upperleft_y,
inverse=True)
else:
log.error('Areaname not found: %s' % areaname)
sys.exit(1)
return proj, projstring, numrows, numcols, lowerleft_lon, lowerleft_lat, \
upperright_lon, upperright_lat
def radar_gnonomic_projection(lon, lat):
"""Gnonomic projection for radar"""
projstring = '+proj=gnom +ellps=WGS84'
projstring += ' +lon_0=' + str(lon)
projstring += ' +lat_0=' + str(lat)
return pyproj.Proj(projstring), projstring
| UTF-8 | Python | false | false | 3,100 | py | 8 | utils_proj.py | 7 | 0.538065 | 0.493226 | 0 | 75 | 40.333333 | 79 |
OanaIgnat/coding_practice | 16,071,767,659,395 | d31eb1c6aa35696581ec48a3cd097d75edb720fa | fc44f5469ab61537c96594c63e11f599201e6aaf | /data_structures_algorithms/arrays/arrange.py | 8fec681fb94e6a7d73779177072911f68be3b836 | []
| no_license | https://github.com/OanaIgnat/coding_practice | ad98ef052bf24c0547ef61a5a5bd848207259123 | 73a7841f8467e5e6ea7fed942c464b8d71be9a55 | refs/heads/master | 2022-03-03T22:54:31.130738 | 2022-02-25T18:04:42 | 2022-02-25T18:04:42 | 168,975,280 | 10 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Facebook:
Rearrange a given array so that Arr[i] becomes Arr[Arr[i]] with O(1) extra space.
Example:
Input : [1, 0]
Return : [0, 1]
Lets say N = size of the array. Then, following holds true :
All elements in the array are in the range [0, N-1]
N * N does not overflow for a signed integer
'''
class Solution:
# @param A : list of integers
# Modify the array A which is passed by reference.
# You do not need to return anything in this case.
def arrange_my_sol_not_optimum(self, A):
n = len(A)
if n % 10 == 0:
power_ten = n // 10
else:
power_ten = n // 10 + 1
for i in range(0, n):
A[i] = A[A[i]] % (10 ** power_ten) * (10 ** power_ten) + A[i] % (10 ** power_ten)
for i in range(0, n):
A[i] = A[i] // (10 ** power_ten)
'''
Now, we will do a slight trick to encode 2 numbers in one index.
This trick assumes that N * N does not overflow.
Given a number as
A = B + C * N if ( B, C < N )
A % N = B
A / N = C
'''
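    # Worked example (for illustration): with n = 3 and A = [2, 0, 1],
    # the encoding pass A[i] = A[A[i]] % n * n + A[i] % n produces
    #   i=0: A = [5, 0, 1]   (A[2] % 3 = 1, so 1*3 + 2 = 5)
    #   i=1: A = [5, 6, 1]   (A[0] % 3 = 2, so 2*3 + 0 = 6)
    #   i=2: A = [5, 6, 1]   (A[1] % 3 = 0, so 0*3 + 1 = 1)
    # and the decoding pass A[i] //= n yields [1, 2, 0].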
def arrange(self, A):
n = len(A)
for i in range(0, n):
A[i] = A[A[i]] % n * n + A[i] % n
for i in range(0, n):
A[i] = A[i] // n
def test_arrange():
    s = Solution()
    # arrange() modifies the list in place and returns None, so mutate then check
    for data, expected in [([1, 0], [0, 1]),
                           ([2, 0, 1], [1, 2, 0]),
                           ([1, 2, 7, 0, 9, 3, 6, 8, 5, 4, 11, 10], [2, 7, 8, 1, 4, 0, 6, 5, 3, 9, 10, 11])]:
        s.arrange(data)
        assert data == expected
if __name__ == "__main__":
test_arrange()
| UTF-8 | Python | false | false | 1,568 | py | 61 | arrange.py | 56 | 0.5 | 0.457908 | 0 | 60 | 25.133333 | 114 |
jiachen247/CodeIt2018-CreditSussie | 7,550,552,549,111 | a4e0b59a7b55a6cde6e9cefc455d070a64636217 | 7ae994ceed2d419c8054e723158b7cb11e129a69 | /codeitsuisse/routes/broadcasterpt2.py | 98a28474ddd8cd3300506d8843399dc2f90441a9 | []
| no_license | https://github.com/jiachen247/CodeIt2018-CreditSussie | fdc85e3ed8e6936904f64353fa941bae28fc9ea5 | c47344bb9215dfe490f74b48baa0edcaf0f22779 | refs/heads/master | 2021-09-23T12:56:30.776568 | 2018-09-23T01:35:57 | 2018-09-23T01:35:57 | 149,738,675 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from flask import request, jsonify;
from codeitsuisse import app;
logger = logging.getLogger(__name__)
@app.route('/broadcaster/most-connected-node', methods=['POST','GET'])
def evaluate_most_connected_node():
data = request.get_json();
logging.info("data sent for evaluation {}".format(data))
node_list = data.get("data");
print("Input Data:", node_list)
broadcast = {}
for x in node_list:
(a, b) = x.split('->')
if a in broadcast:
broadcast[a][0] += 1
else:
broadcast[a] = [1,b]
if b not in broadcast:
broadcast[b] = [0]
def get_sum(letter):
if broadcast[letter][-1] == 'Updated':
            return broadcast[letter][0]
        else:
            for x in range(1, len(broadcast[letter]) - 1):
                broadcast[letter][0] += get_sum(broadcast[letter][x])
broadcast[letter].append('Updated')
return broadcast[letter][0]
largest_val = 0
letter = []
for x in broadcast:
val = get_sum(x)
if val >= largest_val:
largest_val = val
letter.append([largest_val, x])
letter.sort(reverse=True)
best_letter = []
for x in letter:
if not x[0] == letter[0][0]:
break
else:
best_letter.append(x[1])
result = {"result": sorted(best_letter)[0]}
print("Output Data:", result)
logging.info("My result :{}".format(result))
return jsonify(result); | UTF-8 | Python | false | false | 1,470 | py | 26 | broadcasterpt2.py | 23 | 0.552381 | 0.541497 | 0 | 46 | 30.978261 | 70 |
lithium/django-ingress-discoverer | 10,015,863,779,738 | fc709c29aa1084ee571d13cba29350fdfb12088a | 6930f4f0c79f3d938298447643d955cb7835df84 | /discoverer/management/commands/import_from_portalinfo.py | e99885c4d197eb24e5d5950ac510fd4641aeb74b | []
| no_license | https://github.com/lithium/django-ingress-discoverer | 231e8b5bd5d0586b47518144ced4aae7d555e972 | 5c7985e8dd110ddf8f85e6eb70b1bedac8e05f25 | refs/heads/master | 2022-01-15T20:05:47.326040 | 2017-06-25T05:15:03 | 2017-06-25T05:15:03 | 212,792,475 | 0 | 0 | null | false | 2022-01-06T22:37:56 | 2019-10-04T10:45:01 | 2019-10-04T10:45:53 | 2022-01-06T22:37:56 | 105 | 0 | 0 | 2 | Python | false | false | import pprint
from django.core.management import BaseCommand
from pymongo.errors import BulkWriteError
from discoverer.models import PortalInfo
from discoverer.portalindex.helpers import MongoPortalIndex
class Command(BaseCommand):
def handle(self, *args, **options):
for pi in PortalInfo.objects.all():
MongoPortalIndex.update_portal(
latE6=int(float(pi.lat)*1e6),
lngE6=int(float(pi.lng)*1e6),
name=pi.name,
guid=None,
timestamp=pi.created_at,
created_by=pi.created_by,
region=None)
try:
results = MongoPortalIndex.bulk_op_execute()
except BulkWriteError as e:
print e.details
raise e
pprint.pprint(results)
| UTF-8 | Python | false | false | 811 | py | 33 | import_from_portalinfo.py | 23 | 0.606658 | 0.59926 | 0 | 27 | 29.037037 | 59 |
davelidev/LeetCode | 10,307,921,522,582 | 32a2e6b1aa1edf59fc6547b39e8898f183625fed | cd47aaf8de3b09706988ffb4f4ad9a5ae8e9ff5c | /201 Bitwise AND of Numbers Range.py | 2b4e87c4cad05055f74f71987ad6788627253abb | []
| no_license | https://github.com/davelidev/LeetCode | 023d6d0c987218480022164afc4f6cb4232093d2 | c89d2b92c2e0881a2809caa753351ab4ef5caa18 | refs/heads/master | 2020-03-23T07:02:28.609282 | 2018-11-03T02:56:33 | 2018-11-03T02:56:33 | 141,244,429 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Answer(object):
'''201. Bitwise AND of Numbers Range'''
    def rangeBitwiseAnd(self, m, n):
res = ~0
while ((m & res) != (n & res)):
res = res << 1
return res & m | UTF-8 | Python | false | false | 199 | py | 346 | 201 Bitwise AND of Numbers Range.py | 346 | 0.492462 | 0.467337 | 0 | 7 | 27.571429 | 39 |
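# Worked example (for illustration): for m = 5 (0b101) and n = 7 (0b111) the mask
# res is shifted left twice until m & res == n & res == 0b100, so the common
# prefix 0b100 = 4 is returned, matching 5 & 6 & 7 = 4.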
mayckxavier/ci-crud-generator | 13,709,535,638,814 | bcf807b87d3aa69bc9abf116862201140e270b3b | e68a30ada8d1956f1f4455eb2c453cb3622b4183 | /crud_generator.py | 1f57ed5e60ed7799bb32d377d8fe90acf39d1a75 | []
| no_license | https://github.com/mayckxavier/ci-crud-generator | adfb596a260b9d29e925cf907d57cf449a0b2b74 | ead04c129192ba887104fffb0b4bcae987dfd655 | refs/heads/master | 2021-01-20T02:28:47.447388 | 2016-07-21T12:01:51 | 2016-07-21T12:01:51 | 63,865,671 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from shutil import copyfile, move
controllersPath = 'application/controllers/'
modelsPath = 'application/models/'
viewsPath = 'application/views/'
crud_generator_files = 'crud_generator_files/'
def create_controller(param):
    # Check whether the controller already exists
if os.path.isfile(controllersPath + param + 's.php'):
print 'Controller Existe'
else:
result_file_name = param.lower() + 's.php'
result_file = crud_generator_files + result_file_name
default_controller_file = crud_generator_files + 'controller_file.php'
controller_name = param + 's'
model_name = param + '_model'
cap_param_name = param.capitalize()
print 'Controller Não existe. Criando cópia de arquivo'
copyfile(crud_generator_files + 'controller_file.php', result_file)
f_read = open(default_controller_file, "r")
f_write = open(result_file, "w+")
text = f_read.read()
text = text.replace("{{class_name}}", controller_name.capitalize())
text = text.replace("{{controller_name}}", controller_name)
text = text.replace("{{model_name}}", model_name)
text = text.replace("{{cap_param_name}}", cap_param_name)
text = text.replace("{{param_name}}", param)
f_write.write(text)
f_write.close()
move(result_file,controllersPath + result_file_name.capitalize())
pass
def create_model(param):
    # Check whether the model already exists
if os.path.isfile(modelsPath + param + '_model.php'):
print 'Model já existe'
else:
result_file_name = param.lower() + '_model.php'
result_file = crud_generator_files + result_file_name
default_model_file = crud_generator_files + 'model_file.php'
model_name = param + '_model'
print 'Model Não existe. Criando cópia de arquivo'
        copyfile(crud_generator_files + 'model_file.php', result_file)
f_read = open(default_model_file, "r")
f_write = open(result_file, "w+")
text = f_read.read()
text = text.replace("{{class_name}}", model_name.capitalize())
text = text.replace("{{model_name}}", model_name)
text = text.replace("{{table_name}}", param + 's')
text = text.replace("{{param_name}}", param)
f_write.write(text)
f_write.close()
move(result_file,modelsPath + result_file_name.capitalize())
pass
def create_views(param):
view_dir_path = viewsPath + param + 's'
if not os.path.exists(view_dir_path):
os.makedirs(view_dir_path)
open(viewsPath + param + 's' + '/create.php','a').close()
open(viewsPath + param + 's' + '/edit.php','a').close()
open(viewsPath + param + 's' + '/list.php','a').close()
pass
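# Illustrative CLI usage (a sketch of what the argument handling below does):
#   python crud_generator.py create user
# would generate application/controllers/Users.php,
# application/models/User_model.php and the views/users/ folder containing
# create.php, edit.php and list.php.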
metodo = sys.argv[1]
param = sys.argv[2]
if metodo == 'create_controller':
create_controller(param)
pass
if metodo == 'create_model':
create_model(param)
pass
if metodo == 'create_views':
create_views(param)
if metodo == 'create':
create_controller(param)
create_model(param)
create_views(param)
| UTF-8 | Python | false | false | 3,199 | py | 6 | crud_generator.py | 5 | 0.616228 | 0.615288 | 0 | 108 | 28.555556 | 83 |
riavats05/4June2018 | 10,703,058,506,289 | 73b3e66ca12c62b92c25674e16ebad28bcaaa31e | e8d9a486877c0fafc313d00c76ad014cbb04e1ae | /database_prgm.py | 70d759a34facf6169891a70ab2f81e75598f002d | []
| no_license | https://github.com/riavats05/4June2018 | 69b11404a6921ba079e9c40f38c7e890aecea075 | 49a4c3b307cf24d2409b1d6499058bd24c849a57 | refs/heads/master | 2020-03-20T21:38:26.367934 | 2018-06-28T07:09:54 | 2018-06-28T07:09:54 | 137,744,569 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2
import mysql.connector as mysql
connection=mysql.connect(user='root',password='riavats',host='localhost',database='adhoc1')
cur=connection.cursor()
cur.execute("select name,email from student")
output=cur.fetchall()
print output
print "1. Insert"
print "2. Update"
print "3. Exit"
value=raw_input("Enter choice")
if value=='1':
cur.execute("Insert into student (name,email) values ('N2','n2@gmail.com')")
print("One row inserted")
connection.commit()
elif value=='2':
cur.execute("Update student set name='Ria' where sr=1")
print("Row Updated")
connection.commit()
else:
exit()
| UTF-8 | Python | false | false | 605 | py | 12 | database_prgm.py | 11 | 0.727273 | 0.710744 | 0 | 21 | 27.761905 | 91 |
skyportal/skyportal | 13,597,866,459,160 | 042ce6c71104164ef362cfba22c181dc2bfac193 | a18d2b12d0e6afa1e3f5b6e0a6c84d671cdd5e34 | /skyportal/tests/api/test_catalog_services.py | ac6990058b46673e3e27b2f905fae90b717af7fa | [
"BSD-3-Clause",
"MIT"
]
| permissive | https://github.com/skyportal/skyportal | a55b59a50b7d9d8437a2c6a978cb7713bb67aee2 | 161d3532ba3ba059446addcdac58ca96f39e9636 | refs/heads/main | 2023-09-01T03:39:33.676748 | 2023-08-28T16:29:02 | 2023-08-28T16:29:02 | 82,240,075 | 80 | 100 | NOASSERTION | false | 2023-09-14T21:08:34 | 2017-02-17T00:34:43 | 2023-08-29T03:00:42 | 2023-09-14T21:08:32 | 60,967 | 78 | 81 | 106 | Python | false | false | import numpy as np
import pytest
import time
import uuid
from skyportal.tests import api
@pytest.mark.flaky(reruns=2)
def test_swift_lsxps(super_admin_token):
name = str(uuid.uuid4())
status, data = api(
'POST',
'telescope',
data={
'name': name,
'nickname': name,
'lat': 0.0,
'lon': 0.0,
'elevation': 0.0,
'diameter': 10.0,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
telescope_id = data['data']['id']
instrument_name = str(uuid.uuid4())
status, data = api(
'POST',
'instrument',
data={
'name': instrument_name,
'type': 'imager',
'band': 'NIR',
'filters': ['swiftxrt'],
'telescope_id': telescope_id,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
status, data = api(
"POST",
"catalogs/swift_lsxps",
data={'telescope_name': name},
token=super_admin_token,
)
assert status == 200
NRETRIES = 10
retries = 0
sources_loaded = False
obj_id = "Swift-J023017.0+283603"
while not sources_loaded and retries < NRETRIES:
status, data = api(
'GET',
f'sources/{obj_id}',
token=super_admin_token,
)
if not status == 200:
retries = retries + 1
time.sleep(10)
else:
sources_loaded = True
assert np.isclose(data['data']['ra'], 37.5712185545)
assert np.isclose(data['data']['dec'], 28.6012172159)
| UTF-8 | Python | false | false | 1,701 | py | 984 | test_catalog_services.py | 871 | 0.5097 | 0.470312 | 0 | 72 | 22.625 | 57 |
hhelibeb/leetcode | 14,611,478,753,364 | a179b5007a3fa18b8b84bd3512cdabb8f34681fa | 9e1993270cb9cf714fc102647861557d0e859b62 | /1276. Number of Burgers with No Waste of Ingredients.py | b99d7ede00035eb800e0ca1fd02523ed32dad324 | []
| no_license | https://github.com/hhelibeb/leetcode | 06adb57bad9e2ef3e0140c7e364788b534441370 | dc02ea889ec3b53415dc1c5f9e7444c2b46fcd06 | refs/heads/master | 2021-01-20T18:28:05.125275 | 2020-04-30T14:13:36 | 2020-04-30T14:13:36 | 59,983,962 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
class Solution:
def numOfBurgers(self, tomatoSlices: int, cheeseSlices: int) -> List[int]:
if tomatoSlices % 2 != 0:
return
if tomatoSlices > cheeseSlices * 4 or tomatoSlices < cheeseSlices * 2:
return
jumbo = 0
smallRight = tomatoSlices / 2
smallLeft = 0
cheeseTemp = jumbo + smallRight
if cheeseTemp == cheeseSlices:
return ([int(jumbo), int(smallRight)])
while True:
smallMid = smallRight - int((smallRight-smallLeft )/4) * 2
jumbo = int((tomatoSlices - smallMid*2)/4)
cheeseTemp = jumbo + smallMid
if cheeseTemp == cheeseSlices and ( 4 * jumbo + 2 * smallMid ) == tomatoSlices:
return([int(jumbo), int(smallMid)])
if smallRight < 0:
return
if cheeseTemp > cheeseSlices:
smallRight = smallMid - 1
if cheeseTemp < cheeseSlices:
smallLeft = smallMid + 1
if cheeseTemp == cheeseSlices:
smallRight = smallMid - 1
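# Note (for illustration): the system 4*jumbo + 2*small = tomatoSlices and
# jumbo + small = cheeseSlices also has the closed-form solution
# jumbo = tomatoSlices/2 - cheeseSlices and small = 2*cheeseSlices - tomatoSlices/2,
# which is the pair the binary search above converges to.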
S = Solution()
print( S.numOfBurgers(4,16))
print(28087*4+841652*2)
| UTF-8 | Python | false | false | 1,220 | py | 20 | 1276. Number of Burgers with No Waste of Ingredients.py | 19 | 0.544262 | 0.517213 | 0 | 32 | 36.125 | 91 |
bopopescu/geosci | 10,522,669,888,669 | 0508e1f61a2cb36b87d5114c6f917cff419c9352 | bc01e1d158e7d8f28451a7e108afb8ec4cb7d5d4 | /sage/src/sage/sets/finite_set_maps.py | 763e503ff1e977d04eb67a76930fbdbb6e0fce1d | []
| no_license | https://github.com/bopopescu/geosci | 28792bda1ec1f06e23ba8dcb313769b98f793dad | 0d9eacbf74e2acffefde93e39f8bcbec745cdaba | refs/heads/master | 2021-09-22T17:47:20.194233 | 2018-09-12T22:19:36 | 2018-09-12T22:19:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | r"""
Maps between finite sets
This module implements parents modeling the set of all maps between
two finite sets. At the user level, any such parent should be
constructed using the factory class :class:`FiniteSetMaps` which
properly selects which of its subclasses to use.
AUTHORS:
- Florent Hivert
"""
#*****************************************************************************
# Copyright (C) 2010 Florent Hivert <Florent.Hivert@univ-rouen.fr>,
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
import itertools
from sage.structure.parent import Parent
from sage.rings.integer import Integer
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.sets_cat import Sets, EmptySetError
from sage.categories.monoids import Monoids
from sage.categories.enumerated_sets import EnumeratedSets
from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
from sage.sets.integer_range import IntegerRange
from sage.sets.finite_set_map_cy import (
FiniteSetMap_MN, FiniteSetMap_Set,
FiniteSetEndoMap_N, FiniteSetEndoMap_Set )
from sage.misc.cachefunc import cached_method
# TODO: finite set maps should be morphisms in the category of finite sets
class FiniteSetMaps(UniqueRepresentation, Parent):
"""
Maps between finite sets
Constructs the set of all maps between two sets. The sets can be
given using any of the three following ways:
1. an object in the category ``Sets()``.
2. a finite iterable. In this case, an object of the class
:class:`~sage.sets.finite_enumerated_set.FiniteEnumeratedSet`
is constructed from the iterable.
    3. an integer ``n`` designating the set `\{1, 2, \dots, n\}`. In this case
an object of the class :class:`~sage.sets.integer_range.IntegerRange`
is constructed.
INPUT:
- ``domain`` -- a set, finite iterable, or integer.
- ``codomain`` -- a set, finite iterable, integer, or ``None``
(default). In this last case, the maps are endo-maps of the domain.
- ``action`` -- ``"left"`` (default) or ``"right"``. The side
where the maps act on the domain. This is used in particular to
define the meaning of the product (composition) of two maps.
- ``category`` -- the category in which the sets of maps is
constructed. By default, this is ``FiniteMonoids()`` if the domain and
codomain coincide, and ``FiniteEnumeratedSets()`` otherwise.
OUTPUT:
an instance of a subclass of :class:`FiniteSetMaps` modeling
the set of all maps between ``domain`` and ``codomain``.
EXAMPLES:
We construct the set ``M`` of all maps from `\{a,b\}` to `\{3,4,5\}`::
sage: M = FiniteSetMaps(["a", "b"], [3, 4, 5]); M
Maps from {'a', 'b'} to {3, 4, 5}
sage: M.cardinality()
9
sage: M.domain()
{'a', 'b'}
sage: M.codomain()
{3, 4, 5}
sage: for f in M: print(f)
map: a -> 3, b -> 3
map: a -> 3, b -> 4
map: a -> 3, b -> 5
map: a -> 4, b -> 3
map: a -> 4, b -> 4
map: a -> 4, b -> 5
map: a -> 5, b -> 3
map: a -> 5, b -> 4
map: a -> 5, b -> 5
Elements can be constructed from functions and dictionaries::
sage: M(lambda c: ord(c)-94)
map: a -> 3, b -> 4
sage: M.from_dict({'a':3, 'b':5})
map: a -> 3, b -> 5
If the domain is equal to the codomain, then maps can be
composed::
sage: M = FiniteSetMaps([1, 2, 3])
sage: f = M.from_dict({1:2, 2:1, 3:3}); f
map: 1 -> 2, 2 -> 1, 3 -> 3
sage: g = M.from_dict({1:2, 2:3, 3:1}); g
map: 1 -> 2, 2 -> 3, 3 -> 1
sage: f * g
map: 1 -> 1, 2 -> 3, 3 -> 2
This makes `M` into a monoid::
sage: M.category()
Join of Category of finite monoids and Category of finite enumerated sets
sage: M.one()
map: 1 -> 1, 2 -> 2, 3 -> 3
By default, composition is from right to left, which corresponds
to an action on the left. If one specifies ``action`` to right,
then the composition is from left to right::
sage: M = FiniteSetMaps([1, 2, 3], action = 'right')
sage: f = M.from_dict({1:2, 2:1, 3:3})
sage: g = M.from_dict({1:2, 2:3, 3:1})
sage: f * g
map: 1 -> 3, 2 -> 2, 3 -> 1
If the domains and codomains are both of the form `\{0,\dots\}`,
then one can use the shortcut::
sage: M = FiniteSetMaps(2,3); M
Maps from {0, 1} to {0, 1, 2}
sage: M.cardinality()
9
For a compact notation, the elements are then printed as lists
`[f(i), i=0,\dots]`::
sage: list(M)
[[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
TESTS::
sage: TestSuite(FiniteSetMaps(0)).run()
sage: TestSuite(FiniteSetMaps(0, 2)).run()
sage: TestSuite(FiniteSetMaps(2, 0)).run()
sage: TestSuite(FiniteSetMaps([], [])).run()
sage: TestSuite(FiniteSetMaps([1, 2], [])).run()
sage: TestSuite(FiniteSetMaps([], [1, 2])).run()
"""
@staticmethod
def __classcall_private__(cls, domain, codomain = None, action = "left", category = None):
"""
TESTS::
sage: FiniteSetMaps(3)
Maps from {0, 1, 2} to itself
sage: FiniteSetMaps(4, 2)
Maps from {0, 1, 2, 3} to {0, 1}
sage: FiniteSetMaps(4, ["a","b","c"])
Maps from {0, 1, 2, 3} to {'a', 'b', 'c'}
sage: FiniteSetMaps([1,2], ["a","b","c"])
Maps from {1, 2} to {'a', 'b', 'c'}
sage: FiniteSetMaps([1,2,4], 3)
Maps from {1, 2, 4} to {0, 1, 2}
"""
if codomain is None:
if isinstance(domain, (int, Integer)):
return FiniteSetEndoMaps_N(domain, action, category)
else:
if domain not in Sets():
domain = FiniteEnumeratedSet(domain)
return FiniteSetEndoMaps_Set(domain, action, category)
if isinstance(domain, (int, Integer)):
if isinstance(codomain, (int, Integer)):
return FiniteSetMaps_MN(domain, codomain, category)
else:
domain = IntegerRange(domain)
if isinstance(codomain, (int, Integer)):
codomain = IntegerRange(codomain)
if domain not in Sets():
domain = FiniteEnumeratedSet(domain)
if codomain not in Sets():
codomain = FiniteEnumeratedSet(codomain)
return FiniteSetMaps_Set(domain, codomain, category)
def cardinality(self):
"""
The cardinality of ``self``
EXAMPLES::
sage: FiniteSetMaps(4, 3).cardinality()
81
"""
return self.codomain().cardinality()**self.domain().cardinality()
class FiniteSetMaps_MN(FiniteSetMaps):
"""
The set of all maps from `\{1, 2, \dots, m\}` to `\{1, 2, \dots, n\}`.
Users should use the factory class :class:`FiniteSetMaps` to
create instances of this class.
INPUT:
- ``m``, ``n`` -- integers
- ``category`` -- the category in which the sets of maps is
constructed. It must be a sub-category of
``EnumeratedSets().Finite()`` which is the default value.
"""
def __init__(self, m, n, category=None):
"""
TESTS::
sage: M = FiniteSetMaps(2,3)
sage: M.category()
Category of finite enumerated sets
sage: M.__class__
<class 'sage.sets.finite_set_maps.FiniteSetMaps_MN_with_category'>
sage: TestSuite(M).run()
"""
Parent.__init__(self,
category=EnumeratedSets().Finite().or_subcategory(category))
self._m = Integer(m)
self._n = Integer(n)
def domain(self):
"""
The domain of ``self``
EXAMPLES::
sage: FiniteSetMaps(3,2).domain()
{0, 1, 2}
"""
return IntegerRange(self._m)
def codomain(self):
"""
The codomain of ``self``
EXAMPLES::
sage: FiniteSetMaps(3,2).codomain()
{0, 1}
"""
return IntegerRange(self._n)
def _repr_(self):
"""
TESTS::
sage: FiniteSetMaps(2,3)
Maps from {0, 1} to {0, 1, 2}
"""
return "Maps from %s to %s"%(self.domain(), self.codomain())
def __contains__(self, x):
"""
EXAMPLES::
sage: M = FiniteSetMaps(3,2)
sage: [0,1,1] in M
True
sage: [1,2,4] in M
False
"""
if isinstance(x, self.element_class):
return x.parent() is self and len(x) == self._m
else:
x = list(x)
if len(x) != self._m:
return False
for i in x:
if not (0 <= i < self._n):
return False
return True
def an_element(self):
"""
Returns a map in ``self``
EXAMPLES::
sage: M = FiniteSetMaps(4, 2)
sage: M.an_element()
[0, 0, 0, 0]
sage: M = FiniteSetMaps(0, 0)
sage: M.an_element()
[]
An exception :class:`~sage.categories.sets_cat.EmptySetError`
is raised if this set is empty, that is if the codomain is
        empty and the domain is not::
sage: M = FiniteSetMaps(4, 0)
sage: M.cardinality()
0
sage: M.an_element()
Traceback (most recent call last):
...
EmptySetError
"""
if self._m > 0 and self._n == 0:
raise EmptySetError
return self._from_list_([0]*self._m)
def __iter__(self):
"""
EXAMPLES::
sage: M = FiniteSetMaps(2,2)
sage: M.list()
[[0, 0], [0, 1], [1, 0], [1, 1]]
TESTS::
sage: FiniteSetMaps(0,0).list()
[[]]
sage: FiniteSetMaps(0,1).list()
[[]]
sage: FiniteSetMaps(0,10).list()
[[]]
sage: FiniteSetMaps(1,0).list()
[]
sage: FiniteSetMaps(1,1).list()
[[0]]
"""
for v in itertools.product(range(self._n), repeat=self._m):
yield self._from_list_(v)
def _from_list_(self, v):
"""
EXAMPLES::
sage: M = FiniteSetMaps(4,3)
sage: M._from_list_([2,1,1,0])
[2, 1, 1, 0]
"""
return self.element_class(self, v, check=False)
def _element_constructor_(self, *args, **keywords):
"""
EXAMPLES::
sage: M = FiniteSetMaps(4,3)
sage: M([2,1,1,0])
[2, 1, 1, 0]
"""
return self.element_class(self, *args, **keywords)
Element = FiniteSetMap_MN
class FiniteSetMaps_Set(FiniteSetMaps_MN):
"""
    The set of all maps between two sets
Users should use the factory class :class:`FiniteSetMaps` to
create instances of this class.
INPUT:
- ``domain`` -- an object in the category ``FiniteSets()``.
- ``codomain`` -- an object in the category ``FiniteSets()``.
- ``category`` -- the category in which the sets of maps is
constructed. It must be a sub-category of
``EnumeratedSets().Finite()`` which is the default value.
"""
def __init__(self, domain, codomain, category=None):
"""
EXAMPLES::
sage: M = FiniteSetMaps(["a", "b"], [3, 4, 5])
sage: M
Maps from {'a', 'b'} to {3, 4, 5}
sage: M.cardinality()
9
sage: for f in M: print(f)
map: a -> 3, b -> 3
map: a -> 3, b -> 4
map: a -> 3, b -> 5
map: a -> 4, b -> 3
map: a -> 4, b -> 4
map: a -> 4, b -> 5
map: a -> 5, b -> 3
map: a -> 5, b -> 4
map: a -> 5, b -> 5
TESTS::
sage: M.__class__
<class 'sage.sets.finite_set_maps.FiniteSetMaps_Set_with_category'>
sage: M.category()
Category of finite enumerated sets
sage: TestSuite(M).run()
"""
FiniteSetMaps_MN.__init__(self, domain.cardinality(), codomain.cardinality(),
category=category)
self._domain = domain
self._codomain = codomain
import sage.combinat.ranker as ranker
ldomain = domain.list()
lcodomain = codomain.list()
self._unrank_domain = ranker.unrank_from_list(ldomain)
self._rank_domain = ranker.rank_from_list(ldomain)
self._unrank_codomain = ranker.unrank_from_list(lcodomain)
self._rank_codomain = ranker.rank_from_list(lcodomain)
def domain(self):
"""
The domain of ``self``
EXAMPLES::
sage: FiniteSetMaps(["a", "b"], [3, 4, 5]).domain()
{'a', 'b'}
"""
return self._domain
def codomain(self):
"""
The codomain of ``self``
EXAMPLES::
sage: FiniteSetMaps(["a", "b"], [3, 4, 5]).codomain()
{3, 4, 5}
"""
return self._codomain
# TODO: consistency from_dict / from_list
def _from_list_(self, v):
"""
Create a function from a list
        The list gives, for each element of the domain in order, the
        rank (index) of its image in the codomain.
EXAMPLES::
sage: M = FiniteSetMaps(["a", "b"], [3, 4, 5])
sage: M._from_list_([2,1])
map: a -> 5, b -> 4
"""
return self.element_class.from_list(self, v)
def from_dict(self, d):
"""
Create a map from a dictionary
EXAMPLES::
sage: M = FiniteSetMaps(["a", "b"], [3, 4, 5])
sage: M.from_dict({"a": 4, "b": 3})
map: a -> 4, b -> 3
"""
return self.element_class.from_dict(self, d)
Element = FiniteSetMap_Set
class FiniteSetEndoMaps_N(FiniteSetMaps_MN):
"""
The sets of all maps from `\{1, 2, \dots, n\}` to itself
Users should use the factory class :class:`FiniteSetMaps` to
create instances of this class.
INPUT:
- ``n`` -- an integer.
- ``category`` -- the category in which the sets of maps is
constructed. It must be a sub-category of ``Monoids().Finite()``
and ``EnumeratedSets().Finite()`` which is the default value.
"""
def __init__(self, n, action, category=None):
"""
EXAMPLES::
sage: M = FiniteSetMaps(3)
sage: M.category()
Join of Category of finite monoids and Category of finite enumerated sets
sage: M.__class__
<class 'sage.sets.finite_set_maps.FiniteSetEndoMaps_N_with_category'>
sage: TestSuite(M).run()
"""
category = (EnumeratedSets() & Monoids().Finite()).or_subcategory(category)
FiniteSetMaps_MN.__init__(self, n, n, category=category)
self._action = action
@cached_method
def one(self):
"""
EXAMPLES::
sage: M = FiniteSetMaps(4)
sage: M.one()
[0, 1, 2, 3]
"""
return self._from_list_(range(self._n))
def an_element(self):
"""
Returns a map in ``self``
EXAMPLES::
sage: M = FiniteSetMaps(4)
sage: M.an_element()
[3, 2, 1, 0]
"""
return self._from_list_(range(self._n-1, -1, -1))
def _repr_(self):
"""
TESTS::
sage: FiniteSetMaps(2)
Maps from {0, 1} to itself
"""
return "Maps from %s to itself"%(self.domain())
Element = FiniteSetEndoMap_N
class FiniteSetEndoMaps_Set(FiniteSetMaps_Set, FiniteSetEndoMaps_N):
"""
    The set of all maps from a set to itself
Users should use the factory class :class:`FiniteSetMaps` to
create instances of this class.
INPUT:
- ``domain`` -- an object in the category ``FiniteSets()``.
- ``category`` -- the category in which the sets of maps is
constructed. It must be a sub-category of ``Monoids().Finite()``
and ``EnumeratedSets().Finite()`` which is the default value.
"""
def __init__(self, domain, action, category=None):
"""
TESTS::
sage: M = FiniteSetMaps(["a", "b", "c"])
sage: M.category()
Join of Category of finite monoids and Category of finite enumerated sets
sage: M.__class__
<class 'sage.sets.finite_set_maps.FiniteSetEndoMaps_Set_with_category'>
sage: TestSuite(M).run()
"""
category = (EnumeratedSets() & Monoids().Finite()).or_subcategory(category)
FiniteSetMaps_MN.__init__(self, domain.cardinality(), domain.cardinality(),
category=category)
self._domain = domain
self._codomain = domain
import sage.combinat.ranker as ranker
ldomain = domain.list()
self._unrank_domain = ranker.unrank_from_list(ldomain)
self._rank_domain = ranker.rank_from_list(ldomain)
self._unrank_codomain = self._unrank_domain
self._rank_codomain = self._rank_domain
self._action = action
Element = FiniteSetEndoMap_Set
| UTF-8 | Python | false | false | 17,658 | py | 1,686 | finite_set_maps.py | 1,083 | 0.52707 | 0.508155 | 0 | 586 | 29.131399 | 94 |
truckli/practical-calcs | 10,617,159,201,277 | 276a2d14fbc69de02af7e523649d1f1f9c0fa822 | abd2510393fd980cea62d5716f20f7510547020f | /baseDialog.pyw | e254d0ceb36927c9e2048d94c7ea713e57309bf7 | []
| no_license | https://github.com/truckli/practical-calcs | d25d714edab082122a2e8006dbf2a7ffcf70165a | 94d554c65704910d0c5e90dfac6872c93bc53424 | refs/heads/master | 2016-03-27T16:34:49.901955 | 2013-09-16T08:15:46 | 2013-09-16T08:15:46 | 35,704,381 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# Copyright (c) 2007-8 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self._setupUi()
def _clearWidgets(self):
self._autoUpdateRate()
self.updateUi()
def _setupUi(self):
self.calendar = QCalendarWidget()
grid = QGridLayout()
grid.addWidget(self.calendar, 0, 0)
self.setLayout(grid)
return
def updateUi(self):
return
def _advancedSetting(self):
self._autoUpdateRate()
self.updateUi()
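    def _autoUpdateRate(self):
        # Stub added for illustration: _clearWidgets() and _advancedSetting()
        # call this hook but the original template never defines it, so any
        # call would raise AttributeError; replace with real behaviour as needed.
        pass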
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| UTF-8 | Python | false | false | 1,234 | pyw | 4 | baseDialog.pyw | 3 | 0.719611 | 0.710697 | 0 | 46 | 25.76087 | 74 |
DimitriMisiak/foobar-challenge-withgoogle | 9,474,697,905,476 | dff1b70018e79bcaf884e3e3b3b688697bc5cc6e | fdc6be235a166db4ac152fb55510036c2a9cf317 | /bomb_baby.py | 5be715e5070470b13a4af61aac7df115bb9f1c07 | []
| no_license | https://github.com/DimitriMisiak/foobar-challenge-withgoogle | ca4afec3237c0817ff7ea8d106dbd909fc33f62b | ee9d6a229a87e350c41cc8d5251bf7f88a4bee08 | refs/heads/main | 2023-06-14T15:25:41.920027 | 2021-07-14T22:56:12 | 2021-07-14T22:56:12 | 373,237,719 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed May 19 14:56:15 2021
@author: dimit
"""
def solution(M, F):
M = int(M)
F = int(F)
n_gen = 0
while True:
n_gen += max(M,F) // min(M,F)
remainder = max(M,F) % min(M,F)
F = min(M,F)
M = remainder
print(M, F)
if F == 1:
return str(n_gen-1)
elif M == 0:
return "impossible"
print(('1', '2'))
print(solution('1', '2'))
print(('4', '7'))
print(solution('4', '7'))
print(('4', '2'))
print(solution('4', '2')) | UTF-8 | Python | false | false | 561 | py | 8 | bomb_baby.py | 8 | 0.440285 | 0.388592 | 0 | 31 | 17.129032 | 39 |
rashid0531/DistributedCNN | 8,967,891,729,375 | 0696bdcace348f2733176c6a6b955ecc8eabe88a | c38596522e2b39a4742149c5f65ed9b5886e0c6b | /distributed_train.py | 4f988097559b20c13f0491ceeb3725b0cc2020e6 | []
| no_license | https://github.com/rashid0531/DistributedCNN | d06771736f2e7e42439667bd8f2bdb069ba83088 | daaf547a48ee0c7ceb970f33d2bb3bd269e61431 | refs/heads/master | 2022-05-20T18:54:45.364748 | 2019-07-04T20:57:37 | 2019-07-04T20:57:37 | 138,433,398 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import argparse
import tensorflow as tf
import os
from PIL import Image, ImageFile
import numpy as np
import prepare
from matplotlib import pyplot as plt
from datetime import datetime
from tensorflow.python.client import timeline
import modified_MCNN as model
import re
import time
import subprocess
import psutil
tf.logging.set_verbosity(tf.logging.INFO)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# This function kills all the child processes associated with the parent process sent as function argument.
def kill(proc_pid):
process = psutil.Process(proc_pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
def core_model(input_image):
mcnn_model = model.MCNN(input_image)
predicted_density_map = mcnn_model.final_layer_output
return predicted_density_map
def do_training(args):
ps_hosts = args["ps_hosts"].split(",")
worker_hosts = args["worker_hosts"].split(",")
print("Number of Parameter Servers: ",len(ps_hosts), " Number of workers: ", len(worker_hosts))
# Create a cluster from the parameter server and worker hosts.
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
# Create and start a server for the local task.
server = tf.train.Server(cluster,
job_name=args["job_name"],
task_index=int(args["task_index"]))
if args["job_name"] == "ps":
server.join()
elif args["job_name"] == "worker":
# Assigns ops to the local worker by default.
with tf.device(tf.train.replica_device_setter(worker_device="/job:worker/task:%d" % int(args["task_index"]), ps_device="/job:ps/cpu:0", cluster=cluster)):
train_set_image, train_set_gt, test_set_image, test_set_gt = prepare.get_train_test_DataSet(args["image_path"], args["gt_path"], args["dataset_train_test_ratio"])
TRAINSET_LENGTH = len(train_set_image)
print("Trainset Length : ", len(train_set_image) , len(train_set_gt))
images_input_train = tf.constant(train_set_image)
images_gt_train = tf.constant(train_set_gt)
dataset_train = tf.data.Dataset.from_tensor_slices((images_input_train, images_gt_train))
            # At the time of this writing, TensorFlow doesn't support mixing user-defined Python functions
            # with TensorFlow operations inside a single map, so we can't use one py_func to process data
            # with both TensorFlow and non-TensorFlow operations.
# Train Set
Batched_dataset_train = dataset_train.map(
lambda img, gt: tf.py_func(prepare.read_npy_file, [img, gt], [img.dtype, tf.float32]))
Batched_dataset_train = Batched_dataset_train.shard(len(worker_hosts),int(args["task_index"]))
Batched_dataset_train = Batched_dataset_train \
.shuffle(buffer_size=500) \
.map(prepare._parse_function,num_parallel_calls= args["num_parallel_threads"]) \
.apply(tf.contrib.data.batch_and_drop_remainder(args["batch_size_per_GPU"])) \
.prefetch(buffer_size = args["prefetch_buffer"])\
.repeat()
iterator = Batched_dataset_train.make_one_shot_iterator()
mini_batch = iterator.get_next()
image_names = mini_batch[0]
# If the number of GPUs is set to 1, then no splitting will be done
split_batches_imgs = tf.split(mini_batch[1], int(args["num_gpus"]))
split_batches_gt = tf.split(mini_batch[2], int(args["num_gpus"]))
predicted_density_map = core_model(split_batches_imgs[0])
cost = tf.reduce_mean(
tf.sqrt(
tf.reduce_sum(
tf.square(
tf.subtract(split_batches_gt[0], predicted_density_map)), axis=[1, 2, 3], keepdims=True)))
sum_of_gt = tf.reduce_sum(split_batches_gt[0], axis=[1, 2, 3], keepdims=True)
sum_of_predicted_density_map = tf.reduce_sum(predicted_density_map, axis=[1, 2, 3], keepdims=True)
mse = tf.sqrt(tf.reduce_mean(tf.square(sum_of_gt - sum_of_predicted_density_map)))
            # Mean absolute error of the predicted counts.
mae = tf.reduce_mean(
tf.reduce_sum(tf.abs(tf.subtract(sum_of_gt, sum_of_predicted_density_map)), axis=[1, 2, 3], keepdims=True),name="mae")
# Adding summary to the graph.
            # A small threshold value can be added to mae to prevent NaN from being stored in the summary histogram.
#tf.summary.scalar("Mean Squared Error", mse)
tf.summary.scalar("Mean_Absolute_Error", mae)
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
summary_op = tf.summary.merge(summaries)
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(learning_rate=(args["learning_rate"]))
#train_op = optimizer.minimize(cost, global_step=global_step)
# Synchronous training.
opt = tf.train.SyncReplicasOptimizer(optimizer, replicas_to_aggregate=len(worker_hosts),
total_num_replicas=len(worker_hosts))
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
# !!!! Doubtful
training_op = opt.minimize(cost, global_step=global_step)
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
# The StopAtStepHook handles stopping after running given steps.
#SHARDED_TRAINSET_LENGTH = int(TRAINSET_LENGTH/len(worker_hosts))
effective_batch_size = int(args["batch_size_per_GPU"])*len(worker_hosts)
end_point = int((TRAINSET_LENGTH * int(args["number_of_epoch"])) / effective_batch_size)
print("End Point : ",end_point)
is_chief=(int(args["task_index"]) == 0)
# You can create the hook which handles initialization and queues.
sync_replicas_hook = opt.make_session_run_hook(is_chief)
# Creating profiler hook.
profile_hook = tf.train.ProfilerHook(save_steps=1000, output_dir='/home/rashid/DistributedCNN/Model/timeline/')
# Simple Example of logging hooks
"""
tensors_to_log = {"MAE ": "mae"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=5)
"""
# last_step should be equal to the end_point
hooks=[sync_replicas_hook, tf.train.StopAtStepHook(last_step=end_point),profile_hook]
# The MonitoredTrainingSession takes care of session initialization,
# restoring from a checkpoint, saving to a checkpoint, and closing when done
# or an error occurs.
arr_examples_per_sec = []
with tf.train.MonitoredTrainingSession(master=server.target,
is_chief=(int(args["task_index"]) == 0),
checkpoint_dir=args["checkpoint_path"],
hooks=hooks, config=config) as mon_sess:
while not mon_sess.should_stop():
# Run a training step asynchronously.
                    # See tf.train.SyncReplicasOptimizer for additional details on how to
# perform *synchronous* training.
# mon_sess.run handles AbortedError in case of preempted PS.
start_time = time.time()
_, loss_value, step = mon_sess.run((training_op, mae,global_step))
duration = time.time() - start_time
examples_per_sec = (args["batch_size_per_GPU"] * len(worker_hosts)) / duration
arr_examples_per_sec.append(examples_per_sec)
format_str = ('%s: step %d, loss = %.2f , examples/sec = %.1f ')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec))
#mon_sess.run(training_op)
with open("exm_per_sec.txt","w") as file_object:
for i in range(0,len(arr_examples_per_sec)):
file_object.write(str(arr_examples_per_sec[i])+"\n")
print("--- Experiment Finished ---")
if __name__ == "__main__":
# The following default values will be used if not provided from the command line arguments.
DEFAULT_NUMBER_OF_GPUS = 1
DEFAULT_EPOCH = 3000
DEFAULT_NUMBER_OF_WORKERS = 1
DEFAULT_NUMBER_OF_PS = 1
DEFAULT_BATCHSIZE_PER_GPU = 8
DEFAULT_BATCHSIZE = DEFAULT_BATCHSIZE_PER_GPU * DEFAULT_NUMBER_OF_GPUS * DEFAULT_NUMBER_OF_WORKERS
DEFAULT_PARALLEL_THREADS = 8
#DEFAULT_PREFETCH_BUFFER_SIZE = DEFAULT_BATCHSIZE * DEFAULT_NUMBER_OF_GPUS * 2
DEFAULT_PREFETCH_BUFFER_SIZE = 128
DEFAULT_IMAGE_PATH = "/home/mrc689/Sampled_Dataset"
DEFAULT_GT_PATH = "/home/mrc689/Sampled_Dataset_GT/density_map"
DEFAULT_LOG_PATH = "/home/mrc689/tf_logs"
DEFAULT_RATIO_TRAINTEST_DATASET = 0.7
DEFAULT_LEARNING_RATE = 0.00001
DEFAULT_CHECKPOINT_PATH = "/home/mrc689/tf_ckpt"
DEFAULT_LOG_FREQUENCY = 10
#DEFAULT_MAXSTEPS = (DEFAULT_TRAINSET_LENGTH * DEFAULT_EPOCH) / DEFAULT_BATCHSIZE
    # Create arguments to parse
    ap = argparse.ArgumentParser(description="Script to train the FlowerCounter model using multiple GPUs on a single node.")
ap.add_argument("-g", "--num_gpus", required=False, help="How many GPUs to use.",default = DEFAULT_NUMBER_OF_GPUS)
ap.add_argument("-e", "--number_of_epoch", required=False, help="Number of epochs",default = DEFAULT_EPOCH)
ap.add_argument("-b", "--batch_size", required=False, help="Number of images to process in a minibatch",default = DEFAULT_BATCHSIZE)
ap.add_argument("-gb", "--batch_size_per_GPU", required=False, help="Number of images to process in a batch per GPU",default = DEFAULT_BATCHSIZE_PER_GPU)
ap.add_argument("-i", "--image_path", required=False, help="Input path of the images",default = DEFAULT_IMAGE_PATH)
ap.add_argument("-gt", "--gt_path", required=False, help="Ground truth path of input images",default = DEFAULT_GT_PATH)
ap.add_argument("-num_threads", "--num_parallel_threads", required=False, help="Number of threads to use in parallel for preprocessing elements in input pipeline", default = DEFAULT_PARALLEL_THREADS)
ap.add_argument("-l", "--log_path", required=False, help="Path to save the tensorflow log files",default=DEFAULT_LOG_PATH)
ap.add_argument("-r", "--dataset_train_test_ratio", required=False, help="Dataset ratio for train and test set .",default = DEFAULT_RATIO_TRAINTEST_DATASET)
ap.add_argument("-pbuff","--prefetch_buffer",required=False,help="An internal buffer to prefetch elements from the input dataset ahead of the time they are requested",default=DEFAULT_PREFETCH_BUFFER_SIZE)
ap.add_argument("-lr", "--learning_rate", required=False, help="Default learning rate.",default = DEFAULT_LEARNING_RATE)
ap.add_argument("-ckpt_path", "--checkpoint_path", required=False, help="Path to save the Tensorflow model as checkpoint file.",default = DEFAULT_CHECKPOINT_PATH)
# Arguments needed for Distributed Training.
ap.add_argument("-pshosts", "--ps_hosts", required=False, help="Comma-separated list of hostname:port pairs.")
ap.add_argument("-wkhosts", "--worker_hosts", required=False, help="Comma-separated list of hostname:port pairs.")
ap.add_argument("-job", "--job_name", required=False, help="One of 'ps', 'worker'.")
ap.add_argument("-tsk_index", "--task_index", required=False, help="Index of task within the job.")
ap.add_argument("-lg_freq", "--log_frequency", required=False, help="Log frequency.",default = DEFAULT_LOG_FREQUENCY)
args = vars(ap.parse_args())
start_time = time.time()
tf.reset_default_graph()
# This process initiates the GPU profiling script.
proc = subprocess.Popen(['./gpu_profile'])
print("start GPU profiling process with pid %s" % proc.pid)
do_training(args)
duration = time.time() - start_time
kill(proc.pid)
print("Duration : ", duration)
| UTF-8 | Python | false | false | 12,623 | py | 25 | distributed_train.py | 19 | 0.638438 | 0.632734 | 0 | 275 | 44.901818 | 208 |
krishnavpai/open_source_start | 1,090,921,701,652 | f133f6477d7d5f7861a22c7e07b8fcc38e339f8a | c68f04d903c4e926b7b9a38096ed09dc257366b1 | /Python/reverse_linked_list.py | a4dc60dd1c253ab2f44f6d93f9b79c2fa66ad645 | []
| no_license | https://github.com/krishnavpai/open_source_start | bbbb7aa0f6d7ddfc811a1b93807626aa2fd2c39e | 81af597111abb98ff24ae46607361248f6c8723c | refs/heads/master | 2023-08-20T23:21:34.204120 | 2021-10-01T20:36:29 | 2021-10-01T20:36:29 | 412,608,512 | 1 | 0 | null | true | 2021-10-01T20:27:57 | 2021-10-01T20:27:56 | 2021-10-01T17:28:41 | 2021-10-01T19:28:03 | 5,693 | 0 | 0 | 0 | null | false | false | from typing import Tuple
class Node:
def __init__(self, data: int, next_=None) -> None:
self.data = data
self.next_ = next_
class LinkedList:
# Minimal representation of linked list
def __init__(self) -> None:
self.head = None
def insert(self, data: int) -> None:
'''
        Insert a node at the head of the linked list
'''
# if list is empty, make head
# point to newly created node
if self.head == None:
self.head = Node(data=data)
# if list is non-empty,
# insert element at head.
else:
self.head = Node(data=data, next_=self.head)
def reverse_iterative(self) -> None:
'''
Iteratively reverse linked list
'''
# tracks previous node
previous = None
# tracks current node
current = self.head
# tracks next-to current node
upcomming = current.next_
# make current node to point to
# previous node, then move all
# the pointers one node ahead.
while current != None:
current.next_ = previous
previous = current
current = upcomming
upcomming = upcomming.next_ if upcomming != None else None
# at the end of above loop
# previous pointer is pointing
# to last node in linked list
# which is then made the head
# of reversed linked list
# via this assignment
self.head = previous
def reverse_recurrsive(self, current: Node, previous: Node) -> None:
'''
        Recursively reverse the linked list
'''
# if current is none this indicates
# whole list has been traversed.
# so we make the previous node(which
# will we pointing to the last node)
# the head of the reversed linked list
if current == None:
self.head = previous
return
# if current is not none therefore
# there are nodes remaining to be processed
# temp stores the next node to process
temp = current.next_
# make current node point to previous node
current.next_ = previous
# recurrsively repeat procedure
self.reverse_recurrsive(temp, current)
def traverse(self) -> None:
'''
Traverse linked list
'''
temp = self.head
while temp != None:
print(f'{temp.data} ->', end=' ')
temp = temp.next_
print('Null')
def generate_test_case(min_num: int, max_num: int, length: int) -> List[int]:
'''
Generate test cases
'''
from random import randint
test_case = [randint(min_num, max_num) for _ in range(length)]
print('Test case is')
print(test_case)
print('\n')
return test_case
if __name__ == '__main__':
test = generate_test_case(0, 30, 8)
ll = LinkedList()
for item in test:
ll.insert(item)
ll.traverse()
ll.reverse_iterative()
ll.traverse()
ll.reverse_recurrsive(ll.head, None)
ll.traverse()
| UTF-8 | Python | false | false | 3,108 | py | 45 | reverse_linked_list.py | 39 | 0.560167 | 0.55888 | 0 | 109 | 27.513761 | 78 |
ellmetha/morganaubert-resume | 549,755,821,930 | 016fe53482182cdae599985df9935abb71dd2087 | 7a897f5e1b857b4342340583966126fdd0b7ec73 | /main/__init__.py | 80adbd9d8f22bd030f29015b76d2559e2010fd82 | [
"MIT"
]
| permissive | https://github.com/ellmetha/morganaubert-resume | 4690a678b5f12bf3ad3ce21b036a738b65018c19 | 7987ba5f1b0667d75b94ecbcf9ae5bbc0c4c9627 | refs/heads/main | 2023-07-19T15:22:58.873621 | 2023-07-15T13:01:44 | 2023-07-15T13:01:44 | 23,937,127 | 2 | 1 | MIT | false | 2023-07-19T02:58:11 | 2014-09-11T21:53:52 | 2022-01-01T14:12:45 | 2023-07-19T02:58:10 | 10,850 | 0 | 0 | 1 | HTML | false | false | from flask import Flask, redirect, request
from config import config
from . import extensions, modules
def create_app(config_name):
config_obj = config[config_name]()
app = Flask(__name__, static_url_path='/static')
# Initializes configuration values.
app.config.from_object(config_obj)
# Configure SSL if the current platform supports it.
if not app.debug and not app.testing and not app.config.get('SSL_DISABLE'):
from flask_sslify import SSLify
SSLify(app)
@app.before_request
def redirect_www():
""" Redirects www requests to non-www. """
if request.host.startswith('www.'):
new_host = request.host[4:]
return redirect(f"{request.scheme}://{new_host}/", code=301)
# Initializes Flask extensions.
extensions.init_app(app)
# Initializes modules.
modules.init_app(app)
return app
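# Illustrative usage sketch (added; "default" is a placeholder key -- use
# whichever names the imported ``config`` mapping actually defines):
if __name__ == "__main__":  # pragma: no cover - illustrative only
    app = create_app("default")
    app.run()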
| UTF-8 | Python | false | false | 899 | py | 38 | __init__.py | 19 | 0.656285 | 0.651835 | 0 | 33 | 26.242424 | 79 |
gabriellaec/desoft-analise-exercicios | 2,946,347,588,954 | 0af77932304e0a999dda1243059e530fb868b603 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_252/ch146_2020_04_13_02_16_13_512281.py | 8b50d7e598a73adf37af44c088813c6796457fc2 | []
| no_license | https://github.com/gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def conta_ocorrencias(lista):
    d = {}
    for palavra in lista:
        if palavra in d:
            d[palavra] += 1
        else:
            d[palavra] = 1
return d
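# Minimal usage sketch (added for illustration): counting repeated items.
if __name__ == "__main__":
    exemplo = ["ola", "mundo", "ola"]
    print(conta_ocorrencias(exemplo))  # {'ola': 2, 'mundo': 1}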
| UTF-8 | Python | false | false | 207 | py | 35,359 | ch146_2020_04_13_02_16_13_512281.py | 35,352 | 0.47343 | 0.463768 | 0 | 9 | 21.111111 | 31 |
silvadirceu/experiments | 13,752,485,311,889 | 9ba83ec644ff7c0269fed0c77792ba6bc139aa36 | 036f11eaae82a9c7838580d141375ab3c03f739a | /psychouacoustic-critic/perceptual_critic.py | 8a7f780418d28153921407b0c9ec6e9016600380 | []
| no_license | https://github.com/silvadirceu/experiments | 8b6f1739a51803f73da89c137d07871505ddf712 | 2390392726a43aa5587e02d8ee2a423cf281463c | refs/heads/master | 2022-02-19T11:18:52.485742 | 2019-09-26T14:43:51 | 2019-09-26T14:43:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import zounds
from zounds.learn import GanExperiment
from zounds.learn import Conv1d, ConvTranspose1d, DctTransform, sample_norm
import torch
from torch import nn
from torch.nn import functional as F
latent_dim = 100
sample_size = 8192
def sample_norm(x):
    # NOTE: this local definition shadows the sample_norm imported from
    # zounds.learn above; per-sample normalisation is currently disabled and
    # the input is returned unchanged.
    # norms = torch.norm(x, dim=1, keepdim=True)
    # return x / norms
    return x
# class Generator(nn.Module):
# def __init__(self):
# super(Generator, self).__init__()
# self.activation = lambda x: F.leaky_relu(x, 0.2)
# self.main = nn.Sequential(
# ConvTranspose1d(
# latent_dim, 512, 4, 1, 0,
# sample_norm=True, dropout=False, activation=self.activation),
# ConvTranspose1d(
# 512, 512, 8, 4, 2,
# sample_norm=True, dropout=False, activation=self.activation),
# ConvTranspose1d(
# 512, 512, 8, 4, 2,
# sample_norm=True, dropout=False, activation=self.activation),
# ConvTranspose1d(
# 512, 512, 8, 4, 2,
# sample_norm=True, dropout=False, activation=self.activation),
# ConvTranspose1d(
# 512, 512, 8, 4, 2,
# sample_norm=True, dropout=False, activation=self.activation))
#
# self.to_samples = ConvTranspose1d(
# 512, 1, 16, 8, 4,
# sample_norm=False,
# batch_norm=False,
# dropout=False,
# activation=None)
#
# def forward(self, x):
# x = x.view(-1, latent_dim, 1)
#
# for m in self.main:
# nx = m(x)
# factor = nx.shape[1] // x.shape[1]
# if nx.shape[-1] == x.shape[-1] and factor:
# upsampled = F.upsample(x, scale_factor=factor, mode='linear')
# x = self.activation(x + upsampled)
# else:
# x = nx
#
# x = self.to_samples(x)
# return x.view(-1, sample_size)
class LayerWithAttention(nn.Module):
def __init__(
self,
layer_type,
in_channels,
out_channels,
kernel,
stride=1,
padding=0,
dilation=1,
attention_func=F.sigmoid):
super(LayerWithAttention, self).__init__()
self.conv = layer_type(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
bias=False)
self.gate = layer_type(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
bias=False)
self.attention_func = attention_func
def forward(self, x):
c = self.conv(x)
c = sample_norm(c)
g = self.gate(x)
g = sample_norm(g)
out = F.tanh(c) * self.attention_func(g)
return out
class ConvAttentionLayer(LayerWithAttention):
def __init__(
self,
in_channels,
out_channels,
kernel,
stride=1,
padding=0,
dilation=1,
attention_func=F.sigmoid):
super(ConvAttentionLayer, self).__init__(
nn.Conv1d,
in_channels,
out_channels,
kernel,
stride,
padding,
dilation,
attention_func)
class ConvTransposeAttentionLayer(LayerWithAttention):
def __init__(
self,
in_channels,
out_channels,
kernel,
stride=1,
padding=0,
dilation=1,
attention_func=F.sigmoid):
super(ConvTransposeAttentionLayer, self).__init__(
nn.ConvTranspose1d,
in_channels,
out_channels,
kernel,
stride,
padding,
dilation,
attention_func)
class GeneratorWithAttention(nn.Module):
def __init__(self):
super(GeneratorWithAttention, self).__init__()
self.attn_func = F.sigmoid
self.layers = [
ConvTransposeAttentionLayer(
latent_dim, 512, 4, 2, 0, attention_func=self.attn_func),
ConvTransposeAttentionLayer(
512, 512, 8, 4, 2, attention_func=self.attn_func),
ConvTransposeAttentionLayer(
512, 512, 8, 4, 2, attention_func=self.attn_func),
ConvTransposeAttentionLayer(
512, 512, 8, 4, 2, attention_func=self.attn_func),
ConvTransposeAttentionLayer(
512, 256, 8, 4, 2, attention_func=self.attn_func),
]
self.main = nn.Sequential(*self.layers)
# TODO: Is overlap to blame for the noise?
# TODO: Try a hanning window at the "synthesis" step
# TODO: Do more weights at the final step make the problem worse,
# or better?
# TODO: Consider freezing the final layer to be morlet wavelets, or
# something
self.final = nn.ConvTranspose1d(256, 1, 16, 8, 4, bias=False)
self.gate = nn.ConvTranspose1d(256, 1, 16, 8, 4, bias=False)
# n_filters = 16
# self.dilated = nn.Sequential(
# nn.Conv1d(1, n_filters, 2, 1, dilation=1, padding=1, bias=False),
# nn.Conv1d(n_filters, n_filters, 2, 1, dilation=2, padding=1,
# bias=False),
# nn.Conv1d(n_filters, n_filters, 2, 1, dilation=4, padding=2,
# bias=False),
# nn.Conv1d(n_filters, n_filters, 2, 1, dilation=8, padding=4,
# bias=False),
# nn.Conv1d(n_filters, n_filters, 2, 1, dilation=16, padding=8,
# bias=False),
# nn.Conv1d(n_filters, n_filters, 2, 1, dilation=32, padding=16,
# bias=False),
#
# )
#
# self.final_final = nn.Conv1d(
# n_filters, 1, 2, 1, dilation=64, padding=32, bias=False)
def forward(self, x):
x = x.view(-1, latent_dim, 1)
x = self.main(x)
# TODO: at this point, activations in x should be fairly sparse, and
# should ideally have weights on a logarithmic scale
# TODO: Does a gate at this phase make sense? Shouldn't we gate the
# value passed to self.final
c = self.final(x)
g = self.gate(x)
x = F.sigmoid(g) * c
# for d in self.dilated:
# x = d(x)
# x = sample_norm(x)
# x = F.tanh(x)
#
# x = self.final_final(x)
# return x[..., :sample_size].contiguous()
return torch.clamp(x, -1, 1)
class Critic(nn.Module):
def __init__(self):
super(Critic, self).__init__()
self.last_dim = 512
self.activation = F.elu
self.dct_transform = DctTransform(use_cuda=True)
self.audio = nn.Sequential(
nn.Conv1d(1, 512, 16, 8, 4, bias=False),
nn.Conv1d(512, 512, 8, 4, 2, bias=False),
nn.Conv1d(512, 512, 8, 4, 2, bias=False),
nn.Conv1d(512, 512, 4, 2, 2, bias=False)
)
# TODO: consider first convolving over the dct channels
self.spectral = Conv1d(
512, 512, 1, 1, 1,
batch_norm=False, dropout=False, activation=self.activation)
self.main = nn.Sequential(
Conv1d(
1024, 512, 4, 2, 2,
batch_norm=False, dropout=False, activation=self.activation),
Conv1d(
512, 512, 4, 2, 2,
batch_norm=False, dropout=False, activation=self.activation),
Conv1d(
512, 512, 4, 2, 2,
batch_norm=False, dropout=False, activation=self.activation),
Conv1d(
512, 512, 4, 2, 2,
batch_norm=False, dropout=False, activation=self.activation),
Conv1d(
512, self.last_dim, 3, 1, 0,
batch_norm=False, dropout=False, activation=self.activation)
)
self.linear = nn.Linear(self.last_dim, 1, bias=False)
def forward(self, x):
x = x.view(-1, 1, sample_size)
# compute features given raw audio
audio = x
for m in self.audio:
audio = m(audio)
audio = self.activation(audio)
# TODO: try log-spaced frequencies
# TODO: try applying log weighting to the dct coefficients
# do an explicit frequency short-time fourier transform-type operation
x = self.dct_transform.short_time_dct(
x, 512, 256, zounds.HanningWindowingFunc())
maxes, indices = torch.max(torch.abs(x), dim=1, keepdim=True)
spectral = x / maxes
spectral = self.spectral(spectral)
spectral = self.activation(spectral)
x = torch.cat([audio, spectral], dim=1)
for m in self.main:
x = m(x)
x = self.activation(x)
x = x.view(-1, self.last_dim)
x = self.linear(x)
return x
class GanPair(nn.Module):
def __init__(self):
super(GanPair, self).__init__()
self.generator = GeneratorWithAttention()
self.discriminator = Critic()
def forward(self, x):
raise NotImplementedError()
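# Quick shape sanity check (added for illustration; torch is already imported and
# the numbers follow from the transposed-convolution arithmetic above):
#     g = GeneratorWithAttention()
#     z = torch.randn(2, latent_dim)
#     print(g(z).shape)   # expected: torch.Size([2, 1, 8192]), i.e. sample_size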
if __name__ == '__main__':
def real_sample_transformer(windowed):
return windowed[zounds.Seconds(10):-zounds.Seconds(15)]
experiment = GanExperiment(
'perceptual',
zounds.InternetArchive('AOC11B'),
GanPair(),
real_sample_transformer=real_sample_transformer,
latent_dim=latent_dim,
sample_size=sample_size,
n_critic_iterations=10,
n_samples=int(4e5))
experiment.run()
| UTF-8 | Python | false | false | 9,911 | py | 77 | perceptual_critic.py | 58 | 0.527898 | 0.496015 | 0 | 305 | 31.495082 | 79 |
lwijers/spygame | 11,390,253,276,200 | 44bef6133ecf0b1f829f8c41a3911fe4af43846e | fb8555be68272d90946e56a50c2eff973a9f0b0e | /_items.py | f43f4cfcf38dd5874cbfbea9c585ebbc6f963a7e | []
| no_license | https://github.com/lwijers/spygame | 95b34aa7a1490faff340760aab044650b2e3f250 | b85a3d9ad4f0320cc2c0bfee4983edce0815b120 | refs/heads/master | 2020-03-14T07:39:29.809164 | 2018-04-29T16:07:49 | 2018-04-29T16:07:49 | 131,508,949 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
from main._const import *
class Ladder():
def __init__(self, grid, start_rect):
self.possible_interactions = {
'move_here' : { "req" : None}
}
self.stats = {}
self.grid = grid
self.height = SM_CELL_SIZE * 6
self.width = SM_CELL_SIZE
self.rect = pygame.Rect(0,0, self.width, self.height)
self.top_tile = start_rect
self.rect.topleft = self.top_tile.topleft
self.connected_tiles = []
for tile in self.grid.tiles:
if tile.rect.colliderect(self.rect):
self.connected_tiles.append(tile)
tile.is_accessible = True
self.connected_tiles = sorted(self.connected_tiles)
self.is_selectable_by_box = False
# for tile in self.connected_tiles:
# print tile.id
        # Connect each consecutive pair of tiles covered by the ladder so the
        # grid treats them as linked.
        while len(self.connected_tiles) > 1:
self.grid.add_connection(self.connected_tiles[0], self.connected_tiles[1])
del self.connected_tiles[0]
def process_input(self, events):
pass
def update(self):
pass
def on_mouse_hover(self, mouse_pos):
pass
def on_click(self, mouse_pos):
pass
def on_r_click(self):
pass
def switch_selected(self):
pass
def set_selected(self, selected):
pass
def draw(self, screen):
# pass
pygame.draw.rect(screen, BROWN, self.rect)
| UTF-8 | Python | false | false | 1,441 | py | 25 | _items.py | 24 | 0.569049 | 0.564192 | 0 | 53 | 26.188679 | 86 |
JoseRivas1998/Snake-pygame | 34,359,785,324 | cf31543f396e333def3805904c18b9596b9b5164 | e1a6082207c7b16637479ffea69ad7074ec36f69 | /snake.py | 2447f6a03497496b55f91eb9886888a97dc8f54d | []
| no_license | https://github.com/JoseRivas1998/Snake-pygame | d6d8ac247016b91ad2accfbe1dcbd811a283c8e3 | e22dee003c0c1f52aa8b0a9cfe4658431c1599e0 | refs/heads/master | 2020-07-01T10:44:35.903962 | 2019-08-08T05:20:28 | 2019-08-08T05:20:28 | 201,151,104 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Tuple
from pygame.font import Font
from pygame.surface import Surface
class Snake:
score: int = 0
@staticmethod
def draw__text(text: str, center: Tuple[float, float], color: Tuple[int, int, int], font: Font,
surface: Surface):
text_surface = font.render(text, False, color)
text_rect = text_surface.get_rect()
text_rect.center = center
surface.blit(text_surface, text_rect)
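# Illustrative usage sketch (added; assumes pygame.init() was called and that
# ``font`` (pygame.font.Font) and ``screen`` (display Surface) exist elsewhere):
#     Snake.draw__text("Score: %d" % Snake.score, (320, 20), (255, 255, 255),
#                      font, screen)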
| UTF-8 | Python | false | false | 464 | py | 17 | snake.py | 16 | 0.640086 | 0.637931 | 0 | 16 | 28 | 99 |
Mesteriis/atavism | 601,295,455,639 | 33862fe40d8cfcc2cdafd32433c08434c4be1767 | 36847f055b9e6cc852b2ffd4081f61563f9456d0 | /atavism/http11/objects.py | db03e5f63b35eebececbec83f50695f16559eef4 | [
"Unlicense"
]
| permissive | https://github.com/Mesteriis/atavism | 427611f2cde4068cb73508dd1cfd30b1ee797906 | 4db376589fdd712dc7f023a54296d4f4965fd80a | refs/heads/master | 2021-05-30T12:36:43.138763 | 2015-06-02T12:53:39 | 2015-06-02T12:53:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ HTTP relies on Requests and Responses. The classes contained in this module are
able to be created in either a client or server environment. They are named for
their content and intent.
"""
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from atavism.http11.base import BaseHttp
class HttpRequest(BaseHttp):
""" An HTTP Request.
"""
def __init__(self, inp=None, method=None, path=None):
BaseHttp.__init__(self)
if inp is not None:
self.read_content(inp)
self.method = method or 'GET'
if path is not None:
self.path = path if not isinstance(path, bytes) else path.decode()
def read_content(self, data):
if data is None or len(data) == 0:
return 0
hdr = self.header.finished
rv = BaseHttp.read_content(self, data)
if hdr is False and self.header.finished:
self.method, self.path, self.http = self.header.status_line.split(' ')
return rv
def reset(self):
cnt = self._content
while cnt._next:
cnt = cnt._next
cnt.send_position = 0
self.headers_sent = False
def complete(self):
if len(self.ranges) > 0:
self.add_header('Range', 'bytes={}'.format(','.join([r.header() for r in self.ranges])))
self.header.status_line = '{} {} {}'.format(self.method.upper(), self.path, self.http)
# if len(self._content) > 512:
# self._content.content_sz = 'chunked'
self._complete()
def make_response(self):
resp = HttpResponse()
resp.add_header('Connection', 'keep-alive' if self.is_keepalive else 'close')
resp.ranges = self.ranges
if self.method.upper() == b'HEAD':
resp.headers_only = True
ce = self.get('accept-encoding')
if ce is not None and 'gzip' in ce:
# todo - handle this properly
resp.set_compression('gzip')
return resp
class HttpResponse(BaseHttp):
""" Class that represents a response from an HTTP server.
"""
#todo - expand list
STATUS_MSG = {
200: 'OK',
206: 'Partial Content',
301: 'Moved permanently',
        401: 'Unauthorised',
402: 'Payment required',
403: 'Forbidden',
404: 'Not found',
405: 'Method not allowed',
416: 'Requested range not satisfiable'
}
def __init__(self, inp=None, code=None):
BaseHttp.__init__(self)
self.msg = 'OK'
self.code = code or 200
if inp is not None:
self.read_content(inp)
def read_content(self, data):
if data is None or len(data) == 0:
return 0
hdr = self.header.finished
rv = BaseHttp.read_content(self, data)
if hdr is False and self.header.finished:
self.http, self.code, self.msg = self.header.status_line.split(' ', 2)
self.code = int(self.code)
return rv
def status_msg(self):
return self.STATUS_MSG.get(self.code, "Unknown status! {}".format(self.code))
def set_code(self, code):
""" Set the ode to use for the response and set
:param code: The numeric error code to respond with.
:return: None.
"""
self.code = code
if code >= 400:
self.ranges = []
elif code == 206 and len(self.ranges) == 0:
self.code = 200
def mark_complete(self):
self._content.finished = True
self._content.decompress()
def complete(self):
if len(self.ranges) > 0:
self.check_ranges()
if self.code == 200:
self.code = 206
hdrs = self._content.create_ranged_output(self.ranges)
self.header.add_headers(hdrs)
self.header.status_line = 'HTTP/1.1 {} {}'.format(self.code, self.status_msg())
self._complete()
def check_ranges(self):
for r in self.ranges:
st, end = r.absolutes(len(self))
if 0 < st >= len(self) or st > end >= len(self):
self.set_code(416)
break
| UTF-8 | Python | false | false | 4,169 | py | 19 | objects.py | 18 | 0.566563 | 0.550971 | 0 | 131 | 30.824427 | 100 |
Ananyapam7/CS-2201 | 7,679,401,536,027 | 663f82b8315cc345f81380191deb124d2c5b3b77 | 075c5d30aca7627025d529d2ab2bdbcb77d6cbab | /Worksheet 09/Q3.py | 14db0c35e7bd63b3a39d8de4785741d0ddd7cdef | []
| no_license | https://github.com/Ananyapam7/CS-2201 | a10f20c65a13042919a04dd156f741ed348adb91 | 12615b687afeba20f425afcd0b0f2b3ca6548746 | refs/heads/master | 2022-12-04T12:11:01.435419 | 2020-08-23T11:10:58 | 2020-08-23T11:10:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
#a1=float(raw_input("Enter lower limit of integration :"))
#a2=float(raw_input("Enter upper limit of integration :"))
guass=np.loadtxt('gauss32.dat')
W=guass[:,1]
X=guass[:,2]
def I(a,b,f):
Z=(((b-a)*0.5*X)+(b+a)*0.5)
return np.dot(f(Z),W)*(b-a)*0.5
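# Comment added for clarity: the mapping above is the usual change of variables
# for Gauss-Legendre quadrature on [a, b]:
#   integral_a^b f(x) dx  ~=  (b-a)/2 * sum_i W[i] * f( (b-a)/2 * X[i] + (b+a)/2 )
# where X and W are the nodes and weights loaded from gauss32.dat.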
f=lambda x: (np.sin(x))**2
g=lambda x: x*np.exp(-x*x)
F=0.5-(np.sin(2.0))*0.25
G=(1-np.exp(-4))*0.5
print "Integral of (sinx)^2 from 0 to 1 is :", I(0,1,f) #Can substitute a1,a2 in the place of 0,1
print "Error =",np.abs(I(0,1,f)-F)
print "Integral of xe^(-x^2) from 0 to 2 is :",I(0,2,g)
print "Error =",np.abs(I(0,2,g)-G)
| UTF-8 | Python | false | false | 603 | py | 18 | Q3.py | 15 | 0.606965 | 0.537313 | 0 | 21 | 27.619048 | 98 |
rightfitdegree/overfitting | 14,431,090,137,610 | 492919badb4056befc22a16bfa75080c17e9f6d6 | 65b6ae531284a0f75b97eb784b8aa67f0be87450 | /overfitting/VectorAleatorio.py | 2c3cabdfb4f9306aa02b4446768650518f72948f | []
| no_license | https://github.com/rightfitdegree/overfitting | 3a7ff73a9510fec1626606be68f66746dd4e2295 | c9b33b00498fc9eee70ea90173ccb13cad654038 | refs/heads/master | 2020-07-12T05:50:49.673047 | 2019-09-09T14:40:41 | 2019-09-09T14:40:41 | 204,735,483 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
random.seed()
import pandas as pd
import numpy as np
#from scipy.optimize import minimize_scalar
class RandPol(pd.DataFrame):
"""Dataframe para generar polinomios aleatorios
en intervalo -1 ≤ X ≤ 1; -1 ≤ Y ≤ 1
"""
#Headers para columnas del DataFrame
Nº = 'Nº'
X = 'X'
Y = 'Y'
POL = 'Pol'
ERR = 'Err'
POL_ERR = 'Pol+Err'
POL_ERR_NORM ='Norm(Pol+Err)'
POL_NORM = 'Norm(Pol)'
    # Constants defining the intervals
MIN_X = -1.0
MAX_X = 1.0
MIN_Y = -1.0
MAX_Y = 1.0
    # Number of random points used to fit a polynomial to the points by
    # regression
CANTIDAD_PUNTOS = 10
TITULOS_COLUMNAS = [Nº, X, Y]
GradoDelPolinomio = None
Polinomio = None
PolinomioNormalizado = None
#Error = None
#Polinomio_Error = None
    # error generation
@property
def _constructor(self):
return RandPol
def __init__(self, *args, **kwargs):
#print("Iniciando " + self.__class__.__name__)
super().__init__(*args, **kwargs)
        # Create the random points
self[RandPol.X] = self.__RandomPointsX()
self[RandPol.Y] = self.__RandomPoints()
self.GradoDelPolinomio = self.__GradoPolinomialAleatorio()
#print("Grado del polinomio: " , self.GradoDelPolinomio)
        # Fit a polynomial to the points
self.Polinomio = self.GenerarPolinomio()
#print("Polinomio: \n", self.Polinomio)
        # Evaluate the polynomial
self[RandPol.POL] = self.Polinomio(self[RandPol.X])
        # Generate the errors
self[RandPol.ERR] = self.__RandomPoints()
self[RandPol.POL_ERR] = self[RandPol.POL] + self[RandPol.ERR]
        # Normalisation
        # Find the largest absolute value
extremo = max(self[RandPol.POL_ERR], key=abs)
self[RandPol.POL_ERR_NORM] = self[RandPol.POL_ERR] / extremo
self.PolinomioNormalizado = self.Polinomio / extremo
#print("Polinomio normalizado: \n", self.PolinomioNormalizado)
#print('Dataframe: \n',self)
#myplot = self.plot(x =RandPol.X, y =RandPol.Y, kind ='scatter')
print("Terminado RandPol")
def XY(self):
"""Returns 'X' column joined to 'Y' column
"""
return np.array( self[RandPol.X].values.tolist() + self[RandPol.Y].values.tolist() )
def __GradoPolinomialAleatorio(self):
return random.randint(0,9)
def __RandomPoints(self, cantidad=CANTIDAD_PUNTOS, min=MIN_X, max=MAX_X) -> []:
if cantidad < 2 :
            raise Exception('The number of points must be at least 2')
respuesta = np.random.uniform(low=min, high=max, size=cantidad)
#print('Random uniform:', respuesta)
return respuesta
def __RandomPointsX(self, cantidad=CANTIDAD_PUNTOS, min=MIN_X, max=MAX_X) -> []:
if cantidad < 2 :
            raise Exception('The number of points must be at least 2')
respuesta = self.__RandomPoints(cantidad, min, max)
respuesta[0] = min
respuesta[-1] = max
#print(respuesta)
return respuesta
def GenerarPolinomio(self):
x = np.array(self[RandPol.X])
y = np.array(self[RandPol.Y])
z = np.polyfit(x, y, self.GradoDelPolinomio)
return np.poly1d(z) | UTF-8 | Python | false | false | 3,378 | py | 2 | VectorAleatorio.py | 2 | 0.603931 | 0.597082 | 0 | 123 | 26.308943 | 92 |
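# Minimal usage sketch (added for illustration): constructing the frame with no
# arguments runs the whole pipeline (random points, polynomial fit, added noise,
# normalisation); the attributes below are the ones defined in __init__.
if __name__ == '__main__':
    rp = RandPol()
    print("degree:", rp.GradoDelPolinomio)
    print(rp.PolinomioNormalizado)
    print(rp.XY())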