Dataset columns (type, observed range/classes; ⌀ marks nullable columns):
repo_name: string, 7-111 chars
__id__: int64, 16.6k-19,705B
blob_id: string, 40 chars
directory_id: string, 40 chars
path: string, 5-151 chars
content_id: string, 40 chars
detected_licenses: list
license_type: string, 2 classes
repo_url: string, 26-130 chars
snapshot_id: string, 40 chars
revision_id: string, 40 chars
branch_name: string, 4-42 chars
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64, 14.6k-687M, nullable
star_events_count: int64, 0-209k
fork_events_count: int64, 0-110k
gha_license_id: string, 12 classes
gha_fork: bool, 2 classes
gha_event_created_at: timestamp[ns]
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_size: int64, 0-10.2M, nullable
gha_stargazers_count: int32, 0-178k, nullable
gha_forks_count: int32, 0-88.9k, nullable
gha_open_issues_count: int32, 0-2.72k, nullable
gha_language: string, 1-16 chars, nullable
gha_archived: bool, 1 class
gha_disabled: bool, 1 class
content: string, 10-2.95M chars
src_encoding: string, 5 classes
language: string, 1 class
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 10-2.95M
extension: string, 19 classes
num_repo_files: int64, 1-202k
filename: string, 4-112 chars
num_lang_files: int64, 1-202k
alphanum_fraction: float64, 0.26-0.89
alpha_fraction: float64, 0.2-0.89
hex_fraction: float64, 0-0.09
num_lines: int32, 1-93.6k
avg_line_length: float64, 4.57-103
max_line_length: int64, 7-931
deng0515001/jsonml
| 4,183,298,180,805 |
743af2b2b155b26f4749802eb4f9cebfc51c4e2c
|
6f9ed9292208c78299e18f04ef9ec75fd72bc2ba
|
/jsonml/start.py
|
618f01f7199b03730ff3ad8623ff6f0f96e17126
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/deng0515001/jsonml
|
0496bac1440e9e4f49cc93e2252f4a32d9e01c67
|
a28c3a4b102c37243b63d39ff4a5de1e8a414c21
|
refs/heads/master
| 2022-12-16T16:31:30.611759 | 2020-09-21T09:01:03 | 2020-09-21T09:01:03 | 297,275,557 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from jsonml import source
from pandas import DataFrame
import json
from jsonml import dataprocess
from jsonml.dataprocess import MDataFrame
import numpy as np
import importlib
from jsonml import datautil
from jsonml import datesutil
import copy
import time
import jsonml.model as mmodel
from jsonml.model import ModelProcess
import pandas as pd
from sklearn.model_selection import train_test_split
import gc
import logging
import re
import os
logger = logging.getLogger('jsonml')
group_udf_mapping = {
'Max': 'max',
'Min': 'min',
'Mean': 'mean',
'Sum': 'sum',
'Count': 'size',
"Std": "std",
"Var": "var",
"Sem": "sem",
"FirstValid": "first",
"LatestValid": "last",
"NthValid": "nth"
}
def load_udf(name, paramlist):
if "." in name:
parts = name.split(".")
mod_name = ".".join([parts[i] for i in range(0, len(parts) - 1)])
cls_name = parts[-1]
else:
mod_name = 'jsonml.udf'
cls_name = name
mod = importlib.import_module(mod_name)
cls = getattr(mod, cls_name)
udf = cls(*paramlist)
return udf
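# Illustrative note (not part of the original module): load_udf resolves a dotted name such
# as "mypkg.udfs.MyUdf" by importing "mypkg.udfs" and instantiating MyUdf(*paramlist), while
# a bare name such as "Copy" is looked up in the default jsonml.udf module. The dotted module
# and class below are hypothetical:
#   udf = load_udf("Copy", [])               # -> jsonml.udf.Copy()
#   udf = load_udf("mypkg.udfs.MyUdf", [3])  # -> mypkg.udfs.MyUdf(3)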
class Pipeline:
def __init__(self, config_path=None, **xxkwargs):
root_path = os.path.dirname(os.path.abspath(__file__))
self.udf_param_dict = source.read_json(os.path.join(root_path, 'udf_param.json'))
        logger.debug(self.udf_param_dict)
self.config = source.read_json(config_path)
level = logging.DEBUG if 'debug' in self.config and self.config['debug'] else logging.INFO
logger.setLevel(level)
self.config = self.check_config(self.config, **xxkwargs)
self.shuffles = self.parse_process_data_stage()
        logger.debug(self.shuffles)
self.shuffle_data = []
self.current_batch_index = 0
self.is_predict = True if 'model' in self.config and "run_mode" in self.config['model'] and \
self.config['model']['run_mode'] == 'predict' else False
def batch_process(self, df):
        '''
        Process one batch of data in streaming mode.
        For unsorted data, only the stages before the first shuffle are executed.
        For sorted data, all stages are executed.
        In predict mode, all stages are executed and model processing and output are run as well.
        :param df: input DataFrame
        :return: None
        '''
self.current_batch_index = self.current_batch_index + 1
if len(self.shuffles) > 0:
for stage in self.shuffles[0]:
df = self.process_stage(df, stage)
self.shuffle_data.append(df)
if self.is_predict:
df = self.shuffle_process()
self.process_model(df)
def shuffle_process(self):
        '''
        Merge and process the buffered streaming batches.
        For unsorted data, the stages after the first shuffle are executed.
        For sorted data, all buffered batches are concatenated and processed.
        In predict mode, nothing is done here.
        :return: the processed data
        '''
gc.collect()
if len(self.shuffle_data) == 0:
return None
        df = self.shuffle_data[0] if len(self.shuffle_data) == 1 else pd.concat(self.shuffle_data, axis=0, ignore_index=True, sort=False)
self.shuffle_data = []
for i in range(1, len(self.shuffles)):
for stage in self.shuffles[i]:
df = self.process_stage(df, stage)
return df
def check_config(self, config, **xxkwargs):
        '''
        Config substitution.
        Expand every "*_file" entry into the full inline config it points to, drop notes,
        and substitute ${...} variables.
        :param config: the original config
        :param xxkwargs: variables to substitute
        :return: the substituted config
        '''
new_config = {}
for key, value in config.items():
if "notes" == key:
continue
if key.endswith("_file") and isinstance(value, str):
if "[" in value:
parts = value.split("[")
key_config = source.read_json(parts[0])
for index in range(1, len(parts)):
part = parts[index][:-1]
sub_key = part if datautil.str2int(part, -1) == -1 else datautil.str2int(part, -1)
key_config = key_config[sub_key]
else:
key_config = source.read_json(value)
new_config[key[:-5]] = key_config
elif isinstance(value, str) and '$' in value:
params = re.findall(r'\${.*}', value)
for param in params:
param = param[2:-1]
if param in xxkwargs:
new_config[key] = re.sub('\${.*}', xxkwargs[param], value)
elif isinstance(value, dict):
new_config[key] = self.check_config(value, **xxkwargs)
elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], dict):
new_config[key] = [self.check_config(item, **xxkwargs) for item in value]
else:
new_config[key] = value
return new_config
def parse_params(self, strategy, name):
        '''
        Parse all "param" arguments of a UDF.
        :param strategy: the UDF strategy config
        :param name: the UDF name
        :return: list of param values, ordered by index
        '''
params = []
for key, value in strategy.items():
key = key if name not in self.udf_param_dict or key not in self.udf_param_dict[name] else self.udf_param_dict[name][key]
if "param" in key:
params.append((int(key[5:]), value))
params.sort()
return [value for (key, value) in params]
def read_data(self):
        '''
        Read the input data.
        :return: df
        '''
csource = self.config['source']
type = "text" if "type" not in csource else csource['type']
cinput = csource['input']
if type == "text":
columns = None if "columns" not in csource else csource['columns']
data_type = 'str' if "data_type" not in csource else csource['data_type']
select_columns = [] if "select_columns" not in csource else csource["select_columns"]
drop_columns = [] if "drop_columns" not in csource else csource["drop_columns"]
key_columns = [] if "key_columns" not in csource else csource["key_columns"]
keep_key_columns = True if "keep_key_columns" not in csource else csource["keep_key_columns"]
filter = '' if "filter" not in csource else csource["filter"]
column_info = []
if columns is not None:
for index, column in enumerate(columns):
parts = column.strip().split(":") if len(column.strip()) > 0 else ["value-" + str(index + 1)]
if len(parts) == 1:
column_info.append([parts[0], 'str', ''])
elif len(parts) == 2:
column_info.append([parts[0], parts[1], ''])
elif len(parts) == 3:
column_info.append([parts[0], parts[1], parts[2]])
elif len(parts) > 3:
column_info.append([parts[0], parts[1], ':'.join(parts[2:])])
else:
raise Exception(column)
path = cinput["path"]
is_stream = False if "is_stream" not in cinput else cinput['is_stream']
if not is_stream:
args = ['field_delimiter', 'ignore_first_line', 'ignore_error_line', 'ignore_blank_line']
kwargs = {key: cinput[key] for key in args if key in cinput}
df = source.csv(path, columns=column_info, **kwargs)
if len(select_columns) > 0:
for column in drop_columns:
if column in select_columns:
select_columns.remove(column)
df = df[[select for select in select_columns]]
elif len(drop_columns) > 0:
df.drop(drop_columns, axis=1, inplace=True)
if len(key_columns) > 0:
if not keep_key_columns:
names = {key: "keys_" + key for key in key_columns}
df.rename(columns=names, inplace=True)
else:
for key in key_columns:
df["keys_" + key] = df[key]
if isinstance(filter, str) and filter != '':
df.query(filter, inplace=True)
df.reset_index(drop=True, inplace=True)
elif isinstance(filter, dict):
name = filter["name"]
filter_columns = filter["input_columns"]
udf = load_udf(name, self.parse_params(filter, name))
mask = df.apply(lambda row: udf.process(*tuple([row[filter_column] for filter_column in filter_columns])),axis=1)
df = df[mask]
df.reset_index(drop=True, inplace=True)
if data_type in ['int', 'float']:
df = df.apply(pd.to_numeric)
return df
else:
def callback(df):
if len(select_columns) > 0:
for column in drop_columns:
if column in select_columns:
select_columns.remove(column)
df = df[[select for select in select_columns]]
if len(drop_columns) > 0:
df.drop(drop_columns, axis=1, inplace=True)
if len(key_columns) > 0:
if not keep_key_columns:
names = {key: "keys_" + key for key in key_columns}
df.rename(columns=names, inplace=True)
else:
for key in key_columns:
df["keys_" + key] = df[key]
if isinstance(filter, str) and filter != '':
df.query(filter, inplace=True)
df.reset_index(drop=True, inplace=True)
elif isinstance(filter, dict):
name = filter["name"]
filter_columns = filter["input_columns"]
udf = load_udf(name, self.parse_params(filter, name))
mask = df.apply(lambda row: udf.process(*tuple([row[filter_column] for filter_column in filter_columns])), axis=1)
df = df[mask]
df.reset_index(drop=True, inplace=True)
if data_type in ['int', 'float']:
df = df.apply(pd.to_numeric)
self.batch_process(df)
args = ['field_delimiter', 'ignore_first_line', 'ignore_error_line', 'ignore_blank_line', 'batch_count', 'batch_key']
kwargs = {key: cinput[key] for key in args if key in cinput}
if path == "stdin":
source.stdin_stream(columns=column_info, callback=callback, **kwargs)
else:
source.csv_stream(path, columns=column_info, callback=callback, **kwargs)
elif type == "es":
logger.error("do not support now")
else:
logger.error("do not support now")
def process_data(self, df):
        '''
        Non-streaming data processing.
        :param df: input data
        :return: the processed data
        '''
if 'process' not in self.config:
return df
config = self.config['process']
stages = [(stage_id, stage) for stage_id, stage in config.items()]
stages.sort(key=lambda elem: int(elem[0].split("_")[1]))
for stage_id, stage in stages:
df = self.process_stage(df, stage)
return df
def process_stage(self, df, stage):
        '''
        Process a single stage; used in both streaming and non-streaming modes.
        :param df: input data
        :param stage: detailed stage config
        :return: the data after execution
        '''
start = time.time()
stage_type = 'map' if 'type' not in stage else stage['type']
strategies = stage['strategies']
if stage_type == 'group':
group_keys = stage['group_key_columns']
sort_keys = [] if 'sort_key_columns' not in stage else stage['sort_key_columns']
keep_group_keys = True if 'keep_group_keys' not in stage else stage['keep_group_keys']
df = self.group_stage(df, strategies, group_keys, sort_keys, keep_group_keys)
logger.info('********* end group stage: group keys = ' + str(group_keys) +
' cost = ' + str(time.time() - start) + ' **********')
elif stage_type == 'map':
mdf = MDataFrame(df)
for strategy in strategies:
self.process_strategy(mdf, strategy)
df = mdf.datas()
logger.info('********* end map stage: cost = ' + str(time.time() - start) + ' **********')
logger.debug(df)
logger.debug(df.columns.values.tolist())
return df
def parse_process_data_stage(self):
        '''
        Split the stages into shuffle groups, used when streaming unsorted data.
        :return: the list of shuffle stage groups
        '''
shuffles = []
if 'process' not in self.config:
return shuffles
is_sorted = True if 'is_sorted' not in self.config['source'] else self.config['source']['is_sorted']
config = self.config['process']
stages = [(stage_id, stage) for stage_id, stage in config.items()]
stages.sort(key=lambda elem: int(elem[0].split("_")[1]))
shuffle_stages = []
for stage_id, stage in stages:
stage_type = 'map' if 'type' not in stage else stage['type']
if stage_type == 'group':
shuffle_stages.append(stage)
if not is_sorted:
shuffles.append(shuffle_stages)
shuffle_stages = [stage]
elif stage_type == 'map':
shuffle_stages.append(stage)
if len(shuffle_stages) > 0:
shuffles.append(shuffle_stages)
return shuffles
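    # Illustrative shape of the 'process' section, inferred from the parsing above (stage ids
    # are ordered by the number after the underscore and 'type' defaults to 'map'); the concrete
    # stage ids, keys and strategies shown here are assumptions:
    #   "process": {
    #       "stage_1": {"type": "map", "strategies": [...]},
    #       "stage_2": {"type": "group", "group_key_columns": ["uid"], "strategies": [...]}
    #   }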
def group_stage(self, df, strategies, group_keys, sort_keys, keep_group_keys):
        '''
        Process a group stage.
        :param df: input data
        :param strategies: group strategies
        :param group_keys: keys to group by
        :param sort_keys: keys used to sort rows within each group
        :param keep_group_keys: whether to keep the group keys
        :return: the data after the group stage
        '''
df_columns = df.columns.values.tolist()
strategies_list = self.parse_group_params(strategies, df_columns, group_keys)
logger.debug(strategies_list)
if sort_keys is not None and len(sort_keys) > 0:
df.sort_values(sort_keys, inplace=True)
result_df = df.groupby(by=group_keys, as_index=False, sort=False).agg(strategies_list)
mdf = MDataFrame(result_df)
udf = load_udf('Copy', [])
output_columns = ["keys_" + column for column in group_keys]
mdf.process_udf(udf, copy.deepcopy(group_keys), output_columns, keep_group_keys)
return mdf.datas()
def parse_group_params(self, strategies, src_columns, keys):
        '''
        Parse the group configuration.
        :param strategies: group strategies
        :param src_columns: source column names
        :param keys: group keys
        :return: dict mapping each input column to its aggregation
        '''
processed_columns = copy.deepcopy(keys)
strategies_dict = {}
for strategy in strategies:
logger.debug(strategy)
if "input_columns" in strategy:
input_columns = strategy["input_columns"]
processed_columns.extend(input_columns)
elif "default" in strategy and strategy['default']:
input_columns = copy.deepcopy(src_columns)
for column in src_columns:
if column in processed_columns:
input_columns.remove(column)
else:
raise Exception("input_columns can not be empty!")
logger.debug(input_columns)
name = strategy["name"]
for input_column in input_columns:
if name in group_udf_mapping:
strategies_dict[input_column] = group_udf_mapping[name]
else:
strategies_dict[input_column] = load_udf(name, self.parse_params(strategy, name)).process
return strategies_dict
def process_strategy(self, mdf, strategy, strategy_names=None):
        '''
        Apply a UDF strategy.
        :param mdf: input data
        :param strategy: UDF config
        :param strategy_names: UDF name(s); the name may be given either outside or inside the strategy
        :return: the output columns produced by the strategy
        '''
names = strategy_names if strategy_names is not None else strategy["name"]
if names == "Output":
self.save_file(mdf.data, strategy)
return None
if names == "GroupAuc":
df = mdf.datas()
key_columns = strategy["key_columns"]
key_data = df[key_columns[0]].tolist() if len(key_columns) == 1 else [tuple(x) for x in df[key_columns].values]
group_auc, detail_auc = mmodel.cal_group_auc(df['label'].tolist(), df['pred_prob'].tolist(), key_data)
logger.info(f'group_auc = {group_auc}')
if strategy["detail"]:
logger.info(f'detail_auc : ')
for key, auc in detail_auc.items():
logger.info(f'key = {key}, auc = {auc}')
return None
elif names == "RenameColumn":
input_columns = strategy["input_columns"]
output_columns = strategy["output_columns"]
columns_dict = {}
for index, input in enumerate(input_columns):
columns_dict[input] = output_columns[index]
mdf.rename(columns_dict)
return None
elif names == "CopyColumn":
input_columns = strategy["input_columns"]
output_columns = strategy["output_columns"]
mdf.copy_column(input_columns, output_columns)
return None
elif names == "AddColumn":
input_columns = strategy["input_columns"]
value = strategy['value']
mdf.add_column(input_columns, value)
return None
elif names == "DropColumn":
mdf.drop(strategy["input_columns"])
return None
elif names == "OrderColumn":
columns = strategy["input_columns"]
if isinstance(columns, str) and "," in columns:
columns = columns.split(",")
columns = [column.strip() for column in columns]
            # add the key columns and put them at the front
key_column = [column for column in mdf.columns() if column.startswith('keys_') and column not in columns]
if len(key_column) > 0:
key_column.extend(columns)
columns = key_column
mdf.order_column(columns)
return None
input_columns = copy.deepcopy(strategy["input_columns"])
if isinstance(input_columns, dict):
logger.debug("****** parse sub strategy *******")
input_columns = self.process_strategy(mdf, input_columns)
output_columns = copy.deepcopy(input_columns) if "output_columns" not in strategy else copy.deepcopy(strategy[
"output_columns"])
split_column_count = 0 if "split_column_count" not in strategy else strategy["split_column_count"]
suffix_use_label = False if "suffix_use_label" not in strategy else strategy["suffix_use_label"]
if suffix_use_label and "labels" in strategy:
labels = copy.deepcopy(strategy["labels"])
default_label = 'others' if 'default_label' not in strategy else strategy['default_label']
labels.append(default_label)
for index, output_column in enumerate(output_columns):
pre = output_column if not isinstance(output_column, list) else output_column[0]
output_columns[index] = [pre + '_' + str(label) for label in labels]
elif split_column_count > 1:
for index, output_column in enumerate(output_columns):
pre = output_column if not isinstance(output_column, list) else output_column[0]
output_columns[index] = [pre + '_' + str(i) for i in range(split_column_count)]
prefix = "" if "output_columns_prefix" not in strategy else strategy["output_columns_prefix"]
suffix = "" if "output_columns_suffix" not in strategy else strategy["output_columns_suffix"]
for index, output_column in enumerate(output_columns):
output_columns[index] = prefix + output_column + suffix if not isinstance(output_column, list) \
else [prefix + column + suffix for column in output_column]
keep_input_columns = False if "keep_input_columns" not in strategy else strategy["keep_input_columns"]
names = names if isinstance(names, list) else [names]
logger.debug("********* start to execute strategy " + str(names) + " **********")
logger.debug("input_columns: " + str(input_columns))
logger.debug("output_columns: " + str(output_columns))
start = time.time()
for name in names:
udf = load_udf(name, self.parse_params(strategy, name))
mdf.process_udf(udf, input_columns, output_columns, keep_input_columns)
if "drop_columns" in strategy:
mdf.drop(strategy["drop_columns"])
if "select_columns" in strategy:
mdf.select(strategy["select_columns"])
logger.debug(mdf)
logger.debug(mdf.columns())
cost = time.time() - start
logger.debug("********* stop to execute strategy " + str(names) + " cost = " + str(cost) + " **********")
return output_columns
def process_model(self, df):
        '''
        Model processing (predict / train / test / train_test).
        :param df: input data
        :return: None
        '''
if 'model' not in self.config:
logger.info("no model in json, ignore model process")
return
gc.collect()
config = self.config['model']
columns = df.columns.values.tolist()
        logger.info("********* start process model ********")
logger.debug(columns)
logger.debug(df)
run_mod = 'train_test' if "run_mode" not in config else config["run_mode"]
models = []
for key, model in config.items():
if key.startswith("model_"):
models.append((key[6:], model))
models.sort()
logger.debug(models)
if run_mod == "predict":
model_select = 'ModelSelect' if "model_select" not in config else config["model_select"]
group_keys = [column for column in columns if column.startswith('keys_')]
group_key_df = df[group_keys]
df.drop(group_keys, axis=1, inplace=True)
for _, model in models:
logger.debug(model)
if "model_path" not in model:
raise Exception("model_path could not be null!")
model_path = model["model_path"]
model_process = ModelProcess()
model_process.load_model(model_path=model_path)
pred_df = model_process.predict(df)
group_key_df.columns = [key[5:] for key in group_keys]
df_temp = pd.concat([group_key_df, pred_df], axis=1, sort=False)
if 'strategies' in model:
strategies = model['strategies']
mdf = MDataFrame(df_temp)
for strategy in strategies:
self.process_strategy(mdf, strategy)
df_temp = mdf.datas()
if 'Output' in model:
self.save_file(df_temp, model['Output'])
elif run_mod == "train":
group_keys = [column for column in columns if column.startswith('keys_')]
df.drop(group_keys, axis=1, inplace=True)
validation_data_percent = 0.2 if "validation_data_percent" not in config else config[
"validation_data_percent"]
validation_data_percent = 0.2 if validation_data_percent > 0.5 or validation_data_percent < 0.01 else validation_data_percent
x_df, y_df = dataprocess.split_feature_and_label_df(df)
del df
train_x_df, valid_x_df, train_y_df, valid_y_df = train_test_split(x_df, y_df,
test_size=validation_data_percent, random_state=0)
del x_df, y_df
for _, model in models:
logger.debug(model)
model_type = model["model_type"]
model_config = model["model_config"]
model_process = ModelProcess(model_type, model_config)
model_process.train_model(train_x_df, train_y_df, test_x=valid_x_df, test_y=valid_y_df)
model_process.save_model(model["model_path"])
logger.info("model saved to " + os.path.abspath(model["model_path"]))
if 'feature_importance' in model:
feature_importance = model['feature_importance']
importance_types = ['gain'] if 'importance_type' not in feature_importance else feature_importance['importance_type']
for importance_type in importance_types:
score = model_process.feature_importance(importance_type)
all_features = [score.get(f, 0.) for f in model_process.features()]
all_features = np.array(all_features, dtype=np.float32)
all_features_sum = all_features.sum()
importance_list = [[f, score.get(f, 0.) / all_features_sum] for f in model_process.features()]
importance_list.sort(key=lambda elem: elem[1], reverse=True)
print("feature importance: " + importance_type)
for index, item in enumerate(importance_list):
print(index, item[0], item[1])
elif run_mod == "test":
group_keys = [column for column in columns if column.startswith('keys_')]
group_key_df = df[group_keys]
df.drop(group_keys, axis=1, inplace=True)
x_df, y_df = dataprocess.split_feature_and_label_df(df)
del df
for _, model in models:
logger.debug(model)
if "model_path" not in model:
raise Exception("model_path could not be null!")
model_process = ModelProcess()
model_process.load_model(model_path=model["model_path"])
pred_df = model_process.evaluate_model(x_df, y_df, ana_top=0.05)
group_key_df.columns = [key[5:] for key in group_keys]
df_temp = pd.concat([group_key_df, y_df, pred_df], axis=1, sort=False)
if 'strategies' in model:
strategies = model['strategies']
mdf = MDataFrame(df_temp)
for strategy in strategies:
self.process_strategy(mdf, strategy)
df_temp = mdf.datas()
if 'Output' in model:
self.save_file(df_temp, model['Output'])
elif run_mod == "train_test":
group_keys = [column for column in columns if column.startswith('keys_')]
group_key_df = df[group_keys]
df.drop(group_keys, axis=1, inplace=True)
test_data_percent = 0.2 if "test_data_percent" not in config else config["test_data_percent"]
test_data_percent = 0.2 if test_data_percent > 0.5 or test_data_percent < 0.01 else test_data_percent
validation_data_percent = 0.2 if "validation_data_percent" not in config else config["validation_data_percent"]
validation_data_percent = 0.2 if validation_data_percent > 0.5 or validation_data_percent < 0.01 else validation_data_percent
x_df, y_df = dataprocess.split_feature_and_label_df(df)
del df
train_x_df, test_x_df, train_y_df, test_y_df = train_test_split(x_df, y_df, test_size=test_data_percent, random_state=0)
del x_df, y_df
train_x_df, valid_x_df, train_y_df, valid_y_df = train_test_split(train_x_df, train_y_df, test_size=validation_data_percent, random_state=0)
for _, model in models:
logger.debug(model)
model_process = ModelProcess(model["model_type"], model["model_config"])
model_process.train_model(train_x_df, train_y_df, test_x=valid_x_df, test_y=valid_y_df)
model_process.save_model(model["model_path"])
logger.info("model saved to " + os.path.abspath(model["model_path"]))
pred_df = model_process.evaluate_model(test_x_df, test_y_df, ana_top=0.05)
group_key_df.columns = [key[5:] for key in group_keys]
df_temp = pd.concat([group_key_df, test_y_df, pred_df], axis=1, sort=False)
if 'strategies' in model:
strategies = model['strategies']
mdf = MDataFrame(df_temp)
for strategy in strategies:
self.process_strategy(mdf, strategy)
df_temp = mdf.datas()
if 'Output' in model:
self.save_file(df_temp, model['Output'])
def save_file(self, src_df, strategy):
        '''
        Save results to a file, or write them to stdout.
        :param src_df: the data
        :param strategy: output strategy
        :return: None
        '''
df = src_df.copy(deep=True)
columns = df.columns.values.tolist()
key_columns = [column for column in columns if column.startswith('keys_')]
if len(key_columns) > 0:
group_keys = {column:column[5:] for column in key_columns}
df.drop([column[5:] for column in key_columns if column[5:] in columns], axis=1, inplace=True)
df.rename(columns=group_keys, inplace=True)
path = 'pipeline.txt' if 'path' not in strategy else strategy['path']
type = 'text' if 'type' not in strategy else strategy['type']
if path == "stdout":
field_delimiter = ',' if 'field_delimiter' not in strategy else strategy['field_delimiter']
columns = None if 'columns' not in strategy else strategy['columns']
if columns:
df = df[[column for column in columns]]
source.stdout(df, field_delimiter)
elif type == 'text':
field_delimiter = ',' if 'field_delimiter' not in strategy else strategy['field_delimiter']
columns = None if 'columns' not in strategy else strategy['columns']
header = False if self.current_batch_index > 1 else True if 'header' not in strategy else strategy['header']
path = path if not path.endswith("/") else path + time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".txt"
filepath, _ = os.path.split(path)
if not os.path.exists(filepath):
os.makedirs(filepath)
df.to_csv(path, sep=field_delimiter, columns=columns, header=header, mode='a+')
elif type == "excel":
            df.to_excel(path)
else:
logger.info("we will support type " + type + " later")
def start(config_path, **xxkwargs):
start0 = time.time()
pipeline = Pipeline(config_path, **xxkwargs)
logger.info("read and parse config cost = " + str(time.time() - start0))
start1 = time.time()
df = pipeline.read_data()
if df is not None:
logger.info("read data cost = " + str(time.time() - start1))
logger.debug(df)
start1 = time.time()
df = pipeline.process_data(df)
logger.info("process data cost = " + str(time.time() - start1))
else:
start1 = time.time()
df = pipeline.shuffle_process()
logger.info("process data cost = " + str(time.time() - start1))
if df is not None:
start1 = time.time()
pipeline.process_model(df)
logger.info("process model cost = " + str(time.time() - start1))
logger.info("all cost = " + str(time.time() - start0))
if __name__ == "__main__":
start("common_config.json")
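# Illustrative note: values written as ${name} inside string config entries are substituted
# from the keyword arguments passed to start(); the key and value below are hypothetical.
#   start("common_config.json", date="20200901")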
|
UTF-8
|
Python
| false | false | 32,852 |
py
| 14 |
start.py
| 11 | 0.543854 | 0.539646 | 0 | 738 | 42.4729 | 152 |
novonordisk-research/ProcessOptimizer
| 5,480,378,294,435 |
2619e627fae36b22a77bdd519f000c59eb88beb0
|
4db1ea8dcbc995edc45e4c773fed75b99ec99f80
|
/ProcessOptimizer/utils/get_rng.py
|
9d6cbb44483b058286399dd2fbd25f869aa15346
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/novonordisk-research/ProcessOptimizer
|
5396e22abf86a202b819e48dd9687b55c42373d2
|
c39e16b13c36f026827a861d2738bd248325af09
|
refs/heads/develop
| 2023-08-30T21:01:59.555144 | 2023-08-21T07:56:29 | 2023-08-21T07:56:29 | 311,978,669 | 33 | 9 |
NOASSERTION
| false | 2023-09-10T20:26:05 | 2020-11-11T13:27:36 | 2023-08-11T07:05:22 | 2023-09-10T20:26:05 | 40,282 | 24 | 9 | 16 |
Jupyter Notebook
| false | false |
from typing import Union
import numpy as np
def get_random_generator(
input: Union[int, np.random.RandomState, np.random.Generator, None]
) -> np.random.Generator:
"""Get a random generator from an input.
Parameters
----------
    * `input` [int, RandomState instance, Generator instance, or None (default)]:
Set random state to something other than None for reproducible
results.
Returns
-------
* `rng`: [Generator instance]
Random generator.
"""
if input is None:
return np.random.default_rng()
elif isinstance(input, int):
return np.random.default_rng(input)
elif isinstance(input, np.random.RandomState):
return np.random.default_rng(input.randint(1000, size=10))
# Draws 10 integers under 1000 from the deprecated RandomState to use as a seed for
# the current RNG. This allows for 10**30 different values.
elif isinstance(input, np.random.Generator):
return input
else:
raise TypeError(
"Random state must be either None, an integer, a RandomState instance, or a Generator instance."
)
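# Usage sketch (illustrative, not part of the original module): every accepted input type
# yields a numpy Generator.
#   rng = get_random_generator(None)                      # fresh, unseeded generator
#   rng = get_random_generator(42)                        # generator seeded with 42
#   rng = get_random_generator(np.random.RandomState(7))  # legacy RandomState converted via re-seeding
#   rng = get_random_generator(np.random.default_rng(1))  # existing Generator passed through unchanged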
|
UTF-8
|
Python
| false | false | 1,148 |
py
| 69 |
get_rng.py
| 43 | 0.658537 | 0.644599 | 0 | 35 | 31.8 | 108 |
darae07/test-ui
| 7,421,703,524,141 |
951bfd9b871a60d79d5368bdbd1aefb8fa9c1269
|
b7c14b21d55779fa721046b4c4b87ce173d67edb
|
/algorithm/book_icote/07.럭키스트레이트.py
|
80c3678766a859d9f7f672697485f1a645758489
|
[] |
no_license
|
https://github.com/darae07/test-ui
|
762c5567a10c5a30276bf996e19df6848a8eda76
|
bb9e0f198bcc8fd4217c572bdb9da105bc21ce21
|
refs/heads/master
| 2022-11-28T22:34:55.259828 | 2022-10-24T12:48:37 | 2022-10-24T12:48:37 | 287,263,133 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
#
n = input()
left = 0
right = 0
for i in range(len(n)):
if i < len(n)//2:
left += int(n[i])
else:
right += int(n[i])
print('LUCKY' if left == right else 'READY')
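# Worked example (illustrative): for input "123402" the left half sums to 1+2+3 = 6 and the
# right half to 4+0+2 = 6, so the script prints LUCKY; for an input such as "7", left stays 0
# and right becomes 7, so it prints READY.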
|
UTF-8
|
Python
| false | false | 211 |
py
| 120 |
07.럭키스트레이트.py
| 94 | 0.488152 | 0.469194 | 0 | 12 | 16.583333 | 44 |
binking/PythonAndAlgorithms
| 16,655,883,196,880 |
11f70454eb68e5a47ff0f3cb43ebf3d10c3b1968
|
1cf18a662c222756013d62eaf68b66e781b6aef3
|
/threads/threadings_pool.py
|
78443c157c762a7c6c6d4e2d19c3c1ff1446d957
|
[] |
no_license
|
https://github.com/binking/PythonAndAlgorithms
|
159156650bb6f5d620ce5748955acbf4af53f0c0
|
8616d71bfed25dbaad3ec0d1d250992aebc65e97
|
refs/heads/master
| 2017-12-23T18:12:35.263019 | 2017-09-08T10:44:56 | 2017-09-08T10:44:56 | 72,450,209 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import time
import threading
from multiprocessing.pool import ThreadPool
def print_num(n):
ct = threading.current_thread()
    print("Process %s prints %d" % (ct.name, n))
    time.sleep(5)
    print("hey hey beautiful")
def main():
    #import ipdb; ipdb.set_trace()
    threads = []
    for i in range(5):
        # print("Create %d-th process." % i)
        t = threading.Thread(target=print_num, args=(i, ))
        t.setDaemon(True)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # otherwise, when the main process finishes, the daemon threads would all be killed
    print("All Done !")
def use_pool():
pool = ThreadPool(processes=4)
pool.map(print_num, range(5))
pool.close()
pool.join()
if __name__ == "__main__":
# main()
use_pool()
|
UTF-8
|
Python
| false | false | 731 |
py
| 32 |
threadings_pool.py
| 30 | 0.611491 | 0.606019 | 0 | 29 | 24.172414 | 89 |
asamn/javathehardway
| 9,526,237,463,150 |
5c92c5dfc7c9f519dbac5ea4812d04d7e63ff726
|
1bbbbc7bf6d6667a12ffe9fb6bee033db8699aa4
|
/environment/chapter6/sentimental/greedy.py
|
1f2537bd71ca069e677571c9046e64290a3d977d
|
[] |
no_license
|
https://github.com/asamn/javathehardway
|
f1004d1fc3cb79b2203ef1b59e2cccb756540278
|
69a2598279e0969ef8ad188a7901181aac7ae022
|
refs/heads/master
| 2020-07-28T04:24:36.645460 | 2020-02-04T14:16:28 | 2020-02-04T14:16:28 | 209,308,325 | 0 | 0 | null | false | 2019-09-24T16:55:44 | 2019-09-18T12:55:14 | 2019-09-24T16:54:06 | 2019-09-24T16:55:43 | 7,319 | 0 | 0 | 0 |
C
| false | false |
# :( input of 23 yields output of 92, cs50 does not take into account half dollar coins
# include <stdio.h>
# include <cs50.h>
# include <math.h>
import cs50
import math
def main():
    print("THIS PROGRAM DOES NOT COUNT HALF DOLLAR COINS BECAUSE CS50 IS MEGALOMANIAC TRASH\n\n")
    print("Enter Amount\n") # prevents output repetition from pressing the enter key
    amount = cs50.get_float("") # prevents output repetition
changedAmount = round(amount * 100) # double is too precise
coins = (0)
HalfDollars = (0) # nonexistent
Quarters = (0)
Dimes = (0)
Nickels = (0)
Pennies = (0)
DollarCoins = (0)
if amount <= 0:
# asks again
main()
else:
while changedAmount == 25 or changedAmount > 25:
#
changedAmount = changedAmount - 25
coins = coins + 1
Quarters = Quarters + 1
while changedAmount == 10 or changedAmount > 10:
#
changedAmount = changedAmount - 10
coins = coins + 1
Dimes = Dimes + 1
while changedAmount == 5 or changedAmount > 5:
#
changedAmount = changedAmount - 5
coins = coins + 1
Nickels = Nickels + 1
        while changedAmount > 0 and changedAmount < 5: # it's zero because > 1 is two
#
changedAmount = changedAmount - 1
coins = coins + 1
Pennies = Pennies + 1
if changedAmount > 0.1 and changedAmount < 1: # prevents missing cent from decimals between 1 and 0, 0.1 because it could take -0.000000015, 0.999999 is the cause of missing cents
#
changedAmount = changedAmount - 1
coins = coins + 1
Pennies = Pennies + 1
if (changedAmount < -0.1): # prevents extra cent, -0.1 because it could take in something like -0.00000015
#
changedAmount = changedAmount + 1
coins = coins - 1
Pennies = Pennies - 1
print(coins)
main() # runs the function
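# Worked example (illustrative): an input of 0.41 is rounded to 41 cents, which the loops break
# into 1 quarter (16 left), 1 dime (6 left), 1 nickel (1 left) and 1 penny, so the program prints 4.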
|
UTF-8
|
Python
| false | false | 2,021 |
py
| 173 |
greedy.py
| 121 | 0.582385 | 0.535379 | 0 | 68 | 28.735294 | 184 |
robertmetcalf/chia-log
| 13,683,765,853,169 |
16eb88f98e0b4dc403ba59f38573cb31dfa362d6
|
26f27b63386bacfd7d8350b07cecb7f52663046a
|
/src/plots.py
|
35259f42c966d350257f7d5af0f41a61792d67b0
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/robertmetcalf/chia-log
|
906811ca83bbebc13212999c8c83541ac2e8ceed
|
d1160f0fe8920e581a3a0ffd242f70597b5061fd
|
refs/heads/main
| 2023-05-25T20:40:06.586044 | 2021-06-07T02:13:46 | 2021-06-07T02:13:46 | 361,777,874 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# system packages
from pathlib import Path
from typing import List
import re
# local packages
from src.config import Config
from src.plot import Plot
class Plots:
'''
Process plots in log files. A log file may contain more than one plot entry.
'''
def __init__ (self, config:Config) -> None:
self._config = config
self._files:List[Path] = [] # files that were processed
self._plots:List[Plot] = [] # a single plot extracted from a log file
self._plot_ids:List[str] = [] # plot id's are used to check for duplicates
@property
def files (self) -> List[Path]:
'''Return a list of files processed, each element is a Path() object.'''
return self._files
@property
def plots (self) -> List[Plot]:
'''Return a list of Plot() objects.'''
return self._plots
def extract (self, log_file_path:Path) -> None:
'''Extract one or more plots from a log file.'''
# define the output from a single plot; a log file may contain more than
# one plot
plot_begin = r'Starting plotting progress (.+?)'
plot_end = r'Renamed final file'
pattern = plot_begin + plot_end
if log_file_path in self._files:
return
with open(log_file_path, 'r') as f:
log_data = f.read()
data_replace = log_data.replace('\n', ' ')
plots = re.findall(pattern, data_replace)
self._config.logger.debug(f'number of plots {len(plots)}')
# process each plot in the log file
for index, result in enumerate(plots, 1):
self._config.logger.debug(f'results len {len(result)}')
plot = Plot(self._config, log_file_path, index)
if plot.extract(result):
plot_id = plot.parameters.plot_id
if plot_id not in self._plot_ids:
self._plots.append(plot)
self._plot_ids.append(plot_id)
self._files.append(log_file_path)
def post_process (self) -> None:
'''Post-process each plot and add more information.'''
for plot in self._plots:
plot.set_plot_configuration() # group plots by disks/SSDs used
plot.set_plot_date() # plot yyyy-mm and yyyy-mm-dd and
plot.set_plot_time() # start, end, and elapsed time
'''
from datetime import datetime
from collections import namedtuple
Range = namedtuple('Range', ['start', 'end'])
r1 = Range(start=datetime(2012, 1, 15), end=datetime(2012, 5, 10))
r2 = Range(start=datetime(2012, 3, 20), end=datetime(2012, 9, 15))
latest_start = max(r1.start, r2.start)
earliest_end = min(r1.end, r2.end)
delta = (earliest_end - latest_start).seconds + 1
overlap = max(0, delta)
'''
def sort_by_start_time (self) -> List[Plot]:
'''
Return all Plot() objects sorted by the start time. Duplicate start
times look at the final end time as a secondary sort.
'''
return []
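# Usage sketch (illustrative; how Config is constructed and the log path are assumptions):
#   plots = Plots(config)
#   plots.extract(Path("logs/plot-2021-06-01.log"))
#   plots.post_process()
#   print(len(plots.plots), "plots parsed from", len(plots.files), "log files")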
|
UTF-8
|
Python
| false | false | 2,715 |
py
| 16 |
plots.py
| 12 | 0.661878 | 0.64825 | 0 | 94 | 27.882979 | 77 |
MasahiroK/TSTP
| 11,536,282,177,427 |
2537e5bdd1e94af5242a10c2e45d1bd1132b589f
|
84571c99d5789c5f55a17462cae9bcbcaddb8759
|
/14_c3.py
|
ec84c24a6ce2d439150ed3285dad05c9998931aa
|
[] |
no_license
|
https://github.com/MasahiroK/TSTP
|
68a1e034a9b2147d518e8d9f7fe52d6556d4ddaa
|
e3881f2e60b594644f1fb7489808d8761c80e163
|
refs/heads/master
| 2020-05-19T06:11:41.884420 | 2019-05-04T08:05:53 | 2019-05-04T08:05:53 | 184,867,978 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Jojo:
"""docstring fo Jojo."""
def __init__(self, name):
self.name = name
def compare(object1, object2):
return object1 is object2
jony = Jojo("jony")
same_jony = jony
print(compare(jony, same_jony))
another_jony = Jojo("jonathan")
print(compare(jony, another_jony))
|
UTF-8
|
Python
| false | false | 297 |
py
| 18 |
14_c3.py
| 18 | 0.656566 | 0.643098 | 0 | 15 | 18.8 | 34 |
robertocml/Python_Scripting
| 8,521,215,142,759 |
1d00447c9249acef7552572eb83b506f92d089fe
|
102b1b2c8feb063eb9c6ac5cde7dfd9837f5c9d7
|
/Keras_ANN.py
|
31d471cc52f3db9c4594f23f21e66b55bb843e3e
|
[] |
no_license
|
https://github.com/robertocml/Python_Scripting
|
06c7b19e4fd118e9a2be602e0e677842f708569e
|
3256228f17a11ef06d5bc4718584f4691461909f
|
refs/heads/master
| 2020-12-07T13:23:48.415830 | 2020-04-23T00:57:21 | 2020-04-23T00:57:21 | 232,730,983 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation
from keras.layers.core import Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
# Sequential is a linear stack of layers: it takes a list whose elements each represent one layer
model = Sequential([
    Dense(16, input_shape=(1,), activation='relu'),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')
])
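# Minimal continuation sketch (illustrative, not part of the original script): compile and
# fit the model using the optimizer and loss imported above; the training arrays and
# hyperparameters below are assumptions.
#   model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
#   model.fit(train_samples, train_labels, batch_size=10, epochs=20, shuffle=True, verbose=2)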
|
UTF-8
|
Python
| false | false | 508 |
py
| 15 |
Keras_ANN.py
| 12 | 0.765748 | 0.75 | 0 | 14 | 35.285714 | 93 |
circleupx/PythonGui
| 15,582,141,388,975 |
3383966a1b1621fb26e7e5386b033a4a8a77a1ed
|
25880dae073dffce46709ed9321df19c80ed5d2d
|
/Tkinter/Example08a.py
|
f38d2187015975d236b40a7cafa3629cd859ff57
|
[] |
no_license
|
https://github.com/circleupx/PythonGui
|
89748180197149328acfc8df6d547799c120747e
|
724552a30b577dda51ebb6c73edb0c7f5dfb3606
|
refs/heads/master
| 2018-01-08T18:19:50.164536 | 2015-10-30T16:23:38 | 2015-10-30T16:23:38 | 45,259,421 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Adding a toolbar to our GUI
"""
def drop():
print("New Project")
def drop2():
print("New")
from tkinter import *
gui_object = Tk()
# create a menu object using Menu class
menu = Menu(gui_object)
# configure the menu
gui_object.config(menu=menu)
# Tkinter already knows what a menu is; there is no need to specify where the menu goes.
# Create a File and Edit Sub Menu
fileMenu = Menu(menu, tearoff=False)
# add drop down functionality, in Tkinter this is known as cascading
menu.add_cascade(label="File", menu=fileMenu)
# Line above creates a button
# adding to the submenu
fileMenu.add_command(label="New Project", command=drop)
fileMenu.add_command(label="New", command=drop2)
# Create space between each menu option
fileMenu.add_separator()
# ********* toolbar ***************#
toolbar = Frame(gui_object, bg="blue")
toolbar_button = Button(toolbar, text="Insert Image", command =drop)
toolbar_button.pack(side=LEFT,padx=2, pady=2)
toolbar.pack(side=TOP, fill=X)
gui_object.mainloop()
|
UTF-8
|
Python
| false | false | 1,021 |
py
| 17 |
Example08a.py
| 17 | 0.70715 | 0.703232 | 0 | 51 | 19.019608 | 88 |
Elimut/GIG-PROG1
| 14,860,586,879,686 |
b12ec1bdf8f128272eea19d6c06d6a6ea8ca950a
|
b2e888205df2316104c9c1b42c59dddfb4ffed9e
|
/prog/cv2/bignumber.py
|
117b319a5e1ed9ea605e592c2dd0fb8925b4fb31
|
[] |
no_license
|
https://github.com/Elimut/GIG-PROG1
|
e706ef35605a31fb591c7ea757210102ef0b46bf
|
e95a961a858c725868ac7d25281c0b92c705ae0a
|
refs/heads/master
| 2016-09-05T17:43:47.239305 | 2015-04-12T06:08:22 | 2015-04-12T06:08:22 | 33,805,485 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
number = float(input("Input number: "))
if number > 10:
print("Number is big.")
else:
    print("Number is small.")
|
UTF-8
|
Python
| false | false | 120 |
py
| 118 |
bignumber.py
| 108 | 0.625 | 0.608333 | 0 | 5 | 23 | 39 |
adonSh/rpn
| 4,544,075,407,781 |
e0c073525eb2d9de3c5c1431d82e84b346b5f07c
|
83481fa1dd956481c634d780b9db964a233d4b6e
|
/intstack.py
|
6c5c5617e795efe84fff4214a0030b70fa9a5505
|
[] |
no_license
|
https://github.com/adonSh/rpn
|
d2ed6d0bdaf17dd578a6ff4715c8776e2123d88f
|
d72804d80bd7106f7d29e8e0e0adc445ecbe265e
|
refs/heads/master
| 2021-08-16T10:17:42.842383 | 2020-07-16T14:10:41 | 2020-07-16T14:10:41 | 204,026,796 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" Implementation of an immutable integer stack for RPN calculator.
Always contains at least one value (0).
"""
import builtins
from typing import List
Stack = List
INIT = 0
def new() -> Stack[int]:
return [INIT]
def print(s: Stack[int]) -> Stack[int]:
    # This module-level `print` shadows the builtin, so call the builtin explicitly
    # instead of recursing into ourselves.
    builtins.print(s)
    return s
def peek(s: Stack[int]) -> int:
return s[len(s) - 1]
def pop(s: Stack[int]) -> Stack[int]:
return s[:-1] if len(s) > 1 else s
def push(s: Stack[int], n: int) -> Stack[int]:
return s + [n]
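# Usage sketch (illustrative): the stack is never mutated in place; every operation
# returns a new list, and the sentinel 0 at the bottom is never popped.
#   s = new()         # [0]
#   s = push(s, 7)    # [0, 7]
#   top = peek(s)     # 7
#   s = pop(s)        # [0]
#   s = pop(s)        # still [0]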
|
UTF-8
|
Python
| false | false | 478 |
py
| 6 |
intstack.py
| 2 | 0.608787 | 0.598326 | 0 | 23 | 19.782609 | 68 |
puppy0608/odoo
| 8,443,905,751,667 |
530b85f6bdf2995597636b8040addecc27b836de
|
340e8cd8d047f666f9e7c865ad86ce055690e2cb
|
/home/models.py
|
3fa1014ed6a5e810022fd18136a6131e8c1d633d
|
[] |
no_license
|
https://github.com/puppy0608/odoo
|
d744062709619565dca6fe821e62bfbe7ca2ed3c
|
7ebb1b6ffc22fea05e2c38510adf37337febdf22
|
refs/heads/master
| 2023-02-06T05:08:33.479918 | 2020-12-24T07:34:16 | 2020-12-24T07:34:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
# Create your models here.
class Template(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
category = models.CharField(max_length=255)
structure = models.TextField(max_length=255)
class User(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
address = models.CharField(max_length=255)
phone = models.CharField(max_length=255)
class Address(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
city = models.CharField(max_length=255)
code = models.CharField(max_length=255)
class Data(models.Model):
id=models.AutoField(primary_key=True)
first_name = models.CharField(max_length=255)
middle_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
title = models.CharField(max_length=255)
gender = models.CharField(max_length=255)
company_name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
phone_number = models.CharField(max_length=255)
skype = models.CharField(max_length=255)
contact_type = models.CharField(max_length=255)
birthday = models.CharField(max_length=255)
birthday_location = models.CharField(max_length=255)
blood_group = models.CharField(max_length=255)
material_status = models.CharField(max_length=255)
user_id = models.CharField(max_length=255)
latitude = models.CharField(max_length=255)
longtitude = models.CharField(max_length=255)
contact_id = models.CharField(max_length=255)
provider_type = models.CharField(max_length=255)
user_role = models.CharField(max_length=255)
opt = models.CharField(max_length=255)
support_need = models.CharField(max_length=255)
|
UTF-8
|
Python
| false | false | 1,932 |
py
| 15 |
models.py
| 8 | 0.725155 | 0.673913 | 0 | 45 | 41.933333 | 56 |
thgeorgiou/uniwa-cloud-todoapp
| 1,254,130,500,599 |
df1e723b7446147b2cdf73471cae3f78aca4c8d9
|
0b82e286081e1b60102ddcb89ded2e61b84e5a65
|
/todoapp/__init__.py
|
113a5be0fc9c13662526f96a8d59eb2a8cba3db7
|
[
"Unlicense"
] |
permissive
|
https://github.com/thgeorgiou/uniwa-cloud-todoapp
|
d11929497f981b4af929a761dd30ea4fdfc4890b
|
be595fb2d94a353a86b906d74879fe7a70af18bf
|
refs/heads/master
| 2023-01-06T11:27:50.175522 | 2020-08-29T18:01:20 | 2020-08-29T18:01:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from datetime import datetime, date
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///local_store.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # To suppress warning
db = SQLAlchemy(app)
class Todo(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(200), nullable=False)
date_created = db.Column(
db.DateTime, default=datetime.utcnow, nullable=False)
date_completed = db.Column(db.DateTime, default=None)
def __repr__(self):
return '<Task %r>' % self.id
# HACK Run create_all here so the database gets created on heroku
# since each dyno runs with a different disk, running an 'init' task
# creates a disk that gets lost. This will ensure each run has a database
# to work with.
db.create_all()
@app.route('/', methods=['GET'])
def index():
'''
Returns the main app screen filled with any created tasks.
'''
tasks = Todo.query.order_by(Todo.date_created).all()
return render_template('index.html.j2', tasks=tasks)
@app.route('/', methods=['POST'])
def handle_form():
'''
Creates a new task on form submission
'''
content = request.form['content']
task = Todo(content=content)
try:
db.session.add(task)
db.session.commit()
return redirect('/')
except:
return 'Something went wrong! :('
@app.route('/complete/<int:id>')
def complete(id):
'''Marks a task done by the ID'''
task = Todo.query.get_or_404(id)
try:
task.date_completed = datetime.utcnow()
db.session.commit()
return redirect('/')
except:
return 'Something went wrong! :('
@app.route('/uncomplete/<int:id>')
def uncomplete(id):
'''Marks a task undone by the ID'''
task = Todo.query.get_or_404(id)
try:
task.date_completed = None
db.session.commit()
return redirect('/')
except:
return 'Something went wrong! :('
@app.route('/delete/<int:id>')
def delete(id):
'''Deletes a task'''
task = Todo.query.get_or_404(id)
try:
db.session.delete(task)
db.session.commit()
return redirect('/')
except:
return 'Something went wrong! :('
@app.route('/edit/<int:id>', methods=['GET', 'POST'])
def update(id):
'''Handles editing of tasks (both view and form submission)'''
task = Todo.query.get_or_404(id)
if request.method == 'POST':
task.content = request.form['content']
try:
db.session.commit()
return redirect('/')
except:
return 'Something went wrong! :('
else:
return render_template('edit.html.j2', task=task)
def serve():
app.run(debug=True, host='0.0.0.0', port=5000)
|
UTF-8
|
Python
| false | false | 2,860 |
py
| 7 |
__init__.py
| 2 | 0.622028 | 0.613287 | 0 | 106 | 25.981132 | 75 |
cloudmesh-deprecated/deprecated-teefaa
| 3,307,124,822,884 |
f0ff92253d6576a072862904174c647770f2c7c7
|
26c813665041f62d21c41af57715d99c8be4ed04
|
/teefaa/init.py
|
37cd50b5f588b06e86eaa7089191000bf71fb252
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/cloudmesh-deprecated/deprecated-teefaa
|
5535956f0199fbe6df4a4d7fb02dc68c473b3042
|
51685c4b06920c1d6591723aac9b9ef4514c976e
|
refs/heads/master
| 2021-05-26T20:54:20.043224 | 2014-01-28T15:42:32 | 2014-01-28T15:42:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import os
import time
import argparse
import subprocess
from .libexec.common import print_logo
class TeefaaInit(object):
def setup(self, parser):
init = parser.add_parser(
'init',
help="Initialize Teefaa environment")
init.set_defaults(func=self.do_init)
def do_init(self, args):
print_logo()
print("Initializing Teefaa environment...")
|
UTF-8
|
Python
| false | false | 439 |
py
| 8 |
init.py
| 1 | 0.630979 | 0.630979 | 0 | 22 | 18.954545 | 53 |
agustinhenze/mibs.snmplabs.com
| 3,015,067,064,461 |
2669cf2bb1fddad0d877e6ba2750056c9d1a5e9c
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp/BIANCA-BRICK-BINARY-MIB.py
|
1b429de32c627e77905583251f9ce7fdde1da765
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 |
Apache-2.0
| true | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | 2019-09-20T14:09:17 | 2019-08-19T15:06:57 | 234,316 | 0 | 0 | 0 | null | false | false |
#
# PySNMP MIB module BIANCA-BRICK-BINARY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BIANCA-BRICK-BINARY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:21:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
DisplayString, = mibBuilder.importSymbols("RFC1158-MIB", "DisplayString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, NotificationType, MibIdentifier, ObjectIdentity, Unsigned32, IpAddress, ModuleIdentity, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, iso, Counter64, Gauge32, TimeTicks, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "NotificationType", "MibIdentifier", "ObjectIdentity", "Unsigned32", "IpAddress", "ModuleIdentity", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "iso", "Counter64", "Gauge32", "TimeTicks", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
bintec = MibIdentifier((1, 3, 6, 1, 4, 1, 272))
bibo = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4))
ipsec = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 26))
binTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 26, 65), )
if mibBuilder.loadTexts: binTable.setStatus('mandatory')
binEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 26, 65, 1), ).setIndexNames((0, "BIANCA-BRICK-BINARY-MIB", "binEntIndex"))
if mibBuilder.loadTexts: binEntry.setStatus('mandatory')
binEntIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 65, 1, 1), Integer32())
if mibBuilder.loadTexts: binEntIndex.setStatus('mandatory')
binEntNextIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 65, 1, 2), Integer32())
if mibBuilder.loadTexts: binEntNextIndex.setStatus('mandatory')
binEntSetId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 65, 1, 3), Integer32())
if mibBuilder.loadTexts: binEntSetId.setStatus('mandatory')
binEntData = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 65, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255)))
if mibBuilder.loadTexts: binEntData.setStatus('mandatory')
binPublicTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 26, 67), )
if mibBuilder.loadTexts: binPublicTable.setStatus('mandatory')
binPublicEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 26, 67, 1), ).setIndexNames((0, "BIANCA-BRICK-BINARY-MIB", "binPublicEntIndex"))
if mibBuilder.loadTexts: binPublicEntry.setStatus('mandatory')
binPublicEntIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 67, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: binPublicEntIndex.setStatus('mandatory')
binPublicEntNextIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 67, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: binPublicEntNextIndex.setStatus('mandatory')
binPublicEntSetId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 67, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: binPublicEntSetId.setStatus('mandatory')
binPublicEntData = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 67, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: binPublicEntData.setStatus('mandatory')
binFileTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 26, 66), )
if mibBuilder.loadTexts: binFileTable.setStatus('mandatory')
binFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 26, 66, 1), ).setIndexNames((0, "BIANCA-BRICK-BINARY-MIB", "binFileEntSetId"))
if mibBuilder.loadTexts: binFileEntry.setStatus('mandatory')
binFileEntName = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 66, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: binFileEntName.setStatus('mandatory')
binFileEntSize = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 66, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: binFileEntSize.setStatus('mandatory')
binFileEntPublic = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 66, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("false", 1), ("true", 2))).clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: binFileEntPublic.setStatus('mandatory')
binFileEntSetId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 26, 66, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: binFileEntSetId.setStatus('mandatory')
mibBuilder.exportSymbols("BIANCA-BRICK-BINARY-MIB", bintec=bintec, binEntData=binEntData, binFileEntSetId=binFileEntSetId, binEntSetId=binEntSetId, binPublicTable=binPublicTable, binFileEntry=binFileEntry, binFileEntPublic=binFileEntPublic, binEntNextIndex=binEntNextIndex, binPublicEntIndex=binPublicEntIndex, ipsec=ipsec, bibo=bibo, binFileEntName=binFileEntName, binPublicEntSetId=binPublicEntSetId, binEntry=binEntry, binFileTable=binFileTable, binPublicEntNextIndex=binPublicEntNextIndex, binPublicEntry=binPublicEntry, binFileEntSize=binFileEntSize, binPublicEntData=binPublicEntData, binTable=binTable, binEntIndex=binEntIndex)
|
UTF-8
|
Python
| false | false | 5,626 |
py
| 24,656 |
BIANCA-BRICK-BINARY-MIB.py
| 19,915 | 0.754533 | 0.680412 | 0 | 54 | 103.185185 | 634 |
yanghr/SVD_Prune_EDLCV
| 14,121,852,519,019 |
9c9e469c48907aa9ba26cab689ae0b6a6e749bad
|
b52acc6831f031e2a1814ec1a0401a608c7ff07e
|
/CNN/imagenet/Regularization.py
|
b56d998fff96dbb52de7c07e8e2ddb5d57e2d224
|
[] |
no_license
|
https://github.com/yanghr/SVD_Prune_EDLCV
|
f51eab5e61242acab8cdb2f37bf7790e7dce04d9
|
f17f9b29b6425f918e3b9ff0b799818b1f12922a
|
refs/heads/master
| 2022-05-30T23:16:53.736433 | 2020-04-18T20:58:20 | 2020-04-18T20:58:20 | 256,533,767 | 15 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
def Reg_Loss(parameters,reg_type = 'Hoyer'):
"""
type can be : Hoyer,Hoyer-Square,L1
"""
reg = 0.0
for param in parameters:
if param.requires_grad and torch.sum(torch.abs(param))>0:
if reg_type == "Hoyer":
reg += torch.sum(torch.abs(param))/torch.sqrt(torch.sum(param**2))-1#Hoyer
elif reg_type == "Hoyer-Square":
reg += (torch.sum(torch.abs(param))**2)/torch.sum(param**2)-1#Hoyer-Square
elif reg_type == "L1":
reg += torch.sum(torch.abs(param))#L1
else:
reg = 0.0
return reg
def Reg_Loss_Param(param,reg_type = 'Hoyer'):
"""
Regularization for single parameter
"""
reg = 0.0
if param.requires_grad and torch.sum(torch.abs(param))>0:
if reg_type == "Hoyer":
reg = torch.sum(torch.abs(param))/torch.sqrt(torch.sum(param**2))-1#Hoyer
elif reg_type == "Hoyer-Square":
reg = (torch.sum(torch.abs(param))**2)/torch.sum(param**2)-1#Hoyer-Square
elif reg_type == "L1":
reg = torch.sum(torch.abs(param))#L1
else:
reg = 0.0
return reg
def orthogology_loss(mat,device = 'cpu'):
loss = 0.0
if mat.requires_grad:
if mat.size(0)<=mat.size(1):
mulmat = mat.matmul(mat.transpose(0,1))#AxA'
else:
mulmat = mat.transpose(0,1).matmul(mat)#A'xA
loss = torch.sum((mulmat-torch.eye(mulmat.size(0),device = device))**2)/(mulmat.size(0)*mulmat.size(1))
return loss
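# Usage sketch (illustrative; `model`, `criterion`, `x`, `y` and the penalty weight are
# assumptions): the regularizer is simply added to the task loss before backpropagation.
#   reg_weight = 1e-4
#   loss = criterion(model(x), y) + reg_weight * Reg_Loss(model.parameters(), reg_type='Hoyer-Square')
#   loss.backward()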
|
UTF-8
|
Python
| false | false | 1,617 |
py
| 14 |
Regularization.py
| 9 | 0.536178 | 0.513296 | 0 | 44 | 34.795455 | 111 |
RadoslawBylica/DiscordBot
| 4,595,615,056,561 |
8499ab61b75ecf4108e33cb231119cb78d97faad
|
de2a590028974a65b59426c8bf114810044d6f69
|
/BakerChanData.py
|
1ba89dec846034a6432a0cd40a66174078a44cfe
|
[] |
no_license
|
https://github.com/RadoslawBylica/DiscordBot
|
3e0fc52042924c9af3fbdf4715b52117d593ead5
|
ed98c0976155663aa628ae326c05a4baf287f9ca
|
refs/heads/master
| 2022-11-29T11:24:30.760834 | 2020-07-29T20:35:10 | 2020-07-29T20:35:10 | 283,593,676 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from os import path
class __Messages__():
__Language__ = None
__LanguageEnabled__ = ["Poland", "English"]
__PolandMessages__ = {
"OpenExtension":"Poprawnie załadowano rozszerzenie.",
"CloseExtension":"Poprawnie rozłączono rozszerzenie.",
"RestartExtension":"Poprawnie przeładowano rozszerzenie.",
"CheckFailure":"Nie masz uprawnień do tej funckji.",
"on_ready":"Baker chan jest gotowa upiec trochę chleba.",
"activity":"na śmierć i życie.",
"ping":"Baker chan mówi, że ping na serwerze wynosi",
"clear":"Baker chan mówi, że nie może usunąć tylu wiadomości.",
"play":"Baker chan mówi, że takiej piosenki nie ma w jej bazie danych :c",
"download1":"Piosenka o takiej nazwie już jest w bazie danych.",
"download2":"Baker chan nie ma jeszcze takiej funkcjonalności."
}
__EnglishMessages__ = {
"OpenExtension":"Module was loaded corectly.",
"CloseExtension":"Module was disloaded corectly.",
"RestartExtension":"Module was reloaded corectly.",
"CheckFailure":"You don't have permission to use this function.",
"on_ready":"Baker chan is ready to bake some bread.",
"activity":"to dead or alive.",
"ping":"Baker chan says, that ping on this server is ",
"clear":"Baker chan says, that she can't delete that amount of messages.",
"play":"Baker chan says, that this song doesn't exist in her database :c",
"download1":"That song is in Baker chan database.",
"download2":"Baker chan can't do that yet."
}
def __init__(self, Language:str = "Poland"):
try:
self.__LanguageEnabled__.index(Language)
except ValueError:
print(f"{Language} is not available.")
else:
self.__Language__ = Language
def ChangeLanguage(self, Language:str):
try:
self.__LanguageEnabled__.index(Language)
except ValueError:
print(f"{Language} is not available.")
else:
self.__Language__ = Language
def Get(self, Index:str):
if self.__Language__ == "Poland":
return self.__PolandMessages__.get(Index)
if self.__Language__ == "English":
return self.__EnglishMessages__.get(Index)
class __Settings__():
Prefix = None
Token = None
MainPath = None
BodyName = None
BodyPath = None
SongsFolder = None
def __init__(self, MainPath:str):
self.Prefix = "."
self.Token = 'Not Public'
self.MainPath = MainPath
self.BodyName = "BakerChanBody"
self.BodyPath = path.join(self.MainPath, self.BodyName)
self.SongsFolder = "SongsDatabase"
if __name__ != "__main__":
def init(MainPath:str):
global Messages
global Settings
Messages = __Messages__()
Settings = __Settings__(MainPath)
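
# Example usage (a minimal sketch; the path below is a placeholder):
#
#   import BakerChanData
#   BakerChanData.init("/path/to/bot/folder")
#   print(BakerChanData.Messages.Get("on_ready"))
#   print(BakerChanData.Settings.BodyPath)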
|
UTF-8
|
Python
| false | false | 2,943 |
py
| 7 |
BakerChanData.py
| 7 | 0.602327 | 0.600958 | 0 | 81 | 35.08642 | 82 |
mr-ping/statwords
| 13,898,514,184,160 |
59e8ed22e2d317980bfc11646c40d837e42719a9
|
7d71f5a4cedefea7a0b35b2800fdf5904a05dfea
|
/statwords/parser/parser.py
|
b2461deea689e52c7b774ba413bb1108d17d22c1
|
[] |
no_license
|
https://github.com/mr-ping/statwords
|
4cff820fe0ac71b338682b09dc0dfab5885a7a1c
|
e2f0b7ec5336ef32f88f41b79919b496e5c57397
|
refs/heads/master
| 2016-05-30T02:02:19.706219 | 2014-11-23T18:22:58 | 2014-11-23T18:22:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from xml.etree import ElementTree as ET
import re
from StringIO import StringIO
from matplotlib import pyplot as plt
class XmlParser(object):
""" Parsing a xml file into a Element generator
methods:
stat_words: count the matching words in the original file with your\
offering.
plot_counting: ploting a histogram with the words's counting and\
storing it to a file-like object.
"""
def __init__(self, file_obj):
        if 'the size of file_obj meets the requirement of my server':  # placeholder check, always truthy
tree = ET.parse(file_obj)
self.generator = tree.iter()
else:
pass
def stat_words(self, words):
""" Counting words in the original file
Args:
words: a list consist of the words you want to count.
Return:
A words dictionary that describing the counting result
"""
words_dict = dict()
for word in words:
words_dict[word] = 0
for node in self.generator:
if node.text:
for word in words:
count = len(re.findall(word.lower(), node.text.lower()))
words_dict[word] += count
return words_dict
def plot_result(self, words_dict):
tmp_file = StringIO()
plt.figure(1)
plt.title('How many times the words appeared in the xml file')
plt.xlabel('words')
plt.ylabel('numbers')
plt.bar(left=range(len(words_dict)),
height=words_dict.values(),
align='center')
plt.xticks(range(len(words_dict)), words_dict.keys())
plt.savefig(tmp_file)
plt.clf()
return tmp_file
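
# Example usage (a minimal sketch; 'books.xml' and the word list are
# placeholders, not files or data shipped with this module):
if __name__ == '__main__':
    with open('books.xml') as xml_file:
        xml_parser = XmlParser(xml_file)
    counts = xml_parser.stat_words(['python', 'xml'])
    image_buffer = xml_parser.plot_result(counts)  # file-like object holding the histogram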
|
UTF-8
|
Python
| false | false | 1,730 |
py
| 12 |
parser.py
| 8 | 0.569364 | 0.568208 | 0 | 57 | 29.350877 | 76 |
mcalanog/projects
| 14,173,392,093,890 |
2e98f063b5090a5160f9fb1cc39dc4f12239c44e
|
2bd23901f7419301a6963f5af38572c3ee211fdc
|
/project2/exp_eval.py
|
09cca144ccff64e7336b708eda864140b903b667
|
[] |
no_license
|
https://github.com/mcalanog/projects
|
892400a39f4860c21160a5e54e8e5f0d95f5b6bb
|
262adbef8e5717a8bd8bdfb7972c82c0d9f30b38
|
refs/heads/master
| 2020-12-27T20:06:38.778675 | 2020-02-03T19:13:40 | 2020-02-03T19:13:40 | 238,036,278 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Name: Maeve Calanog
#CPE 202-05
#Project 2:
#Reformating algebraic expression; using stacks, prioritization of operators
from stack_array import Stack
# You do not need to change this class
class PostfixFormatException(Exception):
pass
def postfix_eval(input_str):
"""Evaluates a postfix expression"""
"""Input argument: a string containing a postfix expression where tokens
are space separated. Tokens are either operators + - * / ** or numbers
Returns the result of the expression evaluation.
Raises an PostfixFormatException if the input is not well-formed"""
thelist= Stack(30)
newinput = input_str.split()
operlist= ['+', '-', '*', '/', '**', '<<', '>>'] #to recall operators throughout function
if len(newinput)== 0: #if nothing in input nothing gets returned
return ''
for item in newinput: #go through all number or operator in input
if item in operlist: #if it is an operator
            if thelist.size() < 2: #there must be two numbers to perform an operation
raise PostfixFormatException("Insufficient operands")
value1= thelist.pop()
value2= thelist.pop()#pop top two items in list
if item == '+':
thelist.push(value2 + value1)
if item == '-':
thelist.push(value2 - value1)
if item == '*':
thelist.push(value2 * value1)
if item == '/': #cannot divide by zero
if value1 == 0:
raise ValueError
thelist.push(value2 / value1)
if item == '**':
thelist.push(value2 ** value1)
if item == '>>' or item == '<<':
                if type(value1) == float or type(value2) == float: #cannot perform operation if a float is used
raise PostfixFormatException("Illegal bit shift operand")
if '-' in str(value1):
raise PostfixFormatException("cannot shift with negative number")
if item == '>>':
thelist.push(int(value2) >> int(value1))
if item == '<<':
thelist.push(int(value2) << int(value1))
        elif item.isdigit() or item.replace('-', '', 1).isdigit(): #check that it is an integer or float before putting into list
thelist.push(int(item))
else:
try:
thelist.push(float(item))
except ValueError:
raise PostfixFormatException('Invalid token')
    if thelist.size()>1: #there should be one more number than operator in order to perform this function
raise PostfixFormatException('Too many operands')
return thelist.pop() #after going through whole list, only item in stack should be the solution
def infix_to_postfix(input_str):
"""Converts an infix expression to an equivalent postfix expression"""
"""Input argument: a string containing an infix expression where tokens are
space separated. Tokens are either operators + - * / ** parentheses ( ) or numbers
Returns a String containing a postfix expression """
newlist= [] #will store the items until string is complete
newinput = input_str.split()#string split up into list
if len(input_str) == 0:
return ''
thelist= Stack(30)
operlist = ['+', '-', '*', '/', '**', '<<', '>>', '(', ')']
for item in newinput:#go through every number or operator from input
if item not in operlist: #makes sure its a number or float
if item.isdigit():
newlist += [item]
elif item.replace('-', '', 1).isdigit() or item.replace('.', '', 1).isdigit():
newlist += [item]
if item in operlist and thelist.size() > 0: #for any operator and the stack already has something in it
if item == '+' or item== '-': #lowest priority (anything besides other + - can go on top, in the stack)
if thelist.peek() != '(':
while thelist.size() > 0 and thelist.peek() != '(':
next= thelist.pop()
if next!= '(':
newlist += [next]
thelist.push(item)
if item == '*' or item == '/':
if thelist.peek() == "+" or thelist.peek() == "-" or thelist.peek() == "(":
thelist.push(item)
else:
while thelist.size() > 0 and (thelist.peek() != '+' and thelist.peek() != '-' and thelist.peek() != '('):
next = thelist.pop()
if next != '(':
newlist += [next]
thelist.push(item)
if item == "**":
if thelist.peek() != ">>" and thelist.peek() != "<<":
thelist.push(item)
else:
while thelist.size() > 0 and (thelist.peek() == ">>" or thelist.peek() == "<<"):
next = thelist.pop()
if next != '(':
newlist += [next]
thelist.push(item)
if item == ">>" or item == "<<":
while thelist.size() > 0 and (thelist.peek() == ">>" or thelist.peek() == "<<"):
next = thelist.pop()
if next != '(':
newlist += [next]
thelist.push(item)
if item == "(":
thelist.push(item)
if item == ")":
while thelist.size() > 0 and thelist.peek()!= "(":
newlist += [thelist.pop()]
if thelist.size() > 0 and thelist.peek() == "(":
thelist.pop()
elif item in operlist and thelist.size() == 0:
thelist.push(item)
while thelist.size() > 0:
theitem= thelist.pop()
if theitem != '(':
newlist += [theitem]
return ' '.join(newlist)
def prefix_to_postfix(input_str):
"""Converts a prefix expression to an equivalent postfix expression"""
"""Input argument: a string containing a prefix expression where tokens are
space separated. Tokens are either operators + - * / ** parentheses ( ) or numbers
Returns a String containing a postfix expression(tokens are space separated)"""
listinput= input_str.split()
if len(input_str) == 0:
return ''
listinput= list(reversed(listinput))
thislist= Stack(30)
operlist = ['+', '-', '*', '/', '**', '<<', '>>', '(', ')']
for object in listinput:
if object in operlist and thislist.size() >= 2:
item1 = thislist.pop()
item2 = thislist.pop()
word = [str(item1), str(item2), str(object)]
thislist.push(" ".join(word))
elif object.replace('-', '', 1).isdigit() or object.replace('.', '', 1).isdigit():
thislist.push(object)
return thislist.pop()
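
# Example usage (a minimal sketch; expected results follow from the rules above):
if __name__ == '__main__':
    print(infix_to_postfix('3 + 4 * 2'))   # "3 4 2 * +"
    print(postfix_eval('3 4 2 * +'))       # 11
    print(prefix_to_postfix('+ 3 * 4 2'))  # "3 4 2 * +"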
|
UTF-8
|
Python
| false | false | 7,124 |
py
| 16 |
exp_eval.py
| 14 | 0.516002 | 0.508001 | 0 | 148 | 46.135135 | 128 |
varunvv/Projects
| 3,238,405,365,177 |
149d4a4912b8a5360556454a3cf5f9de46f58cef
|
ce42ef49295893b1883e999688f4a055be3a4aa2
|
/Utilities/Contrast_adjustment/adj-contrast.py
|
5c095cd8757ef110bd8427bb2b844d588de9e028
|
[] |
no_license
|
https://github.com/varunvv/Projects
|
a94e0817cbbbe8707e3de22e2cc5f74b8f413459
|
86ea17692e344ddab8d73742f8a9f76e1a550d26
|
refs/heads/master
| 2020-05-30T23:02:40.629059 | 2019-06-04T05:31:14 | 2019-06-04T05:31:14 | 190,007,483 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import numpy as np
def adjust_brightness(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert it to hsv
h, s, v = cv2.split(hsv)
x = v - v*.65
x = x.astype('uint8')
final_hsv = cv2.merge((h, s, x))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
#cv2.imwrite("image_processed.jpg", img)
def adjust_hue(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert it to hsv
h, s, v = cv2.split(hsv)
x = h - h *.5
x = x.astype('uint8')
final_hsv = cv2.merge((x, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
#cv2.imwrite("image_processed.jpg", img)
def adjust_saturation(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert it to hsv
h, s, v = cv2.split(hsv)
x = s - s *.5
x = x.astype('uint8')
final_hsv = cv2.merge((h, x, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
#cv2.imwrite("image_processed.jpg", img)
def adjust_gamma(image, gamma=1.0):
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table)
def custom_adjust(img):
h,w,_ = np.shape(img)
print h,w
pimg = cv2.imread('7.png', 1)
cv2.imshow('original',pimg)
# pimg = adjust_brightness(original)
# cv2.imshow("brightness reduced", pimg)
# #pimg = adjust_hue(pimg)
# #pimg = adjust_saturation(pimg)
# gamma = .4
# pimg = adjust_gamma(pimg, gamma=gamma)
# cv2.imshow("gammam image 1", pimg)
pimg = custom_adjust(pimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
UTF-8
|
Python
| false | false | 1,609 |
py
| 13 |
adj-contrast.py
| 4 | 0.625233 | 0.586078 | 0 | 65 | 23.753846 | 65 |
privateHmmmm/leetcode
| 15,367,393,032,460 |
c4ba1a67c91e09e64c7ac998555295207892aea7
|
056adbbdfb968486ecc330f913f0de6f51deee33
|
/065-valid-number/valid-number.py
|
7eb4fe9b2f25120e2035e88853e0c365410d7c4d
|
[] |
no_license
|
https://github.com/privateHmmmm/leetcode
|
b84453a1a951cdece2dd629c127da59a4715e078
|
cb303e610949e953b689fbed499f5bb0b79c4aea
|
refs/heads/master
| 2021-05-12T06:21:07.727332 | 2018-01-12T08:54:52 | 2018-01-12T08:54:52 | 117,215,642 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
# Validate if a given string is numeric.
#
#
# Some examples:
# "0" => true
# " 0.1 " => true
# "abc" => false
# "1 a" => false
# "2e10" => true
#
#
# Note: It is intended for the problem statement to be ambiguous. You should gather all requirements up front before implementing one.
#
#
#
# Update (2015-02-10):
# The signature of the C++ function had been updated. If you still see your function signature accepts a const char * argument, please click the reload button to reset your code definition.
#
class Solution(object):
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
"""
(+-)10.234e56
"""
s = s.strip()
numSeen = False
numAfterE = True
eSeen = False
pointSeen = False
for i in range(0, len(s)):
if s[i].isdigit() == True:
numSeen = True
numAfterE = True
elif s[i] == 'e': # not e1, not e1e2
if eSeen or not numSeen:
return False
eSeen = True
numAfterE = False
elif s[i] == '.':
if eSeen or pointSeen: # not 12e1.2, not 1.2.1
return False
pointSeen = True
elif s[i] in ['+', '-']: # -12 or 12e-12
if i !=0 and s[i-1] != 'e':
return False
else:
return False
return numSeen and numAfterE
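
# Example usage (a minimal sketch based on the cases listed in the problem statement):
if __name__ == "__main__":
    solution = Solution()
    print(solution.isNumber(" 0.1 "))  # True
    print(solution.isNumber("abc"))    # False
    print(solution.isNumber("2e10"))   # True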
|
UTF-8
|
Python
| false | false | 1,567 |
py
| 292 |
valid-number.py
| 291 | 0.474793 | 0.44799 | 0 | 60 | 24.95 | 190 |
zackster/HipHopGoblin
| 16,209,206,607,770 |
7cee32acdfc0b90b0e1a2afc7307739ef6648721
|
6f7b37dd5876dad69fd259cd91c8e00db23b0912
|
/examples/artist_reviews.py
|
75fda4a7de35890d223e9ae9ddd8b8f5c983c71b
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/zackster/HipHopGoblin
|
78f98e161124f487a784e505dcddd26cfdbfc170
|
d994759906e581f365fd954837c3f29a5266dcd8
|
refs/heads/master
| 2021-01-16T20:55:24.877152 | 2011-08-23T01:42:11 | 2011-08-23T01:42:11 | 2,250,398 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Uncomment to set the API key explicitly. Otherwise Pyechonest will
# look in the ECHO_NEST_API_KEY environment variable for the key.
#from pyechonest import config
#config.ECHO_NEST_API_KEY='YOUR API KEY'
from pyechonest import artist
td_results = artist.search(name='The Decemberists')
if td_results:
td = td_results[0]
for review_document in td.reviews:
print 'Album Review: "%s" by %s' % (review_document['release'], td.name)
for key, val in review_document.iteritems():
print ' \'%s\': %s' % (key, val)
else:
print 'Artist not found.'
|
UTF-8
|
Python
| false | false | 587 |
py
| 88 |
artist_reviews.py
| 58 | 0.67632 | 0.674617 | 0 | 17 | 33.529412 | 80 |
amoux/david
| 11,338,713,696,207 |
82d906584890ef809565e278ccc6b936548b63d6
|
7a3ef0e0643cbf23defd3d48b8496037b0373500
|
/david/lang/spelling.py
|
469561747371cc3696094dabf5d47d1cca815024
|
[] |
no_license
|
https://github.com/amoux/david
|
d1ef452ebdc31d99555cab15cbbb472c35612a94
|
825f30806696e5c77f669ec936fda0e8db7829f3
|
refs/heads/master
| 2023-08-03T22:42:51.332841 | 2021-10-06T18:57:37 | 2021-10-06T18:57:37 | 199,486,952 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Spelling Corrector in Python 3; see http://norvig.com/spell-correct.html.
Copyright (c) 2007-2016 Peter Norvig MIT license:
www.opensource.org/licenses/mit-license.php
"""
import re
from collections import Counter
from typing import Dict, List, Pattern
class Speller:
"""Spell correction based on Peter Norvig's implementation."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
def __init__(
self,
filepath: str = None,
document: List[str] = None,
word_count: Dict[str, int] = None,
):
"""Speller uses word counts as a metric for correcting words.
`filepath` : A file containing lines of string sequences.
`document` : An iterable document of string sequences.
`word_count` : An instance of `collections.Counter` with
existing word count pairs.
"""
self.word_count = word_count
if filepath is not None:
self.word_count_from_file(filepath)
elif document is not None:
self.word_count_from_doc(document)
def word_count_from_file(self, filepath: str):
"""Load and tokenize texts into word count pairs from a file."""
tokens = self.tokenize(open(filepath).read())
self.word_count = Counter(tokens)
def word_count_from_doc(self, document: List[str]):
"""Set the word count dictionary from a document of string sequences."""
tokens = []
for doc in document:
tokens.extend(self.tokenize(doc))
self.word_count = Counter(tokens)
def most_common(self, k=10):
"""Return the most common words from the dictionary counter."""
return self.word_count.most_common(k)
def tokenize(self, sequence: str):
"""Regex based word tokenizer."""
return re.findall("[a-z]+", sequence.lower())
def correct_string(self, sequence: str):
"""Return the correct spell form a string sequence."""
return re.sub("[a-zA-Z]+", self.correct_match, sequence)
def correct_match(self, match: Pattern[str]):
"""Spell correct word in match, and preserve proper case."""
word = match.group()
def case_of(text):
"""Return the case-function appropriate for text."""
return (
str.upper
if text.isupper()
else str.lower
if text.islower()
else str.title
if text.istitle()
else str
)
return case_of(word)(self._correct(word.lower()))
def _known(self, words):
return {w for w in words if w in self.word_count}
def _edits0(self, word):
return {word}
def _edits1(self, word):
def splits(word):
return [(word[:i], word[i:]) for i in range(len(word) + 1)]
pairs = splits(word)
deletes = [a + b[1:] for (a, b) in pairs if b]
transposes = [a + b[1] + b[0] + b[2:] for (a, b) in pairs if len(b) > 1]
replaces = [a + c + b[1:] for (a, b) in pairs for c in self.alphabet if b]
inserts = [a + c + b for (a, b) in pairs for c in self.alphabet]
return set(deletes + transposes + replaces + inserts)
def _edits2(self, word):
return {e2 for e1 in self._edits1(word) for e2 in self._edits1(e1)}
def _correct(self, word):
candidates = (
self._known(self._edits0(word))
or self._known(self._edits1(word))
or self._known(self._edits2(word))
or [word]
)
return max(candidates, key=self.word_count.get)
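
# Example usage (a minimal sketch; the sample document is made up):
if __name__ == "__main__":
    sample_docs = ["the quick brown fox", "the lazy dog", "the quick dog"]
    speller = Speller(document=sample_docs)
    print(speller.correct_string("teh quick bronw fox"))  # -> "the quick brown fox"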
|
UTF-8
|
Python
| false | false | 3,599 |
py
| 73 |
spelling.py
| 53 | 0.583773 | 0.575438 | 0 | 105 | 33.27619 | 82 |
felixinho/element
| 4,011,499,493,818 |
f99268b9e45d098c6314d026f14050c579cedcc3
|
b31bb5f77bdd33e6f4ae4424d4e28517a50ff0e1
|
/pyfiles/elegant/elegantcomputation.py
|
f344e724d30f9c38c664434dce5792fe5bf74061
|
[] |
no_license
|
https://github.com/felixinho/element
|
e847170b0db7c1255237c597ab8467b8671c83c7
|
9bf5898afa4466b1e2d0a7f17d7e85fef3962e02
|
refs/heads/master
| 2018-04-22T22:03:35.610490 | 2017-10-22T18:05:46 | 2017-10-22T18:05:46 | 91,264,898 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import division, print_function
import numpy as np
import os, time
import subprocess
from getbasiclatticedata import getlatticedata
from sdds import SDDS
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def returntwissdata(ele_path, twi_path, defns_path, activelattice='../../lattices/active.lte'):
activelattice = os.path.normpath(activelattice)
tt1 = time.clock()
processstring = "export RPN_DEFNS='" + defns_path + "' && elegant " + ele_path
sub = subprocess.call(processstring, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# sub.wait()
    print('time for elegant computation', time.clock() - tt1)
data = SDDS(0)
data.load(twi_path)
sub = subprocess.Popen('rm ' + twi_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
sub.wait()
# Get inconvenient attribute dictionary
parameterDataDict = dict(zip(data.parameterName, [val[0] for val in data.parameterData]))
columnDataDict = dict(zip(data.columnName, [val[0] for val in data.columnData]))
# Get convenient attribute dictionary
# Get twissdata parameter
latticedata = AttrDict()
# latticedata = getlatticedata(activelattice)
latticedata.Cs = columnDataDict['s']
latticedata.LatticeLength = float(latticedata.Cs[-1])
    # lattice data from elegant
latticedata.ElementType = []
latticedata.ElementName = []
latticedata.ElementPosition = []
latticedata.ElementLength = []
latticedata.ElementRadius = []
latticedata.Elementn_kicks = []
prev_step_name = ''
prev_el_pos = 0
el_n_kicks = 1
tmp_ElementLength = []
for i, s in enumerate(latticedata.Cs):
if columnDataDict['ElementName'][i] != prev_step_name:
dostuff = True
if columnDataDict['ElementType'][i].upper() == 'DRIF':
latticedata.ElementName.append(columnDataDict['ElementName'][i])
latticedata.ElementType.append('DRIF')
elif columnDataDict['ElementType'][i].upper() == 'CSBEND':
latticedata.ElementName.append(columnDataDict['ElementName'][i])
latticedata.ElementType.append('BEND')
elif columnDataDict['ElementType'][i].upper() == 'KQUAD':
latticedata.ElementName.append(columnDataDict['ElementName'][i])
latticedata.ElementType.append('QUAD')
elif columnDataDict['ElementType'][i].upper() == 'KSEXT':
latticedata.ElementName.append(columnDataDict['ElementName'][i])
latticedata.ElementType.append('SEXT')
elif columnDataDict['ElementType'][i].upper() == 'KOCT':
latticedata.ElementName.append(columnDataDict['ElementName'][i])
latticedata.ElementType.append('OCT')
else:
# latticedata.ElementName.append(columnDataDict['ElementName'][i])
# latticedata.ElementType.append(columnDataDict['ElementType'][i])
dostuff = False
if dostuff:
latticedata.ElementPosition.append(s)
tmp_ElementLength.append(s - prev_el_pos)
latticedata.Elementn_kicks.append(el_n_kicks)
prev_el_pos = s
el_n_kicks = 1
else:
el_n_kicks += 1
prev_step_name = columnDataDict['ElementName'][i]
latticedata.ElementLength = tmp_ElementLength[1:]
latticedata.ElementLength.append(latticedata.LatticeLength - latticedata.ElementPosition[-1])
# twissdata
twissdata = AttrDict()
twissdata.Cs = np.array(columnDataDict['s'])
twissdata.betax = np.array(columnDataDict['betax'])
twissdata.alphax = np.array(columnDataDict['alphax'])
twissdata.psix = np.array(columnDataDict['psix'])
twissdata.etax = np.array(columnDataDict['etax'])
twissdata.betay = np.array(columnDataDict['betay'])
twissdata.alphay = np.array(columnDataDict['alphay'])
twissdata.psiy = np.array(columnDataDict['psiy'])
twissdata.etay = np.array(columnDataDict['etay'])
# print(len(twissdata.betax))
const_c = 299792458
twissdata.Qx = parameterDataDict['nux']
twissdata.QxFreq = (twissdata.Qx % 1) * const_c / latticedata.LatticeLength / 1000 # kHz
twissdata.Qy = parameterDataDict['nuy']
twissdata.QyFreq = (twissdata.Qy % 1) * const_c / latticedata.LatticeLength / 1000 # kHz
twissdata.alphac = parameterDataDict['alphac']
latticedata.LatticeName = os.path.basename(activelattice)
# print(latticedata.LatticeName)
# print(latticedata.LineName)
# print(latticedata.LinePosition)
# print(latticedata.LineLength)
# print(twissdata.Qx)
# print('done')
return latticedata, twissdata
if __name__ == '__main__':
latticedata, twissdata = returntwissdata('twissOutput_fast.ele', 'output.twi', 'defns.rpn')
print(len(latticedata.ElementType))
print(len(latticedata.ElementPosition))
print(len(latticedata.ElementName))
print(twissdata.Qx)
|
UTF-8
|
Python
| false | false | 5,108 |
py
| 33 |
elegantcomputation.py
| 25 | 0.6574 | 0.651331 | 0 | 135 | 36.837037 | 106 |
aurianeb/projet
| 3,788,161,197,381 |
28175ee4fec8e2072be0117185a1744fd248e8c3
|
3db67b16a77bf03cd54d571eca6e726d6b76f09e
|
/conseil_de_films/conseil_de_films/urls.py
|
08e6aeda44c31778cdf4bb3802e2cc6a07e000a0
|
[] |
no_license
|
https://github.com/aurianeb/projet
|
07d9b3c69434b3f68c39d5b196def4c001f685e9
|
d10ec7243ed041a4374f5603c6432c397ca60813
|
refs/heads/master
| 2016-08-12T02:42:59.674609 | 2016-02-02T08:51:10 | 2016-02-02T08:51:10 | 47,886,121 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^films/', include('films.urls')),
    url(r'^admin/', include(admin.site.urls)),  # The admin section notably makes it easy to manage the database by hand
]
|
UTF-8
|
Python
| false | false | 273 |
py
| 10 |
urls.py
| 7 | 0.718519 | 0.718519 | 0 | 7 | 37.428571 | 130 |
rishi-hi-5/codes
| 15,410,342,672,724 |
ffae573504c165e8c347339b033a1a2c4ecac0a4
|
22ad2ffe61066572fd3c09e0d242bf8e88391a34
|
/hackearth/anagram.py
|
c5de9cf6317dd0364ad1ddfd8d3abda360744baf
|
[] |
no_license
|
https://github.com/rishi-hi-5/codes
|
5e52e7c55dcda365f7daf9fb108fde62f2408065
|
9ab8b11a702fd7a39a694a3bfa3fa5641fe61400
|
refs/heads/master
| 2021-01-10T05:14:02.091265 | 2018-02-22T14:53:13 | 2018-02-22T14:53:13 | 52,864,061 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
t=int(input())
while t!=0:
s1=input()
s2=input()
h1=[0 for i in range(0,26)]
h2=[0 for i in range(0,26)]
for i in s1:
h1[ord(i)-97]+=1
for i in s2:
h2[ord(i)-97]+=1
cnt=0
for i in range(0,26):
cnt+=(abs(h1[i]-h2[i]))
print(cnt)
t-=1
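# Worked example (made-up values, not from the original judge input): for
# s1 = "hello" and s2 = "billion" the per-letter count differences are
# h:1, e:1, b:1, i:2, n:1 (l and o cancel out), so the script prints 6 --
# the number of deletions needed to make the two strings anagrams.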
|
UTF-8
|
Python
| false | false | 303 |
py
| 253 |
anagram.py
| 230 | 0.455446 | 0.356436 | 0 | 17 | 16.823529 | 31 |
rhaehfaos23/jijinalimi_backend_photo
| 16,423,954,964,181 |
b8c738e8859c1c49e7b34d9c26676caa3c370842
|
267e77683f7f8a3e0e6fe388c0368093a0c052bd
|
/custom_logging_handler.py
|
569932483e654d2e579ab5434cd06008527b892e
|
[] |
no_license
|
https://github.com/rhaehfaos23/jijinalimi_backend_photo
|
df62ec033e10c24fcc747ba4beb7ced967ea485e
|
efb71a429d5accf7f48b70f075a8254b767427c3
|
refs/heads/master
| 2021-05-23T08:34:23.847681 | 2020-04-05T09:50:26 | 2020-04-05T09:50:26 | 253,201,380 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging.handlers
import traceback
import requests
import informations as i
from datetime import datetime
from setting_management import MailgunSetting
class MailgunLogHandler(logging.handlers.HTTPHandler):
def __init__(self, subject: str, setting: MailgunSetting):
super().__init__('', '')
self.subject = subject
self.setting = setting
def emit(self, record) -> None:
text = f'[{record.asctime}] {record.levelname}: {record.message}\n'
if record.exc_info is not None:
text += traceback.format_exc()
res = requests.post(i.mg_request_url.format(self.setting.domain),
auth=('api', self.setting.mg_api_key),
data={
'from': self.setting.sender,
'to': self.setting.recipient,
'subject': self.subject,
'text': text
})
        print(f'[{datetime.now()}] Error e-mail sent. {res.status_code} : {res.reason}')
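
# Example usage (a minimal sketch; 'error report' and my_mailgun_setting are
# placeholders -- the MailgunSetting fields come from setting_management):
#
#   import logging
#   handler = MailgunLogHandler('error report', setting=my_mailgun_setting)
#   logging.getLogger().addHandler(handler)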
|
UTF-8
|
Python
| false | false | 1,100 |
py
| 15 |
custom_logging_handler.py
| 9 | 0.536832 | 0.536832 | 0 | 30 | 35.166667 | 80 |
Yasir-Tec/Code-PYTHON-Django-
| 8,048,768,736,070 |
c128bc2edf35646d05626d444d781dde959ea2b9
|
1ffabbbe36902cb4ab50a868a42501b986ec84c7
|
/migrations/0035_auto_20200113_1041.py
|
ed66c78885a6dc029bc34ba1df83d0c06762d3ed
|
[] |
no_license
|
https://github.com/Yasir-Tec/Code-PYTHON-Django-
|
fe209b114c1d0400d2ce65c4fe0fc508adb4075b
|
b3224732f0862a51201ce862575291e26999ea94
|
refs/heads/master
| 2022-11-26T09:44:33.369429 | 2020-07-29T08:28:58 | 2020-07-29T08:28:58 | 283,441,653 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.0 on 2020-01-13 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('management', '0034_auto_20200111_0346'),
]
operations = [
migrations.AlterField(
model_name='document',
name='username',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='guide',
name='fname',
            field=models.CharField(blank=True, default='NOT ALLOTED', max_length=40),
),
]
|
UTF-8
|
Python
| false | false | 589 |
py
| 58 |
0035_auto_20200113_1041.py
| 34 | 0.578947 | 0.519525 | 0 | 23 | 24.608696 | 85 |
spitis/stable-baselines
| 2,989,297,265,045 |
b29145bf9fdfd19d6df81b1e57cc6b07edab629c
|
4e86a5c284e8f9944af9ff24ab2d82454861c8ce
|
/stable_baselines/common/replay_buffer.py
|
2682b7d9593930633d533c6dca161873994f28a5
|
[
"MIT"
] |
permissive
|
https://github.com/spitis/stable-baselines
|
fa899a3667b87d2f5db868a90ba68fa114b55c74
|
f62cd6698b2427c0fb5ac452b9059a59b22cde81
|
refs/heads/master
| 2020-04-02T08:11:30.875827 | 2018-12-18T12:10:02 | 2018-12-18T12:10:02 | 154,233,254 | 0 | 0 |
MIT
| true | 2018-10-24T22:04:34 | 2018-10-23T00:00:25 | 2018-10-24T19:11:32 | 2018-10-24T22:04:34 | 7,393 | 0 | 0 | 0 |
Python
| false | null |
import numpy as np, random
from collections import OrderedDict, deque
import multiprocessing as mp
from stable_baselines.common.vec_env import CloudpickleWrapper
def worker_init(process_trajectory_fn_wrapper):
global process_trajectory
process_trajectory = process_trajectory_fn_wrapper.var
def worker_fn(trajectory):
global process_trajectory
return process_trajectory(trajectory)
class RingBuffer(object):
"""This is a collections.deque in numpy, with pre-allocated memory"""
def __init__(self, maxlen, shape, dtype='float32'):
"""
A buffer object, when full restarts at the initial position
:param maxlen: (int) the max number of numpy objects to store
:param shape: (tuple) the shape of the numpy objects you want to store
:param dtype: (str) the name of the type of the numpy object you want to store
"""
self.maxlen = maxlen
self.start = 0
self.length = 0
self.shape = shape
self.data = np.zeros((maxlen, ) + shape).astype(dtype)
def __len__(self):
return self.length
def __getitem__(self, idx):
if idx < 0 or idx >= self.length:
raise KeyError()
return self.data[(self.start + idx) % self.maxlen]
def get_batch(self, idxs):
"""
get the value at the indexes
:param idxs: (int or numpy int) the indexes
:return: (np.ndarray) the stored information in the buffer at the asked positions
"""
return self.data[(self.start + idxs) % self.length]
def append(self, var):
"""
Append an object to the buffer
:param var: (np.ndarray) the object you wish to add
"""
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += 1
elif self.length == self.maxlen:
# No space, "remove" the first item.
self.start = (self.start + 1) % self.maxlen
else:
# This should never happen.
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = var
def _append_batch_with_space(self, var):
"""
Append a batch of objects to the buffer, *assuming* there is space.
:param var: (np.ndarray) the batched objects you wish to add
"""
len_batch = len(var)
start_pos = (self.start + self.length) % self.maxlen
self.data[start_pos : start_pos + len_batch] = var
if self.length < self.maxlen:
self.length += len_batch
assert self.length <= self.maxlen, "this should never happen!"
else:
self.start = (self.start + len_batch) % self.maxlen
def append_batch(self, var):
"""
Append a batch of objects to the buffer.
:param var: (np.ndarray) the batched objects you wish to add
"""
len_batch = len(var)
assert len_batch < self.maxlen, 'trying to add a batch that is too big!'
start_pos = (self.start + self.length) % self.maxlen
if start_pos + len_batch <= self.maxlen:
# If there is space, add it
self._append_batch_with_space(var)
else:
# No space, so break it into two batches for which there is space
first_batch, second_batch = np.split(var, [self.maxlen - start_pos])
self._append_batch_with_space(first_batch)
# use append on second call in case len_batch > self.maxlen
self._append_batch_with_space(second_batch)
class ReplayBuffer(object):
def __init__(self, limit, item_shape):
"""
The replay buffer object
:param limit: (int) the max number of transitions to store
:param item_shape: a list of tuples of (str) item name and (tuple) the shape for item
Ex: [("observations0", env.observation_space.shape),\
("actions",env.action_space.shape),\
("rewards", (1,)),\
("observations1",env.observation_space.shape ),\
("terminals1", (1,))]
"""
self.limit = limit
self.items = OrderedDict()
for name, shape in item_shape:
self.items[name] = RingBuffer(limit, shape=shape)
def sample(self, batch_size):
"""
sample a random batch from the buffer
:param batch_size: (int) the number of element to sample for the batch
:return: (list) the sampled batch
"""
if self.size==0:
return []
# Draw such that we always have a proceeding element.
batch_idxs = np.random.randint(low=0, high=(self.size - 1), size=batch_size)
transition = []
for buf in self.items.values():
item = buf.get_batch(batch_idxs)
transition.append(item)
return transition
def add(self, *items):
"""
Appends a single transition to the buffer
:param items: a list of values for the transition to append to the replay buffer,
in the item order that we initialized the ReplayBuffer with.
"""
for buf, value in zip(self.items.values(), items):
buf.append(value)
def add_batch(self, *items):
"""
Append a batch of transitions to the buffer.
:param items: a list of batched transition values to append to the replay buffer,
in the item order that we initialized the ReplayBuffer with.
"""
        if len(items[0].shape) == 1 or len(items[0]) == 1:
self.add(*items)
return
for buf, batched_values in zip(self.items.values(), items):
buf.append_batch(batched_values)
def __len__(self):
return self.size
@property
def size(self):
# Get the size of the RingBuffer on the first item type
return len(next(iter(self.items.values())))
class EpisodicBuffer(object):
def __init__(self, n_subbuffers, process_trajectory_fn, n_cpus=None):
"""
A simple buffer for storing full length episodes (as a list of lists).
:param n_subbuffers: (int) the number of subbuffers to use
"""
self._main_buffer = []
self.n_subbuffers = n_subbuffers
self._subbuffers = [[] for _ in range(n_subbuffers)]
n_cpus = n_cpus or n_subbuffers
self.fn = process_trajectory_fn
self.pool = mp.Pool(n_cpus, initializer=worker_init, initargs=(CloudpickleWrapper(process_trajectory_fn),))
def commit_subbuffer(self, i):
"""
Adds the i-th subbuffer to the main_buffer, then clears it.
"""
self._main_buffer.append(self._subbuffers[i])
self._subbuffers[i] = []
def add_to_subbuffer(self, i, item):
"""
Adds item to i-th subbuffer.
"""
self._subbuffers[i].append(item)
def __len__(self):
return len(self._main_buffer)
def process_trajectories(self):
"""
Processes trajectories
"""
return self.pool.map(worker_fn, self._main_buffer)
def clear_main_buffer(self):
self._main_buffer = []
def clear_all(self):
self._main_buffer = []
self._subbuffers = [[] for _ in range(self.n_subbuffers)]
def close(self):
self.pool.close()
def her_final(trajectory, compute_reward):
"""produces hindsight experiences where desired_goal is replaced with final achieved_goal"""
final_achieved_goal = trajectory[-1][4]
if np.allclose(final_achieved_goal, trajectory[-1][5]):
return [] # don't add successful trajectories twice
hindsight_trajectory = []
for o1, action, reward, o2, achieved_goal, desired_goal in trajectory:
new_reward = compute_reward(achieved_goal, final_achieved_goal, None)
hindsight_trajectory.append([o1, action, new_reward, o2, new_reward, final_achieved_goal])
if np.allclose(new_reward, 1.0):
break
return hindsight_trajectory
def her_future(trajectory, k, compute_reward, process_successful_trajectories=True):
"""produces hindsight experiences where desired_goal is replaced with future achieved_goals
    if short circuit is true, cuts off the end of the trajectory where the achieved goal does not move"""
final_achieved_goal = trajectory[-1][4]
if not process_successful_trajectories and np.allclose(final_achieved_goal, trajectory[-1][5]):
return [] # don't add successful trajectories twice
achieved_goals = np.array([transition[4] for transition in trajectory])
len_ag = len(achieved_goals)
achieved_goals_range = np.array(range(len_ag))
hindsight_experiences = []
for i, (o1, action, _, o2, achieved_goal, _) in enumerate(trajectory):
sampled_goals = np.random.choice(achieved_goals_range[i:], min(k, len_ag - i), replace=False)
sampled_goals = achieved_goals[sampled_goals]
for g in sampled_goals:
reward = compute_reward(achieved_goal, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
return hindsight_experiences
def her_future_landmark(trajectory, k, compute_reward, process_successful_trajectories=True):
"""produces hindsight experiences where desired_goal is replaced with future achieved_goals
    if short circuit is true, cuts off the end of the trajectory where the achieved goal does not move.
Also generates the landmarks for the hindsight experiences where the landmarks are sampled
from the states visited in between the state and hindsight goal."""
final_achieved_goal = trajectory[-1][4]
if not process_successful_trajectories and np.allclose(final_achieved_goal, trajectory[-1][5]):
return [] # don't add successful trajectories twice
achieved_goals = np.array([transition[4] for transition in trajectory])
states = np.array([transition[0] for transition in trajectory])
len_ag = len(achieved_goals)
achieved_goals_range = np.array(range(len_ag))
hindsight_experiences = []
landmark_experiences = []
for i, (o1, action, _, o2, achieved_goal, _) in enumerate(trajectory):
sampled_goals_idx = np.random.choice(achieved_goals_range[i:], min(k, len_ag - i), replace=False)
sampled_goals = achieved_goals[sampled_goals_idx]
for j, g in zip(sampled_goals_idx, sampled_goals):
reward = compute_reward(achieved_goal, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
# Sample a landmark value
if (j-i) > 1: # More than 1 time steps apart
landmark_idx = np.random.choice(range(i+1,j)) # Doesn't include the ith and jth state
sampled_landmark = states[landmark_idx]
landmark_experiences.append([o1, action, sampled_landmark, g])
return hindsight_experiences, landmark_experiences
def her_future_with_states(trajectory, k, compute_reward):
"""produces hindsight experiences where desired_goal is replaced with future achieved_goals
    if short circuit is true, cuts off the end of the trajectory where the achieved goal does not move"""
achieved_goals = np.array([transition[3] for transition in trajectory])
len_ag = len(achieved_goals)
achieved_goals_range = np.array(range(len_ag))
hindsight_experiences = []
for i, (o1, action, _, o2, _, _) in enumerate(trajectory):
sampled_goals = np.random.choice(achieved_goals_range[i:], min(k, len_ag - i), replace=False)
sampled_goals = achieved_goals[sampled_goals]
for g in sampled_goals:
reward = compute_reward(o2, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
return hindsight_experiences
def her_landmark(trajectory, k, compute_reward):
"""produces hindsight experiences where desired_goal is replaced with future achieved_goals,
and initial state is sampled from the states prior to the current state"""
    return  # stub: not implemented
class HerFutureAchievedPastActual():
def __init__(self, k, p, compute_reward, past_goal_memory=10000):
self.k = k # future
self.p = p # past goals
self.compute_reward = compute_reward
self.goal_mem=deque(maxlen=past_goal_memory)
def __call__(self, trajectory):
actual_goal = trajectory[0][5]
self.goal_mem.append(actual_goal)
achieved_goals = np.array([transition[4] for transition in trajectory])
len_ag = len(achieved_goals)
achieved_goals_range = np.array(range(len_ag))
hindsight_experiences = []
for i, (o1, action, _, o2, achieved_goal, _) in enumerate(trajectory):
sampled_goals = np.random.choice(achieved_goals_range[i:], min(self.k, len_ag - i), replace=False)
sampled_goals = list(achieved_goals[sampled_goals])
sampled_goals += random.choices(self.goal_mem, k=self.p)
for g in sampled_goals:
reward = self.compute_reward(achieved_goal, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
return hindsight_experiences
class HerFutureAchievedPastActualLandmark():
def __init__(self, k, p, compute_reward, past_goal_memory=10000):
self.k = k # future
self.p = p # past goals
self.compute_reward = compute_reward
self.goal_mem=deque(maxlen=past_goal_memory)
def __call__(self, trajectory):
actual_goal = trajectory[0][5]
self.goal_mem.append(actual_goal)
achieved_goals = np.array([transition[4] for transition in trajectory])
states = np.array([transition[0] for transition in trajectory])
len_ag = len(achieved_goals)
achieved_goals_range = np.array(range(len_ag))
hindsight_experiences = []
landmark_experiences = []
for i, (o1, action, _, o2, achieved_goal, _) in enumerate(trajectory):
sampled_goals_idx = np.random.choice(achieved_goals_range[i:], min(self.k, len_ag - i), replace=False)
sampled_goals = list(achieved_goals[sampled_goals_idx])
for j, g in zip(sampled_goals_idx, sampled_goals):
reward = self.compute_reward(achieved_goal, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
# Sample a landmark value
if (j-i) > 1: # More than 1 time steps apart
landmark_idx = np.random.choice(range(i+1,j)) # Doesn't include the ith and jth state
sampled_landmark = states[landmark_idx]
landmark_experiences.append([o1, action, sampled_landmark, g])
sampled_actual_goals = random.choices(self.goal_mem, k=self.p)
for g in sampled_actual_goals:
reward = self.compute_reward(achieved_goal, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
return hindsight_experiences, landmark_experiences
class HerFutureAchievedPastAchieved():
def __init__(self, k, p, compute_reward, past_goal_memory=10000):
self.k = k # future
self.p = p # past goals
self.compute_reward = compute_reward
self.goal_mem=deque(maxlen=past_goal_memory)
def __call__(self, trajectory):
achieved_goals = np.array([transition[4] for transition in trajectory])
for ag in achieved_goals:
self.goal_mem.append(ag)
len_ag = len(achieved_goals)
achieved_goals_range = np.array(range(len_ag))
hindsight_experiences = []
for i, (o1, action, _, o2, achieved_goal, _) in enumerate(trajectory):
sampled_goals = np.random.choice(achieved_goals_range[i:], min(self.k, len_ag - i), replace=False)
sampled_goals = list(achieved_goals[sampled_goals])
sampled_goals += random.choices(self.goal_mem, k=self.p)
for g in sampled_goals:
reward = self.compute_reward(achieved_goal, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
return hindsight_experiences
class HerFutureAchievedPastActualVarying():
def __init__(self, k, compute_reward, past_goal_memory=10000):
self.k = k # total goals
self.compute_reward = compute_reward
self.goal_mem=deque(maxlen=past_goal_memory)
def __call__(self, trajectory):
actual_goal = trajectory[0][5]
self.goal_mem.append(actual_goal)
achieved_goals = np.array([transition[4] for transition in trajectory])
len_ag = len(achieved_goals)
achieved_goals_range = np.array(range(len_ag))
hindsight_experiences = []
for i, (o1, action, _, o2, achieved_goal, _) in enumerate(trajectory):
sampled_goals = np.random.choice(achieved_goals_range[i:], min(self.k, len_ag - i), replace=False)
sampled_goals = list(achieved_goals[sampled_goals])
for g in sampled_goals:
if np.random.random() > (float(i) / len_ag + 0.25):
# replace the future goal with an actual goal
g = random.choice(self.goal_mem)
reward = self.compute_reward(achieved_goal, g, None)
hindsight_experiences.append([o1, action, reward, o2, reward, g])
return hindsight_experiences
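
# Example usage of ReplayBuffer (a minimal sketch with made-up shapes):
if __name__ == '__main__':
    demo_buffer = ReplayBuffer(limit=1000,
                               item_shape=[("observations0", (4,)),
                                           ("actions", (2,)),
                                           ("rewards", (1,))])
    # add at least two transitions: sample() draws indices in [0, size - 1)
    for _ in range(2):
        demo_buffer.add(np.ones(4), np.zeros(2), np.array([1.0]))
    obs, act, rew = demo_buffer.sample(batch_size=8)  # arrays of shape (8, 4), (8, 2), (8, 1)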
|
UTF-8
|
Python
| false | false | 16,074 |
py
| 82 |
replay_buffer.py
| 20 | 0.675999 | 0.668844 | 0 | 412 | 38.01699 | 111 |
semenko/linux-rdp-gateway
| 4,277,787,448,652 |
ff7a40b128fcd20a48acfe62fde4ea4529bf898f
|
cb2c0834d63cd3eaaca34ea16bdabc60e22b85d0
|
/app.py
|
1f6bfe1f5983db95fa3caf1f00974732df1618ca
|
[
"MIT"
] |
permissive
|
https://github.com/semenko/linux-rdp-gateway
|
5d79e75f102c777d419e93f98e21941734ad9d81
|
8fd62a7c8c7a9465ba99a9395bf9505fc4c12670
|
refs/heads/master
| 2016-09-08T10:40:41.963646 | 2014-02-09T02:54:23 | 2014-02-09T02:54:23 | 16,055,547 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
A poor man's RDP gateway for linux.
NOTE: THIS IS NOT A REAL RDP GATEWAY!
By: Nick Semenkovich <semenko@alum.mit.edu> http://nick.semenkovich.com
License: MIT
Google auth code derived from Flask-Oauthlib / Bruno Rocha / https://github.com/rochacbruno
"""
from flask import Flask, redirect, render_template, url_for, session, request, Response, jsonify
from flask_oauthlib.client import OAuth
import struct
import socket
app = Flask(__name__, static_url_path='')
app.config.from_pyfile('secrets.cfg') # Add your Google ID & Secret there.
RESTRICTED_DOMAIN = app.config.get('RESTRICTED_DOMAIN') # Require this domain for authentication
SITE_NAME = app.config.get('SITE_NAME')
# username:s:DOMAIN\username
RDP_FILE_TEMPLATE = """
full address:s:%(hostname)s:%(port)d
disable wallpaper:i:0
gatewayusagemethod:i:0
"""
app.secret_key = 'development'
oauth = OAuth(app)
google = oauth.remote_app(
'google',
consumer_key=app.config.get('GOOGLE_ID'),
consumer_secret=app.config.get('GOOGLE_SECRET'),
request_token_params={
'scope': ['https://www.googleapis.com/auth/userinfo.email', 'https://www.googleapis.com/auth/userinfo.profile']
},
base_url='https://www.googleapis.com/oauth2/v1/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://accounts.google.com/o/oauth2/token',
authorize_url='https://accounts.google.com/o/oauth2/auth',
)
@app.route('/')
def index():
if 'google_token' in session:
me = google.get('userinfo')
try:
if me.data[u'hd'] != RESTRICTED_DOMAIN or me.data[u'verified_email'] != True:
session.pop('google_token', None)
return render_template('error.html', domain=RESTRICTED_DOMAIN, site_name=SITE_NAME)
except KeyError:
session.pop('google_token', None)
return render_template('_base.html', site_name=SITE_NAME)
# return jsonify({"data": me.data})
return render_template('authenticated.html', auth_data=me.data, computer_target=app.config.get('COMPUTER_MAP')[str(me.data[u'email'])], site_name=SITE_NAME)
return render_template('_base.html', site_name=SITE_NAME)
@app.route('/computer.rdp')
def getrdp():
if 'google_token' in session:
me = google.get('userinfo')
try:
if me.data[u'hd'] != RESTRICTED_DOMAIN or me.data[u'verified_email'] != True:
session.pop('google_token', None)
return redirect(url_for('logout'))
except KeyError:
session.pop('google_token', None)
return render_template('_base.html', site_name=SITE_NAME)
try:
target = request.args.get('target') # TODO: Sanitize this.
target_hosts = {'hostname': app.config.get('DOMAIN_SECRET'), 'port': app.config.get('PORT_MAP')[target]}
except KeyError:
return redirect(url_for('logout'))
return Response(RDP_FILE_TEMPLATE % target_hosts,
mimetype="application/rdp",
headers={"Content-Disposition":
"attachment; filename=computer.rdp"})
return redirect(url_for('logout'))
@app.route('/login')
def login():
return google.authorize(callback=url_for('authorized', _external=True))
@app.route('/logout')
def logout():
session.pop('google_token', None)
return redirect(url_for('index'))
@app.route('/login/authorized')
@google.authorized_handler
def authorized(resp):
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['google_token'] = (resp['access_token'], '')
#me = google.get('userinfo')
return redirect(url_for('index'))
#return jsonify({"data": me.data})
@google.tokengetter
def get_google_oauth_token():
return session.get('google_token')
# Send a WOL packet
def wake_on_lan(macaddress):
""" Switches on remote computers using WOL. """
# Check macaddress format and try to compensate.
if len(macaddress) == 12:
pass
elif len(macaddress) == 12 + 5:
sep = macaddress[2]
macaddress = macaddress.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
# Pad the synchronization stream.
data = ''.join(['FFFFFFFFFFFF', macaddress * 20])
send_data = ''
# Split up the hex values and pack.
for i in range(0, len(data), 2):
send_data = ''.join([send_data,
struct.pack('B', int(data[i: i + 2], 16))])
# Broadcast it to the LAN.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(send_data, ('<broadcast>', 7))
if __name__ == '__main__':
app.debug = True
app.run()
else:
# Secure app in prod
app.config['SESSION_COOKIE_SECURE'] = True
|
UTF-8
|
Python
| false | false | 4,986 |
py
| 7 |
app.py
| 1 | 0.630967 | 0.626755 | 0 | 152 | 31.802632 | 164 |
noltron000/cartesian-product
| 18,537,078,860,729 |
b3e8fc15245993057360f5de96db9dfe4ad00a82
|
0c17e1618b728b1821b8158773029a7a63cef766
|
/cartesian.py
|
78cfaa21823ddbc0ec9a97d9e43475b290043bf3
|
[] |
no_license
|
https://github.com/noltron000/cartesian-product
|
c4deb99e65a42d2c6f378e9c1c79d2cfdb0bcde6
|
6387ffa9538f9adfc7889f4d2d3ab0682b4d2326
|
refs/heads/master
| 2020-05-16T14:30:18.447493 | 2019-05-10T16:03:31 | 2019-05-10T16:03:31 | 183,105,415 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def cartesian_product(array):
'''
Vocabulary:
- a collection is an array of arrays
- a group is an array within a collection
- old is the user's input
- new is the transformed output
- tmp is data not added to new for technical reasons
Variable Names:
- old_collection is the user's input
- old_group is a nested array in old_collection
- new_collection is the expected output
- new_group is a nested array in new_collection
- tmp_collection is data not yet added to new_collection
- tmp_group is data not yet added to tmp_collection
'''
# new_collection starts as an empty array
# old_collection equals the input array parameter
new_collection = [[]]
old_collection = array
# start by iterating over the groups in old_collection
for old_group in old_collection:
# new_collection can't be modified while in a loop
# tmp_collection will store a subset of new data
# it will be added to new_collection after the next loop
tmp_collection = []
# iterate over new_collections
# it will start with just an array with one empty array
# every time we go through another old_group it fills up
for new_group in new_collection:
# finally, look through the data in old_group
# this data is important;
# its what we need to transfer to the new structure
for data in old_group:
# new_group contains a set of important data
# there is still yet more data to append to it...
# ...set tmp_group to new_group and add data to it
tmp_group = new_group.copy()
tmp_group.append(data)
# now transfer the data to our tmp_collection
# it will be added to the new_collection to output
tmp_collection.append(tmp_group)
# exit old_group loop
pass
# exit new_collection loop
pass
# an item can't be changed while being looped over;
# we are no longer looping over new_collection
new_collection = tmp_collection
# exit old_collection loop
pass
# after the loops are over, we can return the result
return new_collection
if __name__ == '__main__':
# this part is just calling the function as an example
X={'X'}
Y={'Y'}
Z={'Z'}
simple_array = [[1,2,3],['a','b','c'],[X,Y,Z]]
result = cartesian_product(simple_array)
for group in result:
print(group)
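
# With three groups of three items each, the loop above prints all 27
# combinations, starting with [1, 'a', {'X'}], [1, 'a', {'Y'}], [1, 'a', {'Z'}], ...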
|
UTF-8
|
Python
| false | false | 2,249 |
py
| 2 |
cartesian.py
| 1 | 0.703424 | 0.70209 | 0 | 78 | 27.833333 | 58 |
4140/bl1
| 1,322,849,929,785 |
da68bcc5b292db122fd0957a81545d733d8a55ca
|
fc6f4abd0ec1ef77a02e4c56d75567475f96fd87
|
/bl1/matches/migrations/0001_initial.py
|
db718bacf7fdaa5d10a69aac40c6293dc1b6d48a
|
[] |
no_license
|
https://github.com/4140/bl1
|
c2c70eff8fb3fe50a5b3423b25dfa6f5e954946d
|
e02f4ddc7da9935a751bfb75b067e7f51cc492c7
|
refs/heads/master
| 2020-03-06T07:04:16.965178 | 2017-03-27T08:23:00 | 2017-03-27T08:23:00 | 86,173,051 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-24 16:36
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Match',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('source_id', models.IntegerField(unique=True)),
('start', models.DateTimeField()),
('team1_goals', models.IntegerField(default=0)),
('team2_goals', models.IntegerField(default=0)),
('goals', django.contrib.postgres.fields.jsonb.JSONField()),
('stadium', models.CharField(default='n/a', max_length=100)),
('finished', models.CharField(choices=[('false', 'Not finished'), ('true', 'Finished')], default='true', max_length=10)),
],
),
migrations.CreateModel(
name='MatchRound',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('source_id', models.IntegerField()),
('logo', models.URLField(max_length=250)),
('matches_played', models.IntegerField(default=0)),
('matches_won', models.IntegerField(default=0)),
('matches_lost', models.IntegerField(default=0)),
],
),
migrations.AddField(
model_name='match',
name='match_round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='matches.MatchRound'),
),
migrations.AddField(
model_name='match',
name='team1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team1', to='matches.Team'),
),
migrations.AddField(
model_name='match',
name='team2',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team2', to='matches.Team'),
),
migrations.AddField(
model_name='match',
name='winner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='winner', to='matches.Team'),
),
migrations.AlterUniqueTogether(
name='match',
unique_together=set([('team1', 'team2', 'match_round')]),
),
]
|
UTF-8
|
Python
| false | false | 3,029 |
py
| 13 |
0001_initial.py
| 6 | 0.563552 | 0.550347 | 0 | 74 | 39.932432 | 146 |
ERAU2020/my-first-binder
| 14,070,312,900,529 |
078d754fed51d3400de63a3ee4b2ce58d6e508b4
|
800574cb726931e45fdfdb9d09b7896768ca7ce5
|
/linear_regression_sept2020.py
|
47bd054b61d4c2ec729923ce4816315dfa70917d
|
[] |
no_license
|
https://github.com/ERAU2020/my-first-binder
|
4a59bbe78c2b035eb464809fa8a36d2a8d73a097
|
769e7f1dacd64a13a4afe2ae8138cf077d8d427d
|
refs/heads/master
| 2023-01-07T07:14:47.669421 | 2020-11-03T18:41:59 | 2020-11-03T18:41:59 | 286,777,790 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 14:14:44 2020
@author: lehrs
"""
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# represents the heights of a group of people in metres
heights = [[1.6], [1.65], [1.7], [1.73], [1.8]]
# represents the weights of a group of people in kgs
weights = [[60], [65], [72.3], [75], [80]]
plt.title('Weights plotted against heights')
plt.xlabel('Heights in metres')
plt.ylabel('Weights in kilograms')
plt.plot(heights, weights, 'k.')
# axis range for x and y
plt.axis([1.5, 1.85, 50, 90])
plt.grid(True)
# Create and fit the model
model = LinearRegression()
model.fit(X=heights, y=weights)
plt.show()
# make a prediction, expects multidimension array
# make a single prediction
a1 = model.predict([[1.75]])
a1[0,0] # comes back as a multi-dimensional array first row, first column [0][0] or [0,0]
a1[0][0]
# Out[25]: 76.0387
# plot the regression line
extreme_heights = [[0], [1.8]]
extreme_weights = model.predict(extreme_heights)
plt.plot(extreme_heights, extreme_weights, 'b*')
print(model.intercept_[0])
print(np.round(model.intercept_[0], 2))
print(model.coef_)
print(model.coef_[0])
print(model.coef_[0][0])
print(np.round(model.coef_[0][0], 2))
pw = model.predict(heights) # compute predicted weights from the model
plt.plot(heights, weights, 'b*')
plt.plot(heights, pw, 'k.')
plt.plot(heights, pw, 'r')
plt.show()
# bottom of page 104 Residual Sum of Squares
# verify this old school way
weights - pw
((weights - pw)**2)
np.sum((weights-pw)**2)
mu = np.mean(weights)
print('Mean weight %.3f' % mu)
dw_sum = 0
tss = 0
for i in range(len(weights)):
dw = weights[i][0]-pw[i][0]
dw_squared = dw**2
dw_sum = dw_sum + dw_squared
    var = weights[i][0] - mu
var_squared = var**2
tss = tss + var_squared
print('%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' % (weights[i][0], pw[i][0], dw, dw_squared, var, var_squared))
print('residual sum is %.3f' % dw_sum)
print('total sum is %.3f' % tss)
print('R Squared %.4f' % (1 - dw_sum/tss))
print('Residual sum of squares: %.2f' %
np.sum((weights - model.predict(heights)) ** 2))
# RSS should be small as possible
# test data
heights_test = [[1.58], [1.62], [1.69], [1.76], [1.82]]
weights_test = [[58], [63], [72], [73], [85]]
# Total Sum of Squares (TSS)
weights_test_mean = np.mean(np.ravel(weights_test))
TSS = np.sum((np.ravel(weights_test) -
weights_test_mean) ** 2)
print("TSS: %.2f" % TSS)
# Residual Sum of Squares (RSS)
RSS = np.sum((np.ravel(weights_test) -
np.ravel(model.predict(heights_test)))
** 2)
print("RSS: %.2f" % RSS)
# R_squared
R_squared = 1 - (RSS / TSS)
print("R-squared: %.2f" % R_squared)
# using scikit-learn to calculate r-squared
print('R-squared: %.4f' % model.score(heights_test,
weights_test))
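# Hedged cross-check (my addition, not part of the original exercise): the same
# line can be fit with numpy.polyfit on the flattened arrays; the slope and
# intercept should match model.coef_[0][0] and model.intercept_[0] up to rounding.
slope, intercept = np.polyfit(np.ravel(heights), np.ravel(weights), 1)
print('polyfit slope %.4f, intercept %.4f' % (slope, intercept))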
|
UTF-8
|
Python
| false | false | 3,042 |
py
| 22 |
linear_regression_sept2020.py
| 17 | 0.608481 | 0.563445 | 0 | 112 | 25.125 | 109 |
YashAgarwalDev/Learn-Python
| 13,262,859,033,338 |
f3296070845b027b63aa6694aa70a53ce9f01b0b
|
bcab933a9c679ebbe83f5658f0a6f36fc069329d
|
/Break2.py
|
e412a44fb07637020e247b0d81e5ed0789e1fb3c
|
[] |
no_license
|
https://github.com/YashAgarwalDev/Learn-Python
|
3c8d8db6cf260de52db24de01cafb6ef317d90ef
|
02e2d3fc814783258abf27bde4041d90b2320229
|
refs/heads/master
| 2020-06-24T01:58:42.095074 | 2020-05-08T06:47:36 | 2020-05-08T06:47:36 | 198,815,934 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
Number = [34,207,99,89,77]
for n in Number:
if(n%5==0):
print(n)
break;
else:
    # this for-else branch runs only when the loop finishes without hitting break
    print("didn't find any number divisible by 5")
|
UTF-8
|
Python
| false | false | 179 |
py
| 34 |
Break2.py
| 32 | 0.536313 | 0.458101 | 0 | 7 | 22.428571 | 65 |
bskaggs/rk
| 16,561,393,895,560 |
bc809cd8613be94056d367b31f14c453eac03302
|
a05b9b819cced81c1a7a4852dd07c5dc36c36a6b
|
/scripts/rkscript
|
9773fdeeaeacfea71b8c69af17d8c890bd9ef4fe
|
[
"Unlicense"
] |
permissive
|
https://github.com/bskaggs/rk
|
0ef5f263242d5003863960253efb81bd578d7941
|
505f37cf07a831f2a09f68b16e38e484c9cbd9fd
|
refs/heads/master
| 2020-07-11T16:41:36.351985 | 2016-05-28T15:33:52 | 2016-05-28T15:33:52 | 59,901,115 | 1 | 0 | null | true | 2016-05-28T15:27:38 | 2016-05-28T15:27:38 | 2016-04-20T08:26:22 | 2015-10-23T21:19:44 | 374 | 0 | 0 | 0 | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Remote jupyter kernel via SSH
Make sure that you can login to a remote machine without entering password.
"""
from datetime import datetime
from errno import EACCES, ENOTDIR
from getpass import getuser
from json import load
from os import chmod, getcwd, getpid, makedirs, remove, strerror
from os.path import dirname, exists, expanduser, isfile, join, split
from site import getsitepackages
from sys import argv
from configobj import ConfigObj
from execnet import makegateway
from paramiko.util import log_to_file
from rk.ssh import paramiko_tunnel
arguments_number = 3 # interpreter, local_connection_file,
# remote_username_at_remote_host
messages = {} # Strings for output
week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',
'Sunday']
module_name = "rk"
module_location = join(getsitepackages()[0], module_name)
config_rk_abs_path = join(module_location, "config/rk.ini")
config = ConfigObj(config_rk_abs_path)
def create_directory(directory_name, mode=0o777):
"""Recursive directory creation function
os.chmod work only for last directory
"""
try:
makedirs(directory_name, mode)
except Exception as exception:
error_code = exception.errno
if error_code == EACCES: # 13 (Python3 PermissionError)
print(messages["_error_NoRoot"])
exit(1)
elif error_code == ENOTDIR: # 20 (Python3 NotADirectoryError)
path = directory_name
while path != '/':
if isfile(path):
try:
remove(path)
except Exception as exception: # Python3 PermissionError
error_code = exception.errno
if error_code == EACCES: # 13
print(messages["_error_NoRoot"])
exit(1)
else:
print(messages["_error_Oops"] %
strerror(error_code))
exit(1)
path = dirname(path)
try:
makedirs(directory_name, mode)
except Exception as exception: # Python3 PermissionError
error_code = exception.errno
if error_code == EACCES: # 13
print(messages["_error_NoRoot"])
exit(1)
else:
print(messages["_error_Oops"] % strerror(error_code))
exit(1)
else:
print(messages["_error_Oops"] % strerror(error_code))
exit(1)
def get_date_time():
"""Get yyyy-mm-dd_hh.mm.ss"""
def normalize(element):
"""Add '0' from front"""
if len(element) == 1:
element = '0' + element
return element
now = datetime.now()
year = str(now.year)
month = normalize(str(now.month))
day = normalize(str(now.day))
hour = normalize(str(now.hour))
minute = normalize(str(now.minute))
second = normalize(str(now.second))
date = year + '-' + month + '-' + day
time = hour + '.' + minute + '.' + second
date_time = date + '_' + time
return date_time
def create_messages():
"""Create "messages" dictionary"""
config_messages_rel_path = config["config_messages_rel_path"]
config_messages_abs_path = join(module_location, config_messages_rel_path)
with open(config_messages_abs_path, 'r') as f:
messages_list = f.read().splitlines()
for i in range(0, len(messages_list), 2):
messages[messages_list[i]] = messages_list[i+1]
create_messages()
argv_len = len(argv) - 1 # argv[0]: is the script name
if argv_len == arguments_number:
interpreter = argv[1] # An entry point or an absolute path
# to language interpreter on a remote machine
local_connection_file = argv[2] # Absolute path of a local connection file
remote_username_at_remote_host = argv[3] # Just a remote host or,
# if your username is different on a remote machine,
# use this syntax: remote username AT remote host.
else:
print(messages["_error_ArgumentsNumber"] % (arguments_number, argv_len))
exit(1)
local_username = getuser()
if '@' in remote_username_at_remote_host:
remote_username, remote_host = remote_username_at_remote_host.split('@')
if local_username != remote_username:
# Local username is NOT the same as a remote username
remote_connection_file = local_connection_file.replace(local_username,
remote_username)
else:
# Local username is the same as a remote username
remote_connection_file = local_connection_file
remote_username_at_remote_host = remote_host
else:
# Local username is the same as a remote username
remote_connection_file = local_connection_file
remote_username = local_username
remote_host = remote_username_at_remote_host
# Load a connection file
with open(local_connection_file, 'r') as f:
cfg = load(f)
# GET a current working directory of a process
cwd = getcwd()
# Launch a kernel process on a remote machine
gw = makegateway("ssh=%s//python=%s" % (remote_username_at_remote_host,
interpreter))
ch = gw.remote_exec("""
import socket
from json import dumps
from os import chdir, getcwd, getpid, remove
from os.path import exists, expanduser, isdir, isfile, join, split
from struct import pack
try:
from ipykernel.kernelapp import launch_new_instance
except ImportError:
from IPython.kernel.zmq.kernelapp import launch_new_instance
remote_connection_file = "%s"
cfg = %s
last_cwd = "%s"
remote_ports = {}
ports = [k for k,v in cfg.items() if k.endswith("_port")]
# Select random ports
for port in ports:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, pack("ii", 0, 0))
sock.bind(('', 0)) # Random free port from 1024 to 65535
sock_name = sock.getsockname()[1]
remote_ports[port] = sock_name
cfg[port] = sock_name
sock.close()
channel.send(remote_ports)
remote_pid = getpid()
channel.send(remote_pid)
if not exists(remote_connection_file):
dir_name, file_name = split(remote_connection_file)
if exists(dir_name) and isdir(dir_name):
# Write a connection file
with open(remote_connection_file, 'w') as f:
f.write(dumps(cfg))
else:
default_j4_dir_name = "/run/user/1000/jupyter"
if ((default_j4_dir_name != dir_name) and
exists(default_j4_dir_name) and
isdir(default_j4_dir_name)):
remote_connection_file = join(default_j4_dir_name, file_name)
# Write a connection file to jupyter 4 "j4" default dir
with open(remote_connection_file, 'w') as f:
f.write(dumps(cfg))
else:
path = "~/.ipython/profile_default/security"
default_j3_dir_name = (expanduser(path))
if ((default_j3_dir_name != dir_name) and
exists(default_j3_dir_name) and
isdir(default_j3_dir_name)):
remote_connection_file = join(default_j3_dir_name,
file_name)
# Write a connection file to jupyter 3 "j3" default dir
with open(remote_connection_file, 'w') as f:
f.write(dumps(cfg))
else:
cwd = getcwd()
remote_connection_file = join(cwd, file_name)
# Write a connection file to cwd
with open(remote_connection_file, 'w') as f:
f.write(dumps(cfg))
# SET a current working directory of a process
if exists(last_cwd) and isdir(last_cwd):
chdir(last_cwd)
launch_new_instance(["-f", remote_connection_file])
# Delete a connection file
if exists(remote_connection_file) and isfile(remote_connection_file):
remove(remote_connection_file)
""" % (remote_connection_file, cfg, cwd))
# Local and remote ports dicts
local_ports = {k: v for k,v in cfg.items() if k.endswith("_port")}
remote_ports = ch.receive()
# Local and remote PIDs
local_pid = getpid()
remote_pid = ch.receive()
# Create paramiko log file
paramiko_log_location, paramiko_log_file_name = split(local_connection_file)
paramiko_log_file_name = paramiko_log_file_name.replace("kernel", "paramiko")
paramiko_log_file_name = paramiko_log_file_name.replace(".json", ".txt")
paramiko_log_abs_path = join(paramiko_log_location, paramiko_log_file_name)
log_to_file(paramiko_log_abs_path)
# Redirect localhost:local_port to remote_host:remote_port
for k,v in local_ports.items():
paramiko_tunnel(v, remote_ports[k], remote_username_at_remote_host)
# Create rk log file
date_time = get_date_time()
date, time = date_time.replace('.', ':').split('_')
date = date + ' ' + week[datetime.weekday(datetime.now())]
rk_log_file_name = "%s@%s_%s.txt" % (local_username, remote_host, date_time)
rk_log_location = config["rk_log_location"]
if '~' in rk_log_location:
rk_log_location = expanduser(rk_log_location)
rk_log_abs_path = join(rk_log_location, rk_log_file_name)
if exists(rk_log_location) and isfile(rk_log_location):
try:
remove(rk_log_location)
except Exception as exception: # Python3 PermissionError
error_code = exception.errno
if error_code == EACCES: # 13
print(messages["_error_NoRoot"])
exit(1)
else:
print(messages["_error_Oops"] % strerror(error_code))
exit(1)
if not exists(rk_log_location):
create_directory(rk_log_location, 0o777)
path = rk_log_location
while path != '/':
try:
chmod(path, 0o777)
except OSError:
break
path = dirname(path)
try:
with open(rk_log_abs_path, 'w') as f:
f.write("date: %s\n" % date)
f.write("time: %s\n" % time)
f.write("\n")
if local_username == remote_username:
f.write("usernames: %s\n" % local_username)
else:
f.write("usernames: %s<->%s\n" % (local_username, remote_username))
f.write("remote host: %s\n" % remote_host)
f.write("\n")
for k,v in local_ports.items():
f.write("%ss: %s<->%s\n" % (k.replace('_', ' '), v,
remote_ports[k]))
f.write("\n")
f.write("pids: %s<->%s\n" % (local_pid, remote_pid))
except Exception as exception:
error_code = exception.errno
if error_code == EACCES: # 13 (Python3 PermissionError)
print(messages["_error_NoRoot"])
exit(1)
else:
print(messages["_error_Oops"] % strerror(error_code))
exit(1)
# Waits for closing, i.e. remote_exec() finish
ch.waitclose()
# Delete paramiko log file
if exists(paramiko_log_abs_path) and isfile(paramiko_log_abs_path):
try:
remove(paramiko_log_abs_path)
except Exception as exception:
error_code = exception.errno
if error_code == EACCES: # 13 (Python3 PermissionError)
print(messages["_error_NoRoot"])
exit(1)
else:
print(messages["_error_Oops"] % strerror(error_code))
exit(1)
# Delete rk log file
if exists(rk_log_abs_path) and isfile(rk_log_abs_path):
try:
remove(rk_log_abs_path)
except Exception as exception:
error_code = exception.errno
if error_code == EACCES: # 13 (Python3 PermissionError)
print(messages["_error_NoRoot"])
exit(1)
else:
print(messages["_error_Oops"] % strerror(error_code))
exit(1)
|
UTF-8
|
Python
| false | false | 12,029 | 9 |
rkscript
| 4 | 0.590905 | 0.582925 | 0 | 309 | 37.928803 | 79 |
|
bernardbeckerman/spark
| 13,743,895,369,248 |
e5c14eea6037f81653905fe80ecbe44cd09990e2
|
f14b1cc501b88442468b6a52f3bc1117970216cd
|
/src/upvote_percentage_by_favorites.py
|
b36163737a0c99dd332b2b9b74c08dc5b9fec882
|
[] |
no_license
|
https://github.com/bernardbeckerman/spark
|
f546764a34d24f11a9c27d3eb8c79763cf79ed80
|
5f85e9fbd6bfb0f1b5eb46f3d678a20df2709aa8
|
refs/heads/master
| 2021-06-09T00:23:18.509750 | 2016-12-13T23:27:50 | 2016-12-13T23:27:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import print_function
from collections import Counter
from lxml import etree
import gzip
from pyspark import SparkContext
sc = SparkContext("local[*]", "temp")
import os
from collections import defaultdict
def localpath(path):
return 'file://' + str(os.path.abspath(os.path.curdir)) + '/' + path
def isLine(line):
return line.strip() != '' and line.strip().split()[0] == '<row'
#class Record(object):
# def __init__(self, post, vote):
# self.post = post
# self.vote = vote
def parse(line):
root = etree.fromstring(line)
dv = dict(root.items())
return (dv["PostId"], dv["VoteTypeId"])
def calc_ratio(x):
d = dict(Counter(list(x[1])))
k2 = d.get('2',0)
k3 = d.get('3',0)
k5 = d.get('5',0)
if (k2 + k3 != 0):
return (k5, float(k2)/(k2+k3))
return (k5, -1)
def agg_in(x,y):
return (x[0] + y*(y!=-1), x[1] + (y!=-1))
def agg_out(x,y):
return (x[0] + y[0], x[1] + y[1])
def ave_agg(v):
if v[1] != 0:
return (v[0]/v[1])
return -1
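# How the (sum, count) accumulator works with aggregateByKey (comments only, my
# addition): agg_in folds one ratio into a partition-local accumulator and skips
# the -1 sentinel, agg_out merges accumulators across partitions, and ave_agg
# turns the final pair into an average (or -1 if no valid ratio was seen), e.g.
#   agg_in(agg_in((0, 0), 0.5), 0.75) -> (1.25, 2)
#   agg_in((0, 0), -1)                -> (0, 0)
#   ave_agg((1.25, 2))                -> 0.625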
data = sc.textFile(localpath("stats/allVotes/*")) \
.filter(isLine) \
.map(parse) \
.groupByKey() \
.map(calc_ratio) \
.aggregateByKey((0,0), agg_in, agg_out) \
.mapValues(ave_agg) \
.collect()
#data.foreach(print)
#print data
for entry in data:
print (str((entry[0],entry[1])) + ",")
|
UTF-8
|
Python
| false | false | 1,395 |
py
| 9 |
upvote_percentage_by_favorites.py
| 7 | 0.558423 | 0.5319 | 0 | 59 | 22.644068 | 72 |
masMAY/Latihan-Dasar-Python
| 16,114,717,319,257 |
f2b87e16859faedb33374f1e297e60c3f37c079e
|
515c527e53b5a0c39365c491cdf9ce46c7ca5eb4
|
/python/latihan python dasar/nested_for.py
|
ba815aca37d7a467c44dbf44564be88a50c1d256
|
[] |
no_license
|
https://github.com/masMAY/Latihan-Dasar-Python
|
e1627326b94d939c879670ed77bede237a1be2d6
|
820ae7d092039f234b1a718ce247d627841d2f63
|
refs/heads/master
| 2021-01-11T20:14:17.031034 | 2017-01-17T03:37:26 | 2017-01-17T03:37:26 | 79,073,019 | 0 | 1 | null | false | 2017-01-17T03:37:26 | 2017-01-16T01:49:16 | 2017-01-16T02:06:53 | 2017-01-17T03:37:26 | 15 | 0 | 1 | 0 |
Python
| null | null |
print "nested foo on prime number case among 1 t0 20"
for i in range(1,20):
count_zero_remainder = 0
for j in range (1, i+1):
num_remainder = i%j
print num_remainder
if num_remainder == 0:
count_zero_remainder = count_zero_remainder + 1
    if count_zero_remainder == 2:
        print i, "is a prime number"
    else:
        print i, "is not a prime number"
|
UTF-8
|
Python
| false | false | 357 |
py
| 18 |
nested_for.py
| 17 | 0.67507 | 0.638655 | 0 | 13 | 26.461538 | 53 |
tangermi/nlp
| 13,718,125,549,375 |
2ca6a38da8b53553c68c67588a19630182572d76
|
c06d18ac5b87b3b82fc486454c422b119d6c1ee9
|
/src/evaluation/similarity/siamese.py
|
7e5ef4540df15b7b374f58e3bdea4c59b0b8bd38
|
[] |
no_license
|
https://github.com/tangermi/nlp
|
b3a4c9612e6049463bf12bc9abb7aff06a084ace
|
aa36b8b20e8c91807be73a252ff7799789514302
|
refs/heads/master
| 2022-12-09T12:33:15.009413 | 2020-04-03T04:03:24 | 2020-04-03T04:03:24 | 252,056,010 | 0 | 0 | null | false | 2022-12-08T07:26:55 | 2020-04-01T02:55:05 | 2020-04-03T04:03:27 | 2022-12-08T07:26:55 | 37,923 | 0 | 0 | 7 |
Jupyter Notebook
| false | false |
# -*- coding:utf-8 -*-
from ..base import Base
import os
import numpy as np
import tensorflow as tf
from utils.similarity.siamese import Similarity as s
class Siamese(Base):
def __init__(self, dic_config={}, dic_engine={}, dic_score={}):
self.dic_engine = dic_engine
self.dic_score = dic_score
Base.__init__(self, dic_config)
self.logger.info(dic_engine)
self.logger.info(self.dic_score)
def init(self):
self.test_path = os.path.join(self.dic_engine['_in'], self.dic_engine['test'])
self.model_path = os.path.join(self.dic_engine['model_in'], self.dic_engine['model_file'])
self.out_file = os.path.join(self.dic_engine['_out'], self.dic_score['out_file'])
def load(self):
self.model = tf.keras.models.load_model(self.model_path, compile=False)
with np.load(self.test_path) as test:
self.te_pairs_1 = test['te_pairs_1']
self.te_y_1 = test['te_y_1']
self.te_pairs_2 = test['te_pairs_2']
self.te_y_2 = test['te_y_2']
self.te_pairs_3 = test['te_pairs_3']
self.te_y_3 = test['te_y_3']
def evaluate(self):
model = self.model
te_pairs_1 = self.te_pairs_1
te_y_1 = self.te_y_1
te_pairs_2 = self.te_pairs_2
te_y_2 = self.te_y_2
te_pairs_3 = self.te_pairs_3
te_y_3 = self.te_y_3
# compute final accuracy on training and test sets
y_pred = model.predict([te_pairs_1[:, 0], te_pairs_1[:, 1]])
self.te_acc_1 = s.compute_accuracy(te_y_1, y_pred)
# predict test set 2
y_pred = model.predict([te_pairs_2[:, 0], te_pairs_2[:, 1]])
self.te_acc_2 = s.compute_accuracy(te_y_2, y_pred)
# predict test set 3
y_pred = model.predict([te_pairs_3[:, 0], te_pairs_3[:, 1]])
self.te_acc_3 = s.compute_accuracy(te_y_3, y_pred)
def dump(self):
with open(self.out_file, 'w', encoding='utf-8') as f:
f.seek(0)
            f.write('Model accuracy:')
            f.write('\n* Test set accuracy: %0.2f%%' % (100 * self.te_acc_1))
            f.write('\nTested on items from the 4 classes ["dress", "sneaker", "bag", "shirt"] (classes not seen during training):')
            f.write('\n* Test accuracy: %0.2f%%' % (100 * self.te_acc_2))
            f.write('\nTested on the full dataset (including classes not seen during training):')
            f.write('\n* Test accuracy: %0.2f%%' % (100 * self.te_acc_3))
f.truncate()
def run(self):
self.init()
self.load()
self.evaluate()
self.dump()
|
UTF-8
|
Python
| false | false | 2,633 |
py
| 342 |
siamese.py
| 330 | 0.546944 | 0.520575 | 0 | 68 | 35.808824 | 98 |
Bye-lemon/DUT-CS-Homework
| 6,296,422,086,009 |
cdb49db1451a87acb6982ca72f5d663f76903748
|
69ceee163e8ed655840a2dd026c5de185991fe35
|
/Operating System/Memory Schedule/Scheduler.py
|
8f20c1a55f5aa8914bcb82a1d7f55cbf450d4226
|
[] |
no_license
|
https://github.com/Bye-lemon/DUT-CS-Homework
|
1b3dfaa44af66f04ba51707670a78ef96401a308
|
5437002925a22a644b15afca0394f24532ec3ab5
|
refs/heads/master
| 2021-06-12T11:02:11.009339 | 2020-12-27T02:52:14 | 2020-12-27T02:52:14 | 152,850,552 | 2 | 0 | null | false | 2021-03-20T00:49:55 | 2018-10-13T08:16:56 | 2020-12-27T02:52:19 | 2021-03-20T00:49:54 | 18,113 | 1 | 0 | 2 |
C
| false | false |
from DataStructure import PageTable
class AbstractMemoryScheduler(object):
def __init__(self, maxsize):
super().__init__()
self.pageTable = PageTable(maxsize)
self.requestTimes = 0
self.hitTimes = 0
def hit(self):
self.requestTimes += 1
self.hitTimes += 1
def notHit(self):
self.requestTimes += 1
def put(self):
pass
def report(self):
print("完成调度,缺页次数" + str(self.requestTimes - self.hitTimes) + ",缺页率" +
str((self.requestTimes - self.hitTimes) / self.requestTimes) + "。")
class LRUMemoryScheduler(AbstractMemoryScheduler):
def __init__(self, maxsize):
super().__init__(maxsize)
def put(self, page):
if self.pageTable.exist(page):
self.hit()
for k, v in self.pageTable.data.items():
self.pageTable.data.update({k: 0} if k == page else {k: v + 1})
elif not self.pageTable.full():
self.notHit()
for k, v in self.pageTable.data.items():
self.pageTable.data.update({k: v + 1})
self.pageTable.data.update({page: 0})
else:
self.notHit()
lruPage, lruCost = None, -1
for k, v in self.pageTable.data.items():
if lruPage is None or v > lruCost:
lruPage, lruCost = k, v
self.pageTable.data.pop(lruPage[0])
print("Drop Page {}".format(lruPage[0]))
for k, v in self.pageTable.data.items():
self.pageTable.data.update({k: v + 1})
self.pageTable.data.update({page: 0})
print(self.pageTable)
class FIFOMemoryScheduler(AbstractMemoryScheduler):
def __init__(self, maxsize):
super().__init__(maxsize)
def put(self, page):
if self.pageTable.exist(page):
self.hit()
elif not self.pageTable.full():
self.notHit()
self.pageTable.data.update({page: 0})
else:
self.notHit()
fifoPage = self.pageTable.data.popitem(last=False)
print("Drop Page {}".format(fifoPage[0]))
self.pageTable.data.update({page: 0})
print(self.pageTable)
if __name__ == "__main__":
#lru = LRUMemoryScheduler(3)
lru = FIFOMemoryScheduler(3)
lru.put('2')
lru.put('3')
lru.put('2')
lru.put('1')
lru.put('5')
lru.put('2')
lru.put('4')
lru.put('5')
lru.put('3')
lru.put('2')
lru.put('5')
lru.put('2')
lru.report()
|
UTF-8
|
Python
| false | false | 2,573 |
py
| 128 |
Scheduler.py
| 52 | 0.541061 | 0.52888 | 0 | 87 | 28.252874 | 81 |
jung9156/studies
| 8,220,567,412,418 |
f12b52c95b25a8b9fd742003967d8ddd349d8685
|
ee1e5cd7b3c04e6ac848287938b1698ca7b578a4
|
/lecture/algorithm/problem/4731.항구에 들어오는 배.py
|
609ee98c5fbb064a85755c9a4a9bb481811d9e50
|
[] |
no_license
|
https://github.com/jung9156/studies
|
d2a3aea773c3c54764232c077293e5a107e57e3a
|
7dd20bb5e4098c54168ca846d7f73d391564f72f
|
refs/heads/master
| 2023-01-04T14:54:51.986609 | 2020-10-22T06:01:21 | 2020-10-22T06:01:21 | 296,518,221 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
tn = int(input())
for ir in range(tn):
print('#{} '.format(ir + 1), end='')
n = int(input())
a = []
a += [int(input()) for ii in range(n)]
A = set()
ma = a[-1]
A.add(1)
if len(a) == 1:
cnt = 1
else:
cnt = 0
for i in a:
if i not in A:
A.add(i)
cnt += 1
gab = i - 1
while True:
i += gab
if i > ma:
break
A.add(i)
if a == list(A):
break
print(cnt)
|
UTF-8
|
Python
| false | false | 550 |
py
| 462 |
4731.항구에 들어오는 배.py
| 310 | 0.334545 | 0.32 | 0 | 28 | 18.678571 | 42 |
msherwood10/python
| 12,146,167,555,641 |
645cd2cb84e54a1c0efda7fd6eea3a5ae28389ad
|
8aa6940df17aca0ab69fffaae9c86becdd32d458
|
/assn-7.2-2.py
|
ff1aa33acfc6c53e6dc01fac86f31290661cc275
|
[] |
no_license
|
https://github.com/msherwood10/python
|
fc8fbbacbd86f776e1a0fb9d107aa7b4bcc2b993
|
1ce3fe4de5b87e0ba29828d59a453f0b51678e7d
|
refs/heads/master
| 2021-01-25T10:07:27.286821 | 2017-02-16T03:03:29 | 2017-02-16T03:03:29 | 28,399,696 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Use the file name mbox-short.txt as the file name
fname = "C:\Python27\mbox-short.txt"
fh = open(fname)
xspam = list()
count = 0
for line in fh:
if line.startswith("X-DSPAM-Confidence:"):
zeropos = line.find("0")
eol = line.find("\n")
spamtext = line[zeropos:eol]
spamval = float(spamtext)
xspam.append(spamval)
count = count + 1
xspamavg = sum(xspam)/count
print "Average spam confidence:",xspamavg
|
UTF-8
|
Python
| false | false | 453 |
py
| 26 |
assn-7.2-2.py
| 23 | 0.635762 | 0.624724 | 0 | 15 | 29.266667 | 51 |
wendazhou/reversible-inductive-construction
| 5,703,716,599,888 |
2b50687c157990d5adc0e15bb4a6e24771d19602
|
c584941c8c6005ed27d770f72b4ef4693cfeeff5
|
/code/genric/laman/_data.py
|
93606ea9c78563c61340e54165037c2e1a9983f1
|
[
"MIT"
] |
permissive
|
https://github.com/wendazhou/reversible-inductive-construction
|
2820b5e560d1cb127d653fd39db7d804de5e76db
|
14815d1b5ef5a35a569c0793888bc5548acd64be
|
refs/heads/master
| 2021-01-03T01:29:36.746897 | 2020-02-12T08:22:57 | 2020-02-12T08:22:57 | 239,858,618 | 0 | 0 |
MIT
| true | 2020-02-11T20:32:46 | 2020-02-11T20:32:45 | 2019-12-12T09:11:20 | 2019-12-10T22:11:26 | 38,821 | 0 | 0 | 0 | null | false | false |
import typing
class LamanSamplerConfig(typing.NamedTuple):
expected_corruption_steps: int
use_revisit: bool
num_steps: int
max_denoising_steps: int = 20
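# Hedged usage sketch (values are illustrative, not taken from the project):
#   cfg = LamanSamplerConfig(expected_corruption_steps=5, use_revisit=True, num_steps=100)
#   cfg.max_denoising_steps  # -> 20 (default)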
|
UTF-8
|
Python
| false | false | 171 |
py
| 73 |
_data.py
| 66 | 0.725146 | 0.71345 | 0 | 8 | 20.375 | 44 |
fabiano-teichmann/interfaces
| 3,083,786,550,703 |
c1b090692a35ccd84bb81da1f41d7e46a1428ef3
|
5d408cf33f025c3230f1ea033ba7a42fb31dc20d
|
/tests/test_dispatch_message_abc.py
|
3c0229b2b31ac78a50eaa53afdaa08f9df6a2d74
|
[] |
no_license
|
https://github.com/fabiano-teichmann/interfaces
|
91854d554405567f90ed6e31c778f479b1b1119e
|
e6fdb42141c05c1e15652010348be33b5c233af0
|
refs/heads/main
| 2023-08-30T11:31:25.693564 | 2021-11-15T00:12:10 | 2021-11-15T00:12:10 | 427,717,001 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pytest
from dispatch_message_abc import DispatchSMS, DispatchEmail
from schema import MessageSMS, MessageEmail
class TestDispatchMessageABC:
def test_send_sms_should_return_none(self):
assert DispatchSMS(
MessageSMS(to="479984848", sender="2102", message="Protocol is nice !!!")
)
def test_send_email_should_raise_exception(self):
with pytest.raises(
TypeError,
match="Can't instantiate abstract class DispatchEmail with abstract method confirm_receive",
):
DispatchEmail(
MessageEmail(
to="email@email.com",
subject="Protocol",
sender="ops@email.com",
message="Protocol is nice !!!",
cc=None,
cco=None,
)
)
|
UTF-8
|
Python
| false | false | 871 |
py
| 6 |
test_dispatch_message_abc.py
| 5 | 0.552239 | 0.537313 | 0 | 27 | 31.259259 | 104 |
Hapattaja/ruuvi-hass.io
| 13,297,218,787,611 |
e185bb87f78c06481cc9fc0d1a0fd6c5bee69f48
|
4ae89f73a913d090de3485059b25ce9027a898f9
|
/tests/test_basic_setup.py
|
0cb9b45896c1873d5ba0dc8dd908405a6b26cb18
|
[
"MIT"
] |
permissive
|
https://github.com/Hapattaja/ruuvi-hass.io
|
4f9c1cd668f59962518dbb24eec8f8f06d80c8f7
|
f6cd9e214c380fcea56f2158d38a3eeb222bc77a
|
refs/heads/master
| 2023-08-11T01:49:10.389053 | 2021-09-19T09:01:48 | 2021-09-19T09:01:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""The basic setup of the platform."""
from unittest.mock import MagicMock, patch
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from pytest_homeassistant_custom_component.common import MockConfigEntry
from custom_components.ruuvi.sensor import (
SENSOR_TYPES
)
from custom_components.ruuvi.sensor import (
async_setup_platform
)
from .const import FULL_CONFIG_DATA, MANDATORY_CONFIG_DATA, ONLY_CERTAIN_CONDITIONS_CONFIG_DATA
async def test_full_setup_platform(hass: HomeAssistant):
"""Test platform setup."""
async_add_entities = MagicMock()
with patch('custom_components.ruuvi.sensor.RuuviTagClient') as ruuvi_ble_client:
await async_setup_platform(hass, FULL_CONFIG_DATA, async_add_entities, None)
assert async_add_entities.called
async def test_basic_setup_component(hass: HomeAssistant):
"""Test platform setup."""
with patch('custom_components.ruuvi.sensor.RuuviTagClient') as ruuvi_ble_client:
assert await async_setup_component(hass, "sensor",
{
"sensor": [
MANDATORY_CONFIG_DATA,
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
for condition in SENSOR_TYPES.keys():
state = hass.states.get(f"sensor.ruuvitag_macaddress00_{condition}")
assert state is not None
async def test_monitored_conditions_setup(hass: HomeAssistant):
"""Test platform setup."""
with patch('custom_components.ruuvi.sensor.RuuviTagClient') as ruuvi_ble_client:
assert await async_setup_component(hass, "sensor",
{
"sensor": [
ONLY_CERTAIN_CONDITIONS_CONFIG_DATA,
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
expected_conditions = ['temperature', 'pressure']
non_expected_conditions = SENSOR_TYPES.keys() - expected_conditions
for condition in expected_conditions:
state = hass.states.get(f"sensor.ruuvitag_macaddress00_{condition}")
assert state is not None
for condition in non_expected_conditions:
state = hass.states.get(f"sensor.ruuvitag_macaddress00_{condition}")
assert state is None
|
UTF-8
|
Python
| false | false | 2,364 |
py
| 15 |
test_basic_setup.py
| 7 | 0.676819 | 0.674281 | 0 | 69 | 33.246377 | 95 |
dianedef/P3-Aidez-MacGyver
| 3,831,110,859,075 |
40754524e880b836c22ce514fca6830ef1b48f24
|
9f80a393208574d636483101c8ed2db4ff1d5862
|
/models/labyrinth.py
|
f10eeb74f33959cdd0e93487390141425e1edcaf
|
[] |
no_license
|
https://github.com/dianedef/P3-Aidez-MacGyver
|
4331e0a6e135d5133ef6268e55e4d1d5d30f923d
|
f448f896e7bd915aada672f7b0dd73d6dcf6e54b
|
refs/heads/master
| 2020-09-01T00:10:25.968244 | 2020-04-21T12:10:34 | 2020-04-21T12:10:34 | 218,823,380 | 0 | 2 | null | false | 2020-02-07T18:55:43 | 2019-10-31T17:35:47 | 2020-02-07T18:25:13 | 2020-02-07T18:25:11 | 2,740 | 0 | 1 | 1 |
Python
| false | false |
"""This module defines classes and functions related to the labyrinth in the game."""
import random
from models.position import Position
class Labyrinth:
def __init__(self):
"""This function initialize a labyrinth with paths, departure, end, and
walls."""
self.paths = []
self.start = None
self.end = None
self.walls = []
self.bar = []
self.item_positions = []
def define_path(self, filename):
"""This function creates the labyrinth's path from the text file
map.txt."""
with open(filename) as file:
content = file.readlines()
for num_line, line in enumerate(content):
for num_c, c in enumerate(line):
if c == "P":
self.paths.append(Position(num_c, num_line))
elif c == "D":
self.start = Position(num_c, num_line)
elif c == "A":
self.end = Position(num_c, num_line)
elif c == "-":
self.walls.append(Position(num_c, num_line))
elif c == "#":
self.bar.append(Position(num_c, num_line))
self.width = num_c + 1
self.length = num_line + 1
self.paths.append(self.end)
self.paths.append(self.start)
def random_pos(self, number):
"""This function returns path position that is neither the beginning
nor the end."""
positions = random.sample(self.paths[:-2], 3)
return positions[number]
|
UTF-8
|
Python
| false | false | 1,610 |
py
| 16 |
labyrinth.py
| 12 | 0.521118 | 0.518634 | 0 | 47 | 33.255319 | 85 |
vahid75/File-Server
| 584,115,564,965 |
7bc8c3e0060683392977687c4e6bc651c616786a
|
1692c13ff5b8b99107cfd0bf20dd056490736c28
|
/File_Server/middleware.py
|
833b1f153d7387aec238a6941542ea802a81dff1
|
[] |
no_license
|
https://github.com/vahid75/File-Server
|
fc3e0e1710c7bb0568e3dc318f486b8d03753ac6
|
13e5d5b467d62b5203bb58e0f8c1213b76e3b4e8
|
refs/heads/master
| 2022-10-20T15:20:42.415322 | 2020-06-28T18:27:14 | 2020-06-28T18:27:14 | 275,101,251 | 4 | 1 | null | false | 2020-06-28T18:32:03 | 2020-06-26T07:45:40 | 2020-06-28T18:27:18 | 2020-06-28T18:27:24 | 19 | 4 | 1 | 0 |
Python
| false | false |
from django.http import HttpResponseForbidden
import os
from django.conf import settings
from django.urls import reverse
from . import utils
def ip_middleware(get_response):
def middleware(request):
client_ip_address = request.META.get("HTTP_X_REAL_IP")
request.client_ip_address = client_ip_address
response = get_response(request)
if request.get_full_path() in [reverse('register_via_ip')]:
return response
ip_authorized = utils.authorize_with_ip(client_ip_address)
if not ip_authorized:
            return HttpResponseForbidden('Forbidden. Ask the file server admin to grant your IP access to server resources.')
return response
return middleware
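# Note (my addition): factory-style middleware like this takes effect only after its
# dotted path is added to the MIDDLEWARE list in settings.py, e.g. the assumed entry
# "File_Server.middleware.ip_middleware".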
|
UTF-8
|
Python
| false | false | 732 |
py
| 11 |
middleware.py
| 6 | 0.699454 | 0.699454 | 0 | 21 | 33.714286 | 123 |
maraujo/pynmet
| 9,809,705,345,838 |
9c900a4bc4ba7e18d250d53ef00f8be07a06e164
|
08b2642e6748745378bda7dd117f54b6b6bb31ed
|
/Tests/default_metpy.py
|
105b1b8639f8cf1027766834e79897a2d559c32a
|
[] |
no_license
|
https://github.com/maraujo/pynmet
|
f47344d61b17ddd1d4098874d5030a5b99c84ad2
|
4c214042d23ec7ab09d874712a6b71f73b612c7c
|
refs/heads/master
| 2020-03-20T00:51:08.986661 | 2018-06-12T11:24:41 | 2018-06-12T11:24:41 | 137,057,897 | 0 | 0 | null | true | 2018-06-12T10:46:32 | 2018-06-12T10:46:32 | 2018-05-08T21:20:03 | 2018-01-15T02:53:12 | 219 | 0 | 0 | 0 | null | false | null |
import cartopy
import cartopy.crs as ccrs
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
import numpy as np
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import (interpolate, remove_nan_observations,
remove_repeat_coordinates)
def basic_map(proj):
"""Make our basic default map for plotting"""
fig = plt.figure(figsize=(15, 10))
view = fig.add_axes([0, 0, 1, 1], projection=proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cartopy.feature.NaturalEarthFeature(category='cultural',
name='admin_1_states_provinces_lakes',
scale='50m', facecolor='none'))
view.add_feature(cartopy.feature.OCEAN)
view.add_feature(cartopy.feature.COASTLINE)
view.add_feature(cartopy.feature.BORDERS, linestyle=':')
return view
def station_test_data(variable_names, proj_from=None, proj_to=None):
with open('/home/josue/station_data.txt') as f:
all_data = np.loadtxt(f, skiprows=1, delimiter=',',
usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),
('slp', 'f'), ('air_temperature', 'f'),
('cloud_fraction', 'f'), ('dewpoint', 'f'),
('weather', '16S'),
('wind_dir', 'f'), ('wind_speed', 'f')]))
all_stids = [s.decode('ascii') for s in all_data['stid']]
data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids])
value = data[variable_names]
lon = data['lon']
lat = data['lat']
if proj_from is not None and proj_to is not None:
try:
proj_points = proj_to.transform_points(proj_from, lon, lat)
return proj_points[:, 0], proj_points[:, 1], value
except Exception as e:
print(e)
return None
return lon, lat, value
from_proj = ccrs.Geodetic()
to_proj = ccrs.AlbersEqualArea(central_longitude=-97.0000, central_latitude=38.0000)
levels = list(range(-20, 20, 1))
cmap = plt.get_cmap('magma')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
x, y, temp = station_test_data('air_temperature', from_proj, to_proj)
x, y, temp = remove_nan_observations(x, y, temp)
x, y, temp = remove_repeat_coordinates(x, y, temp)
gx, gy, img = interpolate(x, y, temp, interp_type='linear', hres=75000)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
|
UTF-8
|
Python
| false | false | 2,858 |
py
| 9 |
default_metpy.py
| 6 | 0.56718 | 0.544787 | 0 | 74 | 37.635135 | 95 |
SWE4103-Team1/UnitTestDemo
| 3,453,153,752,596 |
298d6988e7baa7573930655aecbf0df176ed1dff
|
86eb218f6d75f97010e47e1bbb0cd143c01f4cd0
|
/DemoApp/tests.py
|
fb68bacda54f9292ddfc72d89f1fa7fc5f365c1e
|
[] |
no_license
|
https://github.com/SWE4103-Team1/UnitTestDemo
|
cf50ec04db4c09d7a6bf45ba13d892447e932782
|
943292d512d6e23a50e0885c63fb3a41f076c749
|
refs/heads/master
| 2023-08-27T17:37:38.256427 | 2021-10-31T04:47:41 | 2021-10-31T04:47:41 | 411,110,449 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.test import TestCase
# Unit test class inherits from django.test.TestCase
# which inherits from unittest.TestCase
class DemoAppUnitTests(TestCase):
def test_true_is_true(self):
isTrue = True
self.assertTrue(isTrue)
def test_false_is_false(self):
isFalse = False
self.assertFalse(isFalse)
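# Note (my addition, assuming the app label is "DemoApp"): these tests are typically
# run from the project root with
#   python manage.py test DemoApp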
|
UTF-8
|
Python
| false | false | 312 |
py
| 4 |
tests.py
| 2 | 0.772436 | 0.772436 | 0 | 12 | 25 | 52 |
henchhing-limbu/Daily-Coding-Problems
| 12,463,995,098,435 |
c83587a53381468f840ca5cdc3eabfa76591beee
|
8eaaf6d5b40fee8e37cf0b8cdc30f83545d66308
|
/problem2.py
|
004ef1ee27e149ce77f77ec226fb40a6d6d3e385
|
[] |
no_license
|
https://github.com/henchhing-limbu/Daily-Coding-Problems
|
1c858cc4255051e02af49904cee9ac852818f609
|
9553e71333102be7a78dc63cc62b2c6ab778927c
|
refs/heads/master
| 2020-07-06T15:40:14.990903 | 2019-09-11T04:32:44 | 2019-09-11T04:32:44 | 203,070,225 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Given an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array execpt the one at i.
For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output would be [2, 3, 6].
Follow-up: what if you can't use divison?
"""
def product_array(nums):
left_to_right_prod = [nums[0]]
right_to_left_prod = [nums[-1]]
length = len(nums)
products = [0] * length
for i in range(1, length - 1):
left_to_right_prod.append(left_to_right_prod[i-1] * nums[i])
right_to_left_prod.append(right_to_left_prod[i-1] * nums[-(i+1)])
products[0] = right_to_left_prod[-1]
products[-1] = left_to_right_prod[-1]
for i in range(1, length - 1):
products[i] = left_to_right_prod[i-1] * right_to_left_prod[-(i+1)]
return products
print(product_array([1, 2, 3, 4, 5]))
print(product_array([3, 2, 1]))
print(product_array([1]))
print(product_array([5, 10]))
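# Hedged sketch of the division-based variant raised in the docstring (my addition,
# not part of the original solution). It assumes integer inputs with no zeros:
# divide the product of all numbers by the element at each index.
def product_array_with_division(nums):
    total = 1
    for n in nums:
        total *= n
    return [total // n for n in nums]
print(product_array_with_division([1, 2, 3, 4, 5]))  # [120, 60, 40, 30, 24]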
|
UTF-8
|
Python
| false | false | 1,014 |
py
| 14 |
problem2.py
| 14 | 0.663708 | 0.614398 | 0 | 25 | 39.56 | 174 |
pyfarm/pyfarm-master
| 13,709,535,658,655 |
7a22b777e540a24eb5e53b9f6a083ebddb04d253
|
a439511176625ea34aa6b19a9bd39926e2f0bcda
|
/tests/test_master/test_jobs_api.py
|
708569d914c52a20d6c48a4b2bd815674ac6e479
|
[
"BSD-3-Clause",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
https://github.com/pyfarm/pyfarm-master
|
bb6983f198d814ce40e77f5b843806d9e8e9ae9e
|
ea04bbcb807eb669415c569417b4b1b68e75d29d
|
refs/heads/master
| 2021-06-04T11:55:05.458239 | 2017-12-22T18:13:23 | 2017-12-22T18:13:23 | 12,912,885 | 2 | 3 | null | false | 2016-02-04T13:41:50 | 2013-09-18T03:10:36 | 2015-05-08T10:28:14 | 2016-02-04T13:41:50 | 7,059 | 4 | 3 | 33 |
Python
| null | null |
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
# Copyright 2014 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
# test class must be loaded first
from pyfarm.master.testutil import BaseTestCase
BaseTestCase.build_environment()
from pyfarm.master.utility import dumps
from pyfarm.master.application import get_api_blueprint
from pyfarm.master.config import config
from pyfarm.master.entrypoints import load_api
from pyfarm.master.application import db
from pyfarm.models.user import User
from pyfarm.models.job import Job
jobtype_code = """from pyfarm.jobtypes.core.jobtype import JobType
class TestJobType(JobType):
def get_command(self):
return "/usr/bin/touch"
def get_arguments(self):
return [os.path.join(
self.assignment_data["job"]["data"]["path"],
"%04d" % self.assignment_data[\"tasks\"][0][\"frame\"])]
"""
class TestJobAPI(BaseTestCase):
def setup_app(self):
super(TestJobAPI, self).setup_app()
self.api = get_api_blueprint()
self.app.register_blueprint(self.api)
load_api(self.app, self.api)
def create_a_jobtype(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
return "TestJobType", jobtype_id
def create_a_job(self, jobtypename):
response1 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": jobtypename,
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response1)
job_id = response1.json["id"]
return "Test Job", job_id
def test_job_schema(self):
response = self.client.get("/api/v1/jobs/schema")
self.assert_ok(response)
schema = Job.to_schema()
schema["start"] = "NUMERIC(10,4)"
schema["end"] = "NUMERIC(10,4)"
del schema["jobtype_version_id"]
schema["jobtype"] = \
"VARCHAR(%s)" % config.get("job_type_max_name_length")
schema["jobtype_version"] = "INTEGER"
del schema["user_id"]
schema["user"] = "VARCHAR(%s)" % config.get("max_username_length")
del schema["job_queue_id"]
schema["jobqueue"] = "VARCHAR(%s)" % config.get("max_queue_name_length")
self.assertEqual(response.json, schema)
def test_job_post(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response2)
software_id = response2.json['id']
software_min_version_id = response2.json["versions"][0]["id"]
software_max_version_id = response2.json["versions"][1]["id"]
response3 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"}
]
}))
self.assert_created(response3)
self.assertIn("time_submitted", response3.json)
time_submitted = response3.json["time_submitted"]
id = response3.json["id"]
self.assertEqual(response3.json,
{
"id": id,
"jobqueue": None,
"time_finished": None,
"time_started": None,
"end": 2.0,
"time_submitted": time_submitted,
"jobtype_version": 1,
"jobtype": "TestJobType",
"start": 1.0,
"maximum_agents": None,
"minimum_agents": None,
"output_link": None,
"priority": 0,
"weight": 10,
"state": "queued",
"parents": [],
"hidden": False,
"ram_warning": None,
"title": "Test Job",
"tags": [],
"user": None,
"by": 1.0,
"data": {"foo": "bar"},
"ram_max": None,
"notes": "",
"notified_users": [],
"batch": 1,
"environ": None,
"requeue": 3,
"software_requirements": [
{
"min_version": "1.0",
"max_version": "1.1",
"max_version_id": software_max_version_id,
"software_id": 1,
"min_version_id": software_min_version_id,
"software": "foo"
}
],
"tag_requirements": [],
"ram": 32,
"cpus": 1,
"children": [],
"to_be_deleted": False,
"autodelete_time": None,
"job_group_id": None,
"jobgroup": None,
"completion_notify_sent": False,
"num_tiles": None
})
def test_job_post_with_notified_users(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
# Cannot create users via REST-API yet
user1_id = User.create("testuser1", "password").id
db.session.flush()
response1 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": jobtype_name,
"data": {"foo": "bar"},
"notified_users": [
{"username": "testuser1"}
]
}))
self.assert_created(response1)
self.assertIn("time_submitted", response1.json)
time_submitted = response1.json["time_submitted"]
id = response1.json["id"]
self.assertEqual(response1.json,
{
"id": id,
"jobqueue": None,
"job_group_id": None,
"jobgroup": None,
"time_finished": None,
"time_started": None,
"end": 2.0,
"time_submitted": time_submitted,
"jobtype_version": 1,
"jobtype": "TestJobType",
"start": 1.0,
"maximum_agents": None,
"minimum_agents": None,
"priority": 0,
"weight": 10,
"state": "queued",
"parents": [],
"hidden": False,
"ram_warning": None,
"title": "Test Job",
"tags": [],
"user": None,
"by": 1.0,
"data": {"foo": "bar"},
"ram_max": None,
"notes": "",
"notified_users": [
{
"id": user1_id,
"username": "testuser1",
"email": None,
"on_deletion": False,
"on_failure": True,
"on_success": True
}
],
"output_link": None,
"batch": 1,
"environ": None,
"requeue": 3,
"software_requirements": [],
"tag_requirements": [],
"ram": 32,
"cpus": 1,
"children": [],
"to_be_deleted": False,
"autodelete_time": None,
"completion_notify_sent": False,
"num_tiles": None
})
def test_job_post_bad_requirements(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": {
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"}
}))
self.assert_bad_request(response3)
response4 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": [1]
}))
self.assert_bad_request(response4)
response5 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1",
"unknown_key": 1
}]
}))
self.assert_bad_request(response5)
response6 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": [{}]
}))
self.assert_bad_request(response6)
def test_job_post_unknown_software_version(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": [
{
"software": "unknown_software",
"min_version": "1.0",
"max_version": "1.1",
}]
}))
self.assert_not_found(response3)
response3 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": [
{
"software": "foo",
"min_version": "unknown_version",
"max_version": "1.1",
}]
}))
self.assert_not_found(response3)
response4 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "unknown_version",
}]
}))
self.assert_not_found(response4)
def test_job_post_no_type(self):
response1 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"data": {"foo": "bar"}
}))
self.assert_bad_request(response1)
def test_job_post_bad_type(self):
response1 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"jobtype": 1,
"title": "Test Job",
"data": {"foo": "bar"}
}))
self.assert_bad_request(response1)
def test_job_post_with_jobtype_version(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing (updated)",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"jobtype_version": 1,
"data": {"foo": "bar"},
}))
self.assert_created(response3)
time_submitted = response3.json["time_submitted"]
id = response3.json["id"]
self.assertEqual(response3.json,
{
"id": id,
"jobqueue": None,
"job_group_id": None,
"jobgroup": None,
"time_finished": None,
"time_started": None,
"end": 2.0,
"time_submitted": time_submitted,
"jobtype_version": 1,
"jobtype": "TestJobType",
"start": 1.0,
"maximum_agents": None,
"minimum_agents": None,
"priority": 0,
"weight": 10,
"state": "queued",
"parents": [],
"hidden": False,
"ram_warning": None,
"title": "Test Job",
"tags": [],
"user": None,
"by": 1.0,
"data": {"foo": "bar"},
"ram_max": None,
"notes": "",
"notified_users": [],
"output_link": None,
"batch": 1,
"environ": None,
"requeue": 3,
"software_requirements": [],
"tag_requirements": [],
"ram": 32,
"cpus": 1,
"children": [],
"to_be_deleted": False,
"autodelete_time": None,
"completion_notify_sent": False,
"num_tiles": None
})
def test_job_post_unknown_type(self):
response1 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "unknown jobtype",
"data": {"foo": "bar"}
}))
self.assert_not_found(response1)
def test_jobs_list(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
response3 = self.client.get("/api/v1/jobs/")
self.assert_ok(response3)
self.assertEqual(response3.json,
[
{
"title": "Test Job",
"state": "queued",
"id": id
},
])
def test_job_get(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.get("/api/v1/jobs/Test%20Job")
self.assert_ok(response3)
self.assertEqual(response3.json,
{
"jobqueue": None,
"ram_warning": None,
"title": "Test Job",
"state": "queued",
"jobtype_version": 1,
"jobtype": "TestJobType",
"maximum_agents": None,
"minimum_agents": None,
"weight": 10,
"environ": None,
"user": None,
"priority": 0,
"time_finished": None,
"start": 1.0,
"id": id,
"job_group_id": None,
"jobgroup": None,
"notes": "",
"notified_users": [],
"output_link": None,
"ram": 32,
"tags": [],
"hidden": False,
"data": {"foo": "bar"},
"software_requirements": [],
"tag_requirements": [],
"batch": 1,
"time_started": None,
"time_submitted": time_submitted,
"requeue": 3,
"end": 2.0,
"parents": [],
"cpus": 1,
"ram_max": None,
"children": [],
"by": 1.0,
"to_be_deleted": False,
"autodelete_time": None,
"completion_notify_sent": False,
"num_tiles": None
})
response4 = self.client.get("/api/v1/jobs/%s" % id)
self.assert_ok(response4)
self.assertEqual(response4.json,
{
"jobqueue": None,
"ram_warning": None,
"title": "Test Job",
"state": "queued",
"jobtype_version": 1,
"jobtype": "TestJobType",
"maximum_agents": None,
"minimum_agents": None,
"weight": 10,
"environ": None,
"user": None,
"priority": 0,
"time_finished": None,
"start": 1.0,
"id": id,
"job_group_id": None,
"jobgroup": None,
"notes": "",
"notified_users": [],
"output_link": None,
"ram": 32,
"tags": [],
"hidden": False,
"data": {"foo": "bar"},
"software_requirements": [],
"tag_requirements": [],
"batch": 1,
"time_started": None,
"time_submitted": time_submitted,
"requeue": 3,
"end": 2.0,
"parents": [],
"cpus": 1,
"ram_max": None,
"children": [],
"by": 1.0,
"to_be_deleted": False,
"autodelete_time": None,
"completion_notify_sent": False,
"num_tiles": None
})
def test_job_get_unknown(self):
response1 = self.client.get("/api/v1/jobs/Unknown%20Job")
self.assert_not_found(response1)
def test_job_update(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"start": 2.0,
"end": 3.0,
"ram": 64
}))
self.assert_ok(response3)
self.assertEqual(response3.json,
{
"jobqueue": None,
"ram_warning": None,
"title": "Test Job",
"state": "queued",
"jobtype_version": 1,
"jobtype": "TestJobType",
"environ": None,
"user": None,
"maximum_agents": None,
"minimum_agents": None,
"output_link": None,
"priority": 0,
"weight": 10,
"time_finished": None,
"start": 2.0,
"id": id,
"job_group_id": None,
"jobgroup": None,
"notes": "",
"ram": 64,
"tags": [],
"hidden": False,
"data": {"foo": "bar"},
"software_requirements": [],
"tag_requirements": [],
"batch": 1,
"time_started": None,
"time_submitted": time_submitted,
"requeue": 3,
"end": 3.0,
"parents": [],
"cpus": 1,
"ram_max": None,
"children": [],
"by": 1.0,
"to_be_deleted": False,
"autodelete_time": None,
"completion_notify_sent": False,
"num_tiles": None
})
response4 = self.client.post(
"/api/v1/jobs/%s" % id,
content_type="application/json",
data=dumps({
"start": 2.0,
"end": 4.0,
}))
self.assert_ok(response4)
def test_job_update_unknown(self):
response1 = self.client.post(
"/api/v1/jobs/Unknown%20Job",
content_type="application/json",
data=dumps({
"start": 2.0
}))
self.assert_not_found(response1)
def test_job_update_bad_start_end(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"start": 3.0,
"end": 2.0,
}))
self.assert_bad_request(response3)
def test_job_update_bad_disallowed_columns(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"time_started": "2014-03-06T15:40:58.335259"
}))
self.assert_bad_request(response3)
response4 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"time_finished": "2014-03-06T15:40:58.335259"
}))
self.assert_bad_request(response4)
response5 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"time_submitted": "2014-03-06T15:40:58.335259"
}))
self.assert_bad_request(response5)
response6 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"jobtype_version_id": 1
}))
self.assert_bad_request(response6)
def test_job_update_unknown_columns(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"unknown_column": 1
}))
self.assert_bad_request(response3)
def test_job_update_bad_requirements(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.post(
"/api/v1/jobs/Test%20Job",
content_type="application/json",
data=dumps({
"software_requirements": 1
}))
self.assert_bad_request(response3)
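# Deleting a job may either flag it for later removal ("to_be_deleted") or drop it
# immediately, so the test below accepts both outcomes.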
def test_job_delete(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.delete("/api/v1/jobs/%s" % id)
self.assert_no_content(response3)
response4 = self.client.get("/api/v1/jobs/%s" % id)
if response4.status_code == 200:
self.assertTrue(response4.json["to_be_deleted"])
else:
self.assert_not_found(response4)
def test_job_get_tasks(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.get("/api/v1/jobs/Test%20Job/tasks/")
self.assert_ok(response3)
self.assertEqual(len(response3.json), 2)
task1_id = response3.json[0]["id"]
task1_submitted = response3.json[0]["time_submitted"]
task2_id = response3.json[1]["id"]
task2_submitted = response3.json[1]["time_submitted"]
self.assertEqual(response3.json,
[
{
"hidden": False,
"id": task1_id,
"attempts": 0,
"failures": 0,
"priority": 0,
"progress": 0.0,
"time_started": None,
"time_submitted": task1_submitted,
"frame": 1.0,
"time_finished": None,
"job_id": id,
"state": "queued",
"agent_id": None,
"last_error": None,
"sent_to_agent": False,
"tile": None
},
{
"hidden": False,
"id": task2_id,
"attempts": 0,
"failures": 0,
"priority": 0,
"progress": 0.0,
"time_started": None,
"time_submitted": task2_submitted,
"frame": 2.0,
"time_finished": None,
"job_id": id,
"state": "queued",
"agent_id": None,
"last_error": None,
"sent_to_agent": False,
"tile": None
}
])
def test_job_get_tasks_by_id(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
time_submitted = response2.json["time_submitted"]
response3 = self.client.get("/api/v1/jobs/%s/tasks/" % id)
self.assert_ok(response3)
self.assertEqual(len(response3.json), 2)
task1_id = response3.json[0]["id"]
task1_submitted = response3.json[0]["time_submitted"]
task2_id = response3.json[1]["id"]
task2_submitted = response3.json[1]["time_submitted"]
self.assertEqual(response3.json,
[
{
"hidden": False,
"id": task1_id,
"attempts": 0,
"failures": 0,
"priority": 0,
"progress": 0.0,
"time_started": None,
"time_submitted": task1_submitted,
"frame": 1.0,
"time_finished": None,
"job_id": id,
"state": "queued",
"agent_id": None,
"last_error": None,
"sent_to_agent": False,
"tile": None
},
{
"hidden": False,
"id": task2_id,
"attempts": 0,
"failures": 0,
"priority": 0,
"progress": 0.0,
"time_started": None,
"time_submitted": task2_submitted,
"frame": 2.0,
"time_finished": None,
"job_id": id,
"state": "queued",
"agent_id": None,
"last_error": None,
"sent_to_agent": False,
"tile": None
}
])
def test_job_get_tasks_unknown_job(self):
response1 = self.client.get("/api/v1/jobs/Unknown%20Job/tasks/")
self.assert_not_found(response1)
def test_job_update_task(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
response3 = self.client.get("/api/v1/jobs/Test%20Job/tasks/")
self.assert_ok(response3)
self.assertEqual(len(response3.json), 2)
task1_id = response3.json[0]["id"]
task1_submitted = response3.json[0]["time_submitted"]
task2_id = response3.json[1]["id"]
task2_submitted = response3.json[1]["time_submitted"]
response4 = self.client.post(
"/api/v1/jobs/%s/tasks/%s" % (id, task1_id),
content_type="application/json",
data=dumps({
"priority": 1,
"state": "done"
}))
self.assert_ok(response4)
task1_finished = response4.json["time_finished"]
self.assertEqual(response4.json,
{
"agent": None,
"hidden": False,
"id": task1_id,
"attempts": 0,
"failures": 0,
"priority": 1,
"progress": 1.0,
"time_started": None,
"time_submitted": task1_submitted,
"frame": 1.0,
"time_finished": task1_finished,
"job": {"id": id, "title": "Test Job"},
"job_id": id,
"state": "done",
"agent_id": None,
"last_error": None,
"sent_to_agent": False,
"tile": None
})
response5 = self.client.post(
"/api/v1/jobs/Test%%20Job/tasks/%s" % task2_id,
content_type="application/json",
data=dumps({"state": "done"}))
self.assert_ok(response5)
response6 = self.client.get("/api/v1/jobs/Test%20Job")
self.assert_ok(response6)
self.assertEqual(response6.json["state"], "done")
def test_job_update_unknown_task(self):
response1 = self.client.post(
"/api/v1/jobs/Unknown%20Job/tasks/5",
content_type="application/json",
data=dumps({"state": "done"}))
self.assert_not_found(response1)
def test_job_update_task_disallowed_columns(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
response3 = self.client.get("/api/v1/jobs/Test%20Job/tasks/")
self.assert_ok(response3)
self.assertEqual(len(response3.json), 2)
task1_id = response3.json[0]["id"]
task1_submitted = response3.json[0]["time_submitted"]
task2_id = response3.json[1]["id"]
task2_submitted = response3.json[1]["time_submitted"]
response4 = self.client.post(
"/api/v1/jobs/%s/tasks/%s" % (id, task1_id),
content_type="application/json",
data=dumps({"time_started": "2014-03-06T15:40:58.338904"}))
self.assert_bad_request(response4)
response5 = self.client.post(
"/api/v1/jobs/%s/tasks/%s" % (id, task1_id),
content_type="application/json",
data=dumps({"time_finished": "2014-03-06T15:40:58.338904"}))
self.assert_bad_request(response5)
response6 = self.client.post(
"/api/v1/jobs/%s/tasks/%s" % (id, task1_id),
content_type="application/json",
data=dumps({"time_submitted": "2014-03-06T15:40:58.338904"}))
self.assert_bad_request(response6)
response7 = self.client.post(
"/api/v1/jobs/%s/tasks/%s" % (id, task1_id),
content_type="application/json",
data=dumps({"job_id": 1}))
self.assert_bad_request(response7)
response8 = self.client.post(
"/api/v1/jobs/%s/tasks/%s" % (id, task1_id),
content_type="application/json",
data=dumps({"frame": 1.0}))
self.assert_bad_request(response8)
def test_job_get_single_task(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": jobtype_code
}))
self.assert_created(response1)
jobtype_id = response1.json['id']
response2 = self.client.post(
"/api/v1/jobs/",
content_type="application/json",
data=dumps({
"start": 1.0,
"end": 2.0,
"title": "Test Job",
"jobtype": "TestJobType",
"data": {"foo": "bar"},
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json["id"]
response3 = self.client.get("/api/v1/jobs/Test%20Job/tasks/")
self.assert_ok(response3)
self.assertEqual(len(response3.json), 2)
task1_id = response3.json[0]["id"]
task1_submitted = response3.json[0]["time_submitted"]
task2_id = response3.json[1]["id"]
task2_submitted = response3.json[1]["time_submitted"]
response4 = self.client.get("/api/v1/jobs/Test%%20Job/tasks/%s" %
task1_id)
self.assert_ok(response4)
self.assertEqual(response4.json,
{
"agent": None,
"hidden": False,
"id": task1_id,
"attempts": 0,
"failures": 0,
"priority": 0,
"progress": 0.0,
"time_started": None,
"time_submitted": task1_submitted,
"frame": 1.0,
"time_finished": None,
"job": {"id": id, "title": "Test Job"},
"job_id": id,
"state": "queued",
"agent_id": None,
"last_error": None,
"sent_to_agent": False,
"tile": None
})
response5 = self.client.get("/api/v1/jobs/%s/tasks/%s" %
(id, task2_id))
self.assert_ok(response5)
self.assertEqual(response5.json,
{
"agent": None,
"hidden": False,
"id": task2_id,
"attempts": 0,
"failures": 0,
"priority": 0,
"progress": 0.0,
"time_started": None,
"time_submitted": task2_submitted,
"frame": 2.0,
"time_finished": None,
"job": {"id": id, "title": "Test Job"},
"job_id": id,
"state": "queued",
"agent_id": None,
"last_error": None,
"sent_to_agent": False,
"tile": None
})
def test_job_get_unknown_single_task(self):
response1 = self.client.get("/api/v1/jobs/Unknown%20Job/tasks/1")
self.assert_not_found(response1)
def test_job_notified_user_add(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
# Cannot create users via REST-API yet
user1_id = User.create("testuser1", "password").id
user2_id = User.create("testuser2", "password").id
db.session.flush()
response1 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_name,
content_type="application/json",
data=dumps({
"username": "testuser1",
"on_success": False,
"on_failure": False,
"on_deletion": True
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_id,
content_type="application/json",
data=dumps({"username": "testuser2"}))
self.assert_created(response2)
response3 = self.client.get("/api/v1/jobs/%s/notified_users/" % job_name)
self.assert_ok(response3)
self.assertEqual(response3.json,
[
{
"id": user1_id,
"username": "testuser1",
"email": None,
"on_deletion": True,
"on_success": False,
"on_failure": False
},
{
"id": user2_id,
"username": "testuser2",
"email": None,
"on_deletion": False,
"on_success": True,
"on_failure": True
}
])
def test_job_notified_user_add_unknown_user(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
response1 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_name,
content_type="application/json",
data=dumps({"username": "unknownuser"}))
self.assert_not_found(response1)
def test_job_notified_user_add_unknown_columns(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
# Cannot create users via REST-API yet
user1_id = User.create("testuser1", "password").id
db.session.flush()
response1 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_name,
content_type="application/json",
data=dumps({
"username": "testuser1",
"bla": "blubb"}))
self.assert_bad_request(response1)
def test_job_notified_user_add_no_username(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
response1 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_name,
content_type="application/json",
data=dumps({}))
self.assert_bad_request(response1)
def test_job_notified_user_add_unknown_job(self):
response1 = self.client.post(
"/api/v1/jobs/Unknown%20Job/notified_users/",
content_type="application/json",
data=dumps({"username": "unknownuser"}))
self.assert_not_found(response1)
def test_job_notified_user_list_unknown_job(self):
response1 = self.client.get(
"/api/v1/jobs/Unknown%20Job/notified_users/")
self.assert_not_found(response1)
def test_job_notified_user_list_by_id(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
# Cannot create users via REST-API yet
user1_id = User.create("testuser1", "password").id
db.session.flush()
response1 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_name,
content_type="application/json",
data=dumps({"username": "testuser1"}))
self.assert_created(response1)
response2 = self.client.get(
"/api/v1/jobs/%s/notified_users/" % job_id)
self.assert_ok(response2)
self.assertEqual(response2.json,
[
{
"id": user1_id,
"username": "testuser1",
"email": None,
"on_deletion": False,
"on_success": True,
"on_failure": True
}
])
def test_job_notified_user_list_unknown_job(self):
response1 = self.client.get(
"/api/v1/jobs/Unknown%20Job/notified_users/")
self.assert_not_found(response1)
def test_job_notified_user_delete(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
# Cannot create users via REST-API yet
user1_id = User.create("testuser1", "password").id
user2_id = User.create("testuser2", "password").id
db.session.flush()
response1 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_name,
content_type="application/json",
data=dumps({"username": "testuser1"}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobs/%s/notified_users/" % job_id,
content_type="application/json",
data=dumps({"username": "testuser2"}))
self.assert_created(response2)
response3 = self.client.delete(
"/api/v1/jobs/%s/notified_users/testuser1" % job_name)
self.assert_no_content(response3)
response4 = self.client.get("/api/v1/jobs/%s/notified_users/" % job_name)
self.assert_ok(response4)
self.assertEqual(response4.json,
[
{
"id": user2_id,
"username": "testuser2",
"email": None,
"on_success": True,
"on_failure": True,
"on_deletion": False
}
])
response5 = self.client.delete(
"/api/v1/jobs/%s/notified_users/testuser2" % job_id)
self.assert_no_content(response5)
response6 = self.client.get("/api/v1/jobs/%s/notified_users/" % job_name)
self.assert_ok(response6)
self.assertEqual(response6.json, [])
def test_task_failed_on_agent_add(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
tasks_response = self.client.get("/api/v1/jobs/%s/tasks/" % job_id)
self.assert_ok(tasks_response)
task_id = tasks_response.json[0]["id"]
agent_id = uuid.uuid4()
agent_create_response = self.client.post(
"/api/v1/agents/",
content_type="application/json",
data=dumps({
"id": agent_id,
"cpus": 16,
"hostname": "testagent1",
"remote_ip": "10.0.200.1",
"port": 64994,
"ram": 2048,
"free_ram": 2048,
"state": "online"}))
self.assert_created(agent_create_response)
post_failure_response = self.client.post(
"/api/v1/jobs/%s/tasks/%s/failed_on_agents/" % (job_id, task_id),
content_type="application/json",
data=dumps({"id": agent_id}))
self.assert_created(post_failure_response)
failed_on_agents_response = self.client.get(
"/api/v1/jobs/%s/tasks/%s/failed_on_agents/" % (job_id, task_id))
self.assert_ok(failed_on_agents_response)
self.assertEqual(failed_on_agents_response.json,
[
{
"id" : str(agent_id),
"hostname": "testagent1"
}
])
def test_task_failed_on_agent_delete(self):
jobtype_name, jobtype_id = self.create_a_jobtype()
job_name, job_id = self.create_a_job(jobtype_name)
tasks_response = self.client.get("/api/v1/jobs/%s/tasks/" % job_id)
self.assert_ok(tasks_response)
task_id = tasks_response.json[0]["id"]
agent_id = uuid.uuid4()
agent_create_response = self.client.post(
"/api/v1/agents/",
content_type="application/json",
data=dumps({
"id": agent_id,
"cpus": 16,
"hostname": "testagent1",
"remote_ip": "10.0.200.1",
"port": 64994,
"ram": 2048,
"free_ram": 2048,
"state": "online"}))
self.assert_created(agent_create_response)
post_failure_response = self.client.post(
"/api/v1/jobs/%s/tasks/%s/failed_on_agents/" % (job_id, task_id),
content_type="application/json",
data=dumps({"id": agent_id}))
self.assert_created(post_failure_response)
delete_response = self.client.delete(
"/api/v1/jobs/%s/tasks/%s/failed_on_agents/%s" %
(job_id, task_id, str(agent_id)))
self.assert_no_content(delete_response)
failed_on_agents_response = self.client.get(
"/api/v1/jobs/%s/tasks/%s/failed_on_agents/" % (job_id, task_id))
self.assert_ok(failed_on_agents_response)
self.assertEqual(failed_on_agents_response.json, [])
|
UTF-8
|
Python
| false | false | 65,871 |
py
| 157 |
test_jobs_api.py
| 80 | 0.402286 | 0.385784 | 0 | 1,676 | 38.302506 | 81 |
AmirTavakol/ICT-IN-TS
| 16,647,293,281,088 |
92dca0f4d766ff59248e723321fc22614e3dab5f
|
1c0c6c7d5cbbc183c057ea3ece331610505f0127
|
/Step3_1.py
|
c1c325f8f4fe84ea0fdbded54a8f8decbd6b3caa
|
[] |
no_license
|
https://github.com/AmirTavakol/ICT-IN-TS
|
c436744602f2cb43e55f1202db9379d63c3f9693
|
70c75e74567787e96655fbfff4145356875f3ce1
|
refs/heads/master
| 2023-02-02T19:11:22.251292 | 2020-12-18T19:05:31 | 2020-12-18T19:05:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[56]:
import pymongo as pm #import MongoClient only
import pprint
import datetime as dt
import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
#3 lines of code to get the database ready
client = pm.MongoClient('bigdatadb.polito.it', ssl=True, authSource = 'carsharing', tlsAllowInvalidCertificates=True)
db = client['carsharing'] #Choose the DB to use
db.authenticate('ictts', 'Ictts16!')#, mechanism='MONGODB-CR') #authentication
# getting the collection
# Car2go
c2g_perm_book = db['PermanentBookings']
# Enjoy
enj_perm_book = db['enjoy_PermanentBookings']
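# Build the MongoDB aggregation pipeline that counts rentals per hour for a city,
# keeping only bookings whose car actually moved and whose duration is 3-180 minutes.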
def rentals (city, start, end):
pipeline = [
{
'$match': { 'city': city,
'init_time': {'$gte': start, '$lte': end}
}
},
{
'$project' : { '_id':0,
'city': 1,
'hourOfDay': {'$floor':{'$divide':['$init_time', 3600]}},
'duration':{'$ceil': {'$divide': [{'$subtract': ['$final_time', '$init_time']}, 60]} },
'moved': {'$ne': [
{'$arrayElemAt': ['$origin_destination.coordinates', 0]},
{'$arrayElemAt': ['$origin_destination.coordinates', 1]}]
}
}
},
# Filter block: the booking must actually have moved and must last more than 3 min and less than 3 hours
{
'$match': {"$and": [{'moved': True}, {'duration': {'$gte': 3, '$lte': 180}}]}
},
{
'$group': { '_id': '$hourOfDay',
'count':{'$sum':1}
}
},
{
'$sort': {'_id': 1}
}
]
return pipeline
# the time projection is done in unix time since pandas
# doesn't work well with dates as indexes (it's easier)
def plot_rentals(city, base_date):
#add timezone information
timezones = {'Torino': +1, 'Wien': +1, 'Vancouver':-8}
tz = dt.timezone(dt.timedelta(hours=timezones[city]))
startDate = base_date.replace(tzinfo=tz)
monthWnd = dt.timedelta(days = 30)
endDate=startDate+monthWnd
startUnixTime = dt.datetime.timestamp(startDate)
endUnixTime = dt.datetime.timestamp(endDate)
#get the rentals
books_pipe = rentals(city, startUnixTime, endUnixTime)
if city == 'Torino':
daily_bookings=enj_perm_book.aggregate(books_pipe)
else:
daily_bookings=c2g_perm_book.aggregate(books_pipe)
book_df = pd.DataFrame (list(daily_bookings)) # pandas dataframes are easier to use for regressions
book_df['date'] = pd.to_datetime ( book_df['_id'] , unit = 'h') # from unix to datetime
book_df.drop('_id', axis=1, inplace=True)
book_df.rename(columns={'count':'rentals'}, inplace=True) # for clarity
book_df.to_csv ( "rentals_" + city + ".csv" ) # save the dataframe for later ARIMA computations
plt.figure (figsize =(15 , 5))
plt.grid ()
plt.plot( book_df['date'] , book_df ['rentals'] )
plt.title ( city + ': number of rentals per hour')
# x axis dates formatting
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=3)) # interval of days in ticks for x axis
plt.gcf().autofmt_xdate()
plt.xlabel ('hour')
plt.ylabel ('number of rentals')
plt.savefig(city + 'rentals.png')
plt.show()
initial_date = dt.datetime(2017,10,1,0,0,0)
city='Torino'
plot_rentals(city, initial_date)
city='Wien'
plot_rentals(city, initial_date)
city='Vancouver'
plot_rentals(city, initial_date)
# In[ ]:
|
UTF-8
|
Python
| false | false | 3,755 |
py
| 6 |
Step3_1.py
| 6 | 0.585619 | 0.573103 | 0 | 122 | 29.745902 | 119 |
yougth/IRDM2017
| 6,442,450,991,476 |
00c84d9aefa1947abe40f29bbd28b5d728959def
|
b6039b58907e5b489bc7fdd89ac32cd8af2bc257
|
/python/RunMe.py
|
165b5a4f79017f625d8d371007b42aca1a2b9d25
|
[] |
no_license
|
https://github.com/yougth/IRDM2017
|
a227d7a954ccc52280771ec1d633421aab23bea2
|
875bb03e7cecc2269b06115b2730644bec0b5e19
|
refs/heads/master
| 2020-03-28T13:50:36.764409 | 2017-04-20T11:55:05 | 2017-04-20T11:55:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
import numpy as np
from HomeDepotCSVReader import HomeDepotReader
from FeatureEngineering import HomeDepotFeature
from HomeDepotCSVWriter import HomeDepotCSVWriter
from XGBoostRanker import XGBoostRanker
from OrdinalRegressionRanker import OrdinalRegressionRanker
import LogisticRegressionRanker
from DataPreprocessing import DataPreprocessing
import Feature_Doc2Vec
import FacMachineRanker
from Utilities import Utility
def getFeature(train_query_df, product_df, attribute_df, test_query_df, features):
print("#### Running: RunMe.getFeature() ####")
feature_df = HomeDepotFeature().getFeature(train_query_df, product_df, attribute_df, test_query_df,features=features)
# Write all feature to a CSV. Next time can just read from here
dumpFeature2CSV(feature_df, "../data/features_full.csv")
return feature_df
def dumpFeature2CSV(dataframe, fileName):
print("#### Running: RunMe.dumpFeature2CSV() ####")
HomeDepotCSVWriter().dumpCSV(dataframe, fileName)
def dumpFeature2RanklibCSV(dataframe, fileName):
print("#### Running: RunMe.dumpFeature2RanklibCSV() ####")
HomeDepotCSVWriter().write2RankLibCSV(dataframe, fileName)
def runXGBoostRanker():
print("#### Running: RunMe.runXGBoostRanker() ####")
reader = HomeDepotReader()
feature_df = reader.getBasicDataFrame("../data/features_doc2vec_sense2vec_20170416.csv")
feature_train_df = feature_df[:74067]
feature_test_df = feature_df[74067:]
feature_test_df.pop('relevance')
soln_filename = '../data/solution.csv'
soln_df = pd.read_csv(soln_filename, delimiter=',', low_memory=False, encoding="ISO-8859-1")
dp = DataPreprocessing()
test_private_df = dp.getGoldTestSet(feature_test_df, soln_df,
testsetoption='Private')
test_public_df = dp.getGoldTestSet(feature_test_df, soln_df,
testsetoption='Public')
xgb = XGBoostRanker(feature_train_df)
xgb.train_Regressor(feature_train_df)
# xgb.gridSearch_Regressor(feature_train_df)
# result_df = xgb.test_Model(test_public_df)
result_df = xgb.test_Model(test_private_df)
# # Compute NDCG Score
# gold_df = pd.DataFrame()
# gold_df['search_term'] = test_private_df['search_term']
# gold_df['product_uid'] = test_private_df['product_uid']
# gold_df['relevance_int'] = test_private_df['relevance']
# ndcg = NDCG_Eval()
# ndcg.computeAvgNDCG(gold_df, result_df)
# # Dump the prediction to csv
# result_df.pop('product_uid')
# result_df.pop('search_term')
# result_df.pop('relevance_int')
# print(result_df.columns)
# dumpFeature2CSV(result_df, "../data/xgboost_private_20170417.csv")
def runOrdinalRegressionRankerLAD(train_df, test_df):
print("#### Running: OrdinalRegression LAD ####")
# dp=DataPreprocessing()
# trainDF,validateDF=dp.generateValidationSet(train_df)
orRanker = OrdinalRegressionRanker('lad')
orRanker.train(train_df, None)
print("#### Completed: OrdinalRegression LAD ####")
def runOrdinalRegressionRankerOrdRidgeGridSearch(train_df, test_df):
print("#### Running GridSearch: OrdinalRegression ordridge ####")
# dp=DataPreprocessing()
# trainDF,validateDF=dp.generateValidationSet(train_df)
orRanker = OrdinalRegressionRanker('ordridge')
orRanker.gridSearch(train_df, None)
print("#### Completed GridSearch: OrdinalRegression ordridge ####")
def runOrdinalRegressionRankerOrdRidge(train_df, test_df):
print("#### Running: OrdinalRegression ordridge training ####")
# dp=DataPreprocessing()
# trainDF,validateDF=dp.generateValidationSet(train_df)
orRanker = OrdinalRegressionRanker('ordridge')
orRanker.train(train_df, None)
print("#### Completed: OrdinalRegression ordridge training ####")
return orRanker
def runFacMachineRanker(train_df, test_df):
print("#### Running: Factorisation Machine ####")
fmRanker = FacMachineRanker.FacMachineRanker()
fmRanker.train(train_df, None)
print("#### Completed: Fac Machine ####")
def runOrdinalRegressionRankerLogit(train_df, test_df):
print("#### Running: OrdinalRegression LOGIT ####")
# dp=DataPreprocessing()
# trainDF,validateDF=dp.generateValidationSet(train_df)
orRanker = OrdinalRegressionRanker('logit')
orRanker.train(train_df, None)
print("#### Completed: OrdinalRegression LOGIT ####")
def runOrdinalRegressionRankerLogat(train_df, test_df):
print("#### Running: OrdinalRegression LOGAT ####")
# dp=DataPreprocessing()
# trainDF,validateDF=dp.generateValidationSet(train_df)
orRanker = OrdinalRegressionRanker('logat')
orRanker.train(train_df, None)
print("#### Completed: OrdinalRegression LOGAT ####")
def runLogisticRegressionRanker(train_df, test_df):
print("#### Running: Logistic Regression ####")
# dp=DataPreprocessing()
# trainDF,validateDF=dp.generateValidationSet(train_df)
lrRanker = LogisticRegressionRanker.LogisticRegressionRanker()
lrRanker.train(train_df, None)
print("#### Completed: Logistic Regression ####")
# lrRanker.train(trainDF, validateDF)
if __name__ == "__main__":
train_filename = '../../data/train.csv'
test_filename = '../../data/test.csv'
attribute_filename = '../../data/attributes.csv'
description_filename = '../../data/product_descriptions.csv'
reader = HomeDepotReader()
train_query_df, product_df, attribute_df, test_query_df = reader.getQueryProductAttributeDataFrame(train_filename,
test_filename,
attribute_filename,
description_filename)
print("train_query_df:",list(train_query_df))
print("product_df:", list(product_df))
print("attribute_df:", list(attribute_df))
print("test_query_df:", list(test_query_df))
desiredFeatures="brand,attribute,spelling,nonascii,stopwords,colorExist,brandExist,wmdistance,stemming,word2vec,Word2VecQueryExpansion,tfidf,tfidf_expandedquery,doc2vec,doc2vec_expandedquery,bm25,bm25expandedquery,bm25description,bm25title,bm25brand,doclength,pmi"
print("Starting Feature Engineering")
# Combine train and test, then generate features for both in one go.
all_df = pd.concat((train_query_df, test_query_df))
feature_df = getFeature(all_df, product_df, attribute_df, test_query_df, features=desiredFeatures)
# Run personal models from this point onward
# runOrdinalRegressionRanker(train_query_df, test_query_df)
# runXGBoostRanker(train_query_df, test_query_df)
|
UTF-8
|
Python
| false | false | 6,682 |
py
| 61 |
RunMe.py
| 22 | 0.693655 | 0.685424 | 0 | 158 | 41.291139 | 268 |
MarcelRaschke/netbox
| 9,775,345,608,548 |
6f78fca1a9a7f207c018916f6e3cc2efce190691
|
3d1aacb0ce641a1d96cb4a4b1363b0d03bc3f87c
|
/netbox/users/migrations/0003_token_permissions.py
|
a8a1f2a6e978ed00d6bd5da245405609a84d5d9b
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/MarcelRaschke/netbox
|
242e30909c8cdcc6cbfb1e5beb7fc29752d5025e
|
8206e46991498616bd9fdc5f40b57e66067b674a
|
refs/heads/develop
| 2023-09-05T18:59:32.540609 | 2022-08-12T02:26:58 | 2022-08-12T02:26:58 | 160,838,955 | 1 | 1 |
Apache-2.0
| true | 2023-04-30T00:40:43 | 2018-12-07T15:08:25 | 2023-04-15T12:32:00 | 2023-04-30T00:40:42 | 8,689 | 1 | 1 | 33 |
Python
| false | false |
# Generated by Django 2.0.8 on 2018-10-05 14:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_api_tokens_squashed_0002_unicode_literals'),
]
operations = [
migrations.AlterModelOptions(
name='token',
options={},
),
]
|
UTF-8
|
Python
| false | false | 345 |
py
| 235 |
0003_token_permissions.py
| 136 | 0.594203 | 0.527536 | 0 | 17 | 19.294118 | 68 |
GustavoGB/APS_LOGICA
| 12,549,894,466,787 |
88d441a5e9dfeeaf33b78a056087c55d4e7455aa
|
e1c284c6e4605e1f33b057e7fc437c68cf4e1dc9
|
/APS_FINAL/preprocess.py
|
0585bb019379627258e0b896f237c1bcf9fd6d4f
|
[] |
no_license
|
https://github.com/GustavoGB/APS_LOGICA
|
bbf77a718ca72203c32d3602b5af347722b27c97
|
243b56f7018b7c46fc56a64ae3b2e86b30ff83d8
|
refs/heads/master
| 2022-11-06T20:33:04.641363 | 2020-06-29T22:24:15 | 2020-06-29T22:24:15 | 248,819,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import re
from main import *
class PrePro:
@staticmethod
def filter(codigo):
codigo_filtrado = re.sub(re.compile("/\*.*?\*/", re.DOTALL), "",codigo)
codigo_filtrado = re.sub("\n", "", codigo_filtrado)
return codigo_filtrado
|
UTF-8
|
Python
| false | false | 272 |
py
| 7 |
preprocess.py
| 5 | 0.606618 | 0.606618 | 0 | 10 | 26.2 | 79 |
cofax48/FluentCityAnagrams
| 6,674,379,209,847 |
bd1b39fd1c2829eb1a212a2de38569d7b79c7eb6
|
64654af5eb28f2be51fee9332e79e9c8e48374b9
|
/DjangoHeroku/hello/views.py
|
6aa9190810b7c692730969be63319f7b29c11dfc
|
[] |
no_license
|
https://github.com/cofax48/FluentCityAnagrams
|
74c0ac1fe9c6c58e7d62e8ce503ff5001560fbe3
|
abc2ecd671198354cc6f435c24e532fd24b0dff9
|
refs/heads/master
| 2020-03-21T19:12:15.537016 | 2018-06-27T22:07:28 | 2018-06-27T22:07:28 | 138,935,050 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.http import FileResponse
from django.http import JsonResponse
#Don't want to deal with CSRF
from django.views.decorators.csrf import csrf_exempt
#El classico
import json
from sqlalchemy import create_engine
#fancy dictionary sorting
from operator import itemgetter
#for handling nan types in data
from numpy import nansum
from pandas import isnull
#Where my anagram_algorithm is located
from .anagram_algorithm import is_string_a_word_checker
#connects to my database
engine = create_engine('postgres://iwogouitiuowon:a1e97051f3c10aff7a0d0fedcaf759a7b259be0130e4a6b1790ed5c6c70a02e1@ec2-54-221-220-59.compute-1.amazonaws.com:5432/daepj190brg9i7')#10 million rows
conn = engine.connect()
# My views are here.
@csrf_exempt # As this is an unpaid project I'm skipping the CSRF protocol
def index(request):
return render(request, 'Homepage.html')
#APIs
# Gets a word from the client, runs the anagram algorithm and returns the anagrams as JSON
@csrf_exempt
def word_to_check(request):
data = json.loads(request.body.decode('utf-8'))
fields = [i for i in data]
expected_fields = ["word"]
# If the provided data params match the expected params for this API then we proceed
if expected_fields == fields:
word_to_check = data["word"]
list_of_anagrams = is_string_a_word_checker(word_to_check, conn)
return JsonResponse(list_of_anagrams, safe=False)
|
UTF-8
|
Python
| false | false | 1,468 |
py
| 11 |
views.py
| 8 | 0.773161 | 0.732289 | 0 | 40 | 35.7 | 194 |
itisianlee/hawk-facedet
| 4,286,377,374,323 |
b953e66f180e14b1db2cc005fd239cbe7c10c6f8
|
2f82e063549626463b4febdc588360a8d51234b3
|
/hawkdet/dataset/transformers.py
|
a86ab6894db4939b7b687a294448c9e44447c480
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/itisianlee/hawk-facedet
|
a235bde2d1d05557d94a2accbccb9eb53df24678
|
55774ac5619f9a4c76a3a872ff11940a874b32d1
|
refs/heads/main
| 2023-04-06T01:39:33.052760 | 2021-06-12T15:53:58 | 2021-06-12T15:53:58 | 353,374,546 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import numpy as np
import random
from ..lib.box_utils import matrix_iof
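# Data-augmentation transforms for face-detection training; each transform takes and
# returns an item dict holding 'image', 'bboxes', 'labels' and (optionally) 'landmarks'.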
class RandomCrop:
def __init__(self, image_size=(640, 640), iof_factor=1.0, min_face=16):
self.image_size = image_size
self.iof_factor = iof_factor # iof(IoF(forgrand))
self.min_face = min_face
self.pre_scales = [0.3, 0.45, 0.6, 0.8, 1.0]
def __call__(self, item):
img = item.get('image')
bboxes = item.get('bboxes')
labels = item.get('labels')
lmks = item.get('landmarks', None)
img_h, img_w, _ = img.shape
for _ in range(250):
scale = random.choice(self.pre_scales)
short_side = min(img_h, img_w)
side_len = int(scale * short_side)
l = np.random.randint(0, img_w-side_len+1)
t = np.random.randint(0, img_h-side_len+1)
roi = np.array((l, t, l+side_len, t+side_len))
value = matrix_iof(bboxes, roi[np.newaxis])
flag = (value >= self.iof_factor)
if not flag.any():
continue
centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2
mask = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
bboxes_t = bboxes[mask].copy()
labels_t = labels[mask].copy()
lmks_t = lmks[mask].copy()
lmks_t = lmks_t.reshape([-1, 5, 2])
if bboxes_t.shape[0] == 0:
continue
img_t = img[roi[1]:roi[3], roi[0]:roi[2]]
bboxes_t[:, :2] = np.maximum(bboxes_t[:, :2], roi[:2])
bboxes_t[:, :2] -= roi[:2]
bboxes_t[:, 2:] = np.minimum(bboxes_t[:, 2:], roi[2:])
bboxes_t[:, 2:] -= roi[:2]
# landm
lmks_t[:, :, :2] = lmks_t[:, :, :2] - roi[:2]
lmks_t[:, :, :2] = np.maximum(lmks_t[:, :, :2], np.array([0, 0]))
lmks_t[:, :, :2] = np.minimum(lmks_t[:, :, :2], roi[2:] - roi[:2])
lmks_t = lmks_t.reshape([-1, 10])
# make sure that the cropped image contains at least one face larger than min_face pixels at the training image scale
b_w_t = (bboxes_t[:, 2] - bboxes_t[:, 0] + 1) / side_len * self.image_size[0]
b_h_t = (bboxes_t[:, 3] - bboxes_t[:, 1] + 1) / side_len * self.image_size[1]
mask = np.minimum(b_w_t, b_h_t) > self.min_face
bboxes_t = bboxes_t[mask]
labels_t = labels_t[mask]
lmks_t = lmks_t[mask]
if bboxes_t.shape[0] == 0:
continue
return {
'image': img_t,
'bboxes': bboxes_t,
'labels': labels_t,
'landmarks': lmks_t
}
return {
'image': img,
'bboxes': bboxes,
'labels': labels,
'landmarks': lmks
}
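# Photometric augmentation: randomly jitter brightness, contrast, saturation and hue,
# applying the contrast change either before or after the HSV adjustments.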
class RandomDistort:
def __call__(self, item):
img = item.get('image')
def _convert(image, alpha=1, beta=0):
tmp = image.astype(float) * alpha + beta
tmp[tmp < 0] = 0
tmp[tmp > 255] = 255
image[:] = tmp
image = img.copy()
if random.randrange(2):
#brightness distortion
if random.randrange(2):
_convert(image, beta=random.uniform(-32, 32))
#contrast distortion
if random.randrange(2):
_convert(image, alpha=random.uniform(0.5, 1.5))
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
#saturation distortion
if random.randrange(2):
_convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
#hue distortion
if random.randrange(2):
tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
tmp %= 180
image[:, :, 0] = tmp
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
else:
#brightness distortion
if random.randrange(2):
_convert(image, beta=random.uniform(-32, 32))
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
#saturation distortion
if random.randrange(2):
_convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
#hue distortion
if random.randrange(2):
tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
tmp %= 180
image[:, :, 0] = tmp
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
#contrast distortion
if random.randrange(2):
_convert(image, alpha=random.uniform(0.5, 1.5))
item['image'] = image
return item
class Pad:
def __init__(self, img_mean=[104, 111, 120]):
self.img_mean = img_mean
def __call__(self, item):
img = item.get('image')
height, width, _ = img.shape
if height == width:
return item
long_side = max(width, height)
image_t = np.empty((long_side, long_side, 3), dtype=img.dtype)
image_t[:, :] = self.img_mean
image_t[0:0 + height, 0:0 + width] = img
# store the padded square canvas (not the original image) back into the item
item['image'] = image_t
return item
class RandomFlip:
def __call__(self, item):
img = item.get('image')
bboxes = item.get('bboxes')
lmks = item.get('landmarks', None)
_, width, _ = img.shape
if random.randrange(2):
img = cv2.flip(img, 1)
bboxes = bboxes.copy()
bboxes[:, 0::2] = width - bboxes[:, 2::-2]
# landm
lmks = lmks.copy()
lmks = lmks.reshape([-1, 5, 2])
lmks[:, :, 0] = width - lmks[:, :, 0]
tmp = lmks[:, 1, :].copy()
lmks[:, 1, :] = lmks[:, 0, :]
lmks[:, 0, :] = tmp
tmp1 = lmks[:, 4, :].copy()
lmks[:, 4, :] = lmks[:, 3, :]
lmks[:, 3, :] = tmp1
lmks = lmks.reshape([-1, 10])
item['image'] = img
item['bboxes'] = bboxes
item['landmarks'] = lmks
return item
class Resize:
def __init__(self, image_size=(640, 640)): # h, w
self.image_size = image_size
def box_resize(self, img_h, img_w, bboxes=None):
scale_x = self.image_size[1] / img_w
scale_y = self.image_size[0] / img_h
if bboxes is not None:
bboxes *= [scale_x, scale_y, scale_x, scale_y]
return bboxes
def lmk_resize(self, img_h, img_w, lmks=None):
scale_x = self.image_size[1] / img_w
scale_y = self.image_size[0] / img_h
if lmks is not None:
lmks *= ([scale_x, scale_y]*5)
return lmks
def __call__(self, item):
img = item.get('image')
bboxes = item.get('bboxes')
lmks = item.get('landmarks', None)
ori_h, ori_w, _ = img.shape
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
interp_method = interp_methods[random.randrange(5)]
img = cv2.resize(img, self.image_size[::-1], interpolation=interp_method)
item['image'] = img.astype(np.uint8)
item['bboxes'] = self.box_resize(ori_h, ori_w, bboxes)
item['landmarks'] = self.lmk_resize(ori_h, ori_w, lmks)
return item
class ImageT:
def __call__(self, item):
img = item.get('image')
img = img.transpose(2, 0, 1)
item['image'] = img
return item
class Normalize:
def __init__(self, image_mean, image_std):
self.image_mean = image_mean
self.image_std = image_std
def __call__(self, item):
img = item.get('image')
img = (img - self.image_mean) / self.image_std
item['image'] = img
return item
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, item):
for t in self.transforms:
item = t(item)
return item
def build_transforms(image_size, image_mean, image_std, iof_factor=1.0, min_face=16):
transforms = Compose([
RandomCrop(image_size, iof_factor, min_face),
RandomDistort(),
Pad(image_mean),
RandomFlip(),
Normalize(image_mean, image_std),
Resize(image_size),
ImageT(),
])
return transforms
|
UTF-8
|
Python
| false | false | 8,421 |
py
| 25 |
transformers.py
| 23 | 0.490441 | 0.464078 | 0 | 268 | 30.425373 | 115 |
Frexiona/SQL-ASSIGNMENT
| 16,381,005,310,661 |
3f9c620d215bc7cb10cf12cd6368535ba95ffa0c
|
1efa822034054b743f7f2a4402bc074e7f086300
|
/CSV_Split.py
|
3074738414c608a7830a4002a471a19141170d65
|
[] |
no_license
|
https://github.com/Frexiona/SQL-ASSIGNMENT
|
db50e76d5e8f3e082a1597915bc10910fbd055d8
|
d7d0a1ed92f57faf1a1b34964f646fd45a7f768c
|
refs/heads/master
| 2020-04-26T12:56:03.678985 | 2019-03-31T11:45:44 | 2019-03-31T11:45:44 | 173,565,056 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Author: Haolin Zhang
Date: 20-02-2019
"""
import mysql.connector as sql
import pandas as pd
import os
import csv
__location__ = 'path'
# Connect mysql function
def connectSql():
conn = sql.connect(host = '127.0.0.1', user = 'root', password = "password", database = "books_studentID")
return conn
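# Split one column of the source spreadsheet into its own CSV file:
# one row per (ISBN, value) pair, plus a Rank column for every topic except 'Book'.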
def csv_divide(column_name):
ori_file_path = os.path.join(__location__, 'Database of books.csv')
out_file_path = os.path.join(__location__,'A2', column_name + '.csv')
df = pd.read_csv(ori_file_path, encoding= 'ISO-8859-1')
# a means append the data at the end of the file
# newline = '' prevents extra blank lines between rows when writing on Windows
out = open(out_file_path, 'a', newline= '')
csv_write = csv.writer(out, dialect='excel')
if column_name == 'Book':
csv_write.writerow(['ISBN', column_name])
else:
csv_write.writerow(['ISBN', column_name, 'Rank'])
if (column_name != 'Author'):
for i in range(len(df)):
ISBN = str(df['ISBN'][i])
for j in range(len(str(df[column_name][i]).split(','))):
# break when the string is none or blank spaces
if str(df[column_name][i]).split(',')[j].isspace() or str(df[column_name][i]).split(',')[j] == '':
break
else:
writing_list = list()
writing_list.append(ISBN)
# strip whitespace on either side of the word
writing_list.append(str(df[column_name][i]).split(',')[j].strip())
if column_name == 'Book':
csv_write.writerow(writing_list)
else:
writing_list.append(j + 1)
csv_write.writerow(writing_list)
else:
for i in range(len(df)):
ISBN = str(df['ISBN'][i])
for j in range(len(str(df[column_name][i]).split(' and '))):
writing_list = list()
writing_list.append(ISBN)
# strip whitespace on either side of the word
writing_list.append(str(df[column_name][i]).split(' and ')[j].strip())
writing_list.append(j + 1)
csv_write.writerow(writing_list)
print(column_name, "Writing Done!")
# Insert values into Titles
def insertValues(topic):
out_file_path = os.path.join(__location__, 'A2', topic + '.csv')
conn = connectSql()
cur = conn.cursor()
try:
cur.execute("load data local infile '%s' into table %s" % (out_file_path, topic))
conn.commit()
except Exception as e:
print(e)
print(topic, "Failed")
topics = ['Book', 'Author', 'Themes', 'Qualities']
for topic in topics:
# csv_divide(topic)
insertValues(topic)
|
UTF-8
|
Python
| false | false | 2,814 |
py
| 4 |
CSV_Split.py
| 3 | 0.548685 | 0.540512 | 0 | 82 | 33.317073 | 114 |
pyrfume/pyrfume
| 12,017,318,509,642 |
24e0e6a8bc160aa1d1e7a077726b5a7aa26fee86
|
a873ed639369c60a1653048bc1481af0bad14b03
|
/notebooks/snitz-dragon-selection.py
|
53057ec4c1edf0fb678c8e53a2cc2213f3851c58
|
[
"MIT"
] |
permissive
|
https://github.com/pyrfume/pyrfume
|
636243df2ae1daec21977369fad4d914bbb35e73
|
f9a1261edd207a6e726470bcc6285ac1fa1b1982
|
refs/heads/main
| 2023-08-08T10:25:38.783613 | 2023-07-31T01:22:09 | 2023-07-31T01:22:09 | 112,794,726 | 39 | 13 |
MIT
| false | 2023-07-31T01:22:10 | 2017-12-01T22:53:40 | 2023-07-04T14:23:30 | 2023-07-31T01:22:09 | 10,073 | 32 | 7 | 4 |
Python
| false | false |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fancyimpute import KNN
from sklearn.linear_model import Lasso
from sklearn.model_selection import ShuffleSplit, cross_validate
from sklearn.preprocessing import MinMaxScaler, Normalizer
# Load Snitz Dataset #1
df1 = pd.read_csv(
"data/snitz/experiment1_comparisons.csv", header=0, index_col=0, names=["A", "B", "Similarity"]
)
df1_cids = pd.read_csv("data/snitz/experiment1_cids.csv", index_col=0)
df1_cids = df1_cids.applymap(
lambda x: x.replace("[", "").replace("]", "").strip().replace(" ", ",")
)
df1_cids
df1.loc[:, ["A", "B"]] = df1.loc[:, ["A", "B"]].applymap(lambda x: df1_cids.loc[x]["Mixture Cids"])
df1.head()
df1.shape[0], len(set(df1[["A", "B"]].values.ravel()))
df1.hist("Similarity")
# Load Snitz Dataset #2
df2 = pd.read_csv(
"data/snitz/experiment2_comparisons.csv", header=0, index_col=0, names=["A", "B", "Similarity"]
)
df2_cids = pd.read_csv("data/snitz/experiment2_cids.csv", index_col=0)
df2_cids = df2_cids.applymap(
lambda x: x.replace("[", "").replace("]", "").strip().replace(" ", ",")
)
df2_cids
df2.loc[:, ["A", "B"]] = df2.loc[:, ["A", "B"]].applymap(lambda x: df2_cids.loc[x]["Mixture Cids"])
df2.head()
df2.shape[0], len(set(df2[["A", "B"]].values.ravel()))
df2.hist("Similarity")
# ### Load Snitz Dataset #3
df3 = pd.read_csv(
"data/snitz/experiment3_comparisons.csv", header=0, index_col=0, names=["A", "B", "Similarity"]
)
df3.head()
df3.shape[0], len(set(df3[["A", "B"]].values.ravel()))
df3.hist("Similarity")
# ### Get all Snitz CIDs
snitz_cids = []
for x in df1_cids["Mixture Cids"]:
snitz_cids += x.split(",")
for x in df2_cids["Mixture Cids"]:
snitz_cids += x.split(",")
for x in df3[["A", "B"]].values.ravel():
snitz_cids += [x]
snitz_cids = np.array(snitz_cids).astype(int)
snitz_cids = set(snitz_cids)
print("There are %d distinct CIDs across all of the Snitz datasets" % len(snitz_cids))
# ### Load the Dragon data and scale each feature to 0-1.
# +
df_dragon = pd.read_csv("data/cids-smiles-dragon.txt").set_index("CID")
df_dragon = df_dragon.iloc[:, 1:] # Remove SMILES column
# Normalize every feature to [0, 1]
mms = MinMaxScaler()
df_dragon[:] = mms.fit_transform(df_dragon)
with open("data/dragon-minmaxscaler.pickle", "wb") as f:
pickle.dump(mms, f)
# -
# ### Cleanup and Impute
# No dragon info yet for these CIDs
no_dragon = snitz_cids.difference(df_dragon.index)
no_dragon
# +
# Remove these from the Snitz data
df_snitz_dragon = df_dragon.loc[snitz_cids.difference(no_dragon)]
for nd in no_dragon:
df_snitz_dragon.loc[nd, :] = 0
# +
# Remove bad features (too many NaNs) and impute remaining NaNs
frac_bad = df_snitz_dragon.isnull().mean()
good = frac_bad[frac_bad < 0.3].index
df_snitz_dragon = df_snitz_dragon.loc[:, good]
knn = KNN(k=5)
df_snitz_dragon[:] = knn.fit_transform(df_snitz_dragon.values)
# +
# from olfactometer.odorants import from_cids
# pubchem_data = from_cids([int(x) for x in snitz_cids])
# pd.DataFrame.from_dict(pubchem_data).set_index('CID').to_csv('data/snitz-odorant-info.csv')
# +
# df_snitz_mordred = pd.read_csv('data/snitz-mordred.csv').set_index('CID')
# df_snitz_mordred[:] = mms.fit_transform(df_snitz_mordred.values)
# df_snitz_mordred.head()
# -
df_snitz_features = df_snitz_dragon
# Normalize every molecule to have unit norm (i.e. to be a unit vector in feature space)
nmr = Normalizer()
df_snitz_features[:] = nmr.fit_transform(df_snitz_features)
def get_unit_distance(row):
"""Convert feature vectors to unit vectors, summing across odorants if needed
and then getting the vector difference, which will be related to the cosine
of the angle between them"""
a, b, similarity = row
if isinstance(a, str):
a = [int(x) for x in a.split(",")]
b = [int(x) for x in b.split(",")]
A = df_snitz_features.loc[a, :].values
B = df_snitz_features.loc[b, :].values
if A.ndim > 1:
A = A.sum(axis=0)
B = B.sum(axis=0)
A /= np.linalg.norm(A)
B /= np.linalg.norm(B)
return pd.Series(np.abs(A - B), index=df_snitz_features.columns, name=row.name)
df_distance = pd.concat([df1, df2, df3]).reset_index(drop=True)
features = list(df_snitz_features.columns)
unit_distances = df_distance.apply(get_unit_distance, axis=1)
df_distance = df_distance.join(df_distance.apply(get_unit_distance, axis=1))
df_distance.loc[:, "Similarity"] /= 100
df_distance.head()
# %matplotlib inline
model = Lasso(alpha=1e-4, max_iter=1e5)
X = df_distance[features]
y = df_distance["Similarity"]
model.fit(X, y)
plt.plot(1 + np.arange(len(model.coef_)), sorted(np.abs(model.coef_))[::-1])
plt.xscale("log")
# +
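# Scoring callable for cross_validate: Pearson correlation between predicted and
# observed perceptual similarity.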
def r_score(model, X, y_true):
y_pred = model.predict(X)
# print(y_true.shape, y_pred.shape)
return np.corrcoef(y_true, y_pred)[0, 1]
alphas = np.logspace(-5, -2, 9)
n_splits = 25
cv = ShuffleSplit(n_splits=n_splits, test_size=0.2)
training = np.zeros((len(alphas), n_splits))
testing = np.zeros((len(alphas), n_splits))
for i, alpha in enumerate(alphas):
print(alpha)
model = Lasso(alpha=alpha, max_iter=1e5)
fff = cross_validate(model, X, y, cv=cv, return_train_score=True, scoring=r_score)
training[i, :] = fff["train_score"]
testing[i, :] = fff["test_score"]
# -
plt.errorbar(alphas, training.mean(axis=1), yerr=training.std(axis=1), label="Train")
plt.errorbar(alphas, testing.mean(axis=1), yerr=testing.std(axis=1), label="Test")
plt.xscale("log")
plt.xlabel("Alpha")
plt.ylabel("R")
plt.legend()
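# Refit at the selected alpha and keep only the features with non-negligible weights.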
model = Lasso(alpha=1e-4, max_iter=1e5)
model.fit(X, y)
snitz_space_weights = pd.Series(model.coef_, index=features, name="Weight")
snitz_space_weights = snitz_space_weights[np.abs(snitz_space_weights) > 1e-5]
snitz_space_weights
snitz_space_weights.to_csv("data/snitz_dragon_weights.csv", header=True)
|
UTF-8
|
Python
| false | false | 6,147 |
py
| 132 |
snitz-dragon-selection.py
| 98 | 0.662437 | 0.645193 | 0 | 210 | 28.271429 | 99 |
brunorasteiro/birdie_psel_ds
| 3,461,743,647,571 |
18daecd04b139cdc486c1be393aef4727b3b9c03
|
e9b95b9fe1abe139750640c371cce46a39f8f328
|
/etapa_1/etapa_1.py
|
b80df9e48d82eaac2d738d34e3caab810192f0c7
|
[] |
no_license
|
https://github.com/brunorasteiro/birdie_psel_ds
|
a50f3603b6ffa537e10a1ec7651387597e94801f
|
70e867a8aa0006357b4d9d8c2fcd9ec64261a9f7
|
refs/heads/master
| 2020-04-12T05:20:37.579987 | 2019-01-04T00:53:56 | 2019-01-04T00:53:56 | 162,323,557 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
# In[200]:
import pandas as pd
import sys
# In[201]:
# try to read the products file from the first command-line argument
# if that fails, try to open the default file 'data_estag_ds.tsv'
try:
path = sys.argv[1]
data = pd.read_csv(path, sep='\t')
except Exception as e:
try:
data = pd.read_csv('data_estag_ds.tsv', sep='\t')
except:
print('Erro ao abrir arquivo: ', e)
sys.exit()
# In[202]:
# insert the CLASSE column, to be filled later with the label 'smartphone' or 'não-smartphone'
data.insert(data.shape[1], 'CLASSE', '')
# In[203]:
from unidecode import unidecode
# Function that classifies a product as 'smartphone' or 'não-smartphone' based on its listing
# Parameters:
# * prod: <str> Product listing title
# * kw_smart: <list of str> Keywords that a smartphone listing is likely to contain
# * kw_nsmart: <list of str> Keywords that a non-smartphone listing is likely to contain
# Return:
# * Returns a string with the product class, 'smartphone' or 'não-smartphone'
def classifica(prod, kw_smart, kw_nsmart):
# replace special characters and lowercase the product listing
prod = unidecode( prod.lower() )
# check whether the listing contains any non-smartphone keyword
if any(kw in prod for kw in kw_nsmart):
return 'não-smartphone'
# check whether the listing contains any smartphone keyword
elif any(kw in prod for kw in kw_smart):
return 'smartphone'
# anything that is not a smartphone defaults to non-smartphone
else:
return 'não-smartphone'
# In[204]:
# keywords that a non-smartphone listing is likely to contain
key_nsmart = ['capa',
'capinha',
'case',
'pelicula',
'acessorio',
'tablet',
'tab ',
'relogio',
'smartwatch',
'bumper',
'bumber',
'protetores',
'protetor',
'suporte',
'kit',
'cabo',
'bracadeira',
'ipad',
'adesivo',
'lentes',
'lente',
'carregador',
'repetidor',
'espelhamento',
'mirror',
'antena',
'watch',
'interface']
# In[205]:
# keywords that a smartphone listing is likely to contain
key_smart = ['smartphone',
'celular',
'iphone',
'galaxy',
'samsung a',
'samsung j',
'moto ',
'xperia',
'zenfone',
'lg k',
'xiaomi mi',
'rom global',
'xiaomi redmi',
'oneplus',
'caterpillar cat',
'motorola nextel']
# In[207]:
# apply the classification to every row and store the result in the 'CLASSE' column
data['CLASSE'] = data['TITLE'].apply(lambda x: classifica(x, key_smart, key_nsmart) )
# save the products and their assigned classes to CSV
data.to_csv("produtos_classificados.csv", index=False)
|
UTF-8
|
Python
| false | false | 3,316 |
py
| 12 |
etapa_1.py
| 3 | 0.55894 | 0.55163 | 0 | 127 | 24.834646 | 103 |
peterzhaoc/Edu123Kid
| 11,991,548,734,321 |
c6edfc90cabb252f5c28b5c86303b6e819644edc
|
a585acb0a98f19b803df62697dfd49c661b37aaa
|
/writings/admin.py
|
ea0599a87df24672a1da71b91ab39279af9b5887
|
[] |
no_license
|
https://github.com/peterzhaoc/Edu123Kid
|
19b0ab5761faf37a3015e6bb2b9b42ce5c763cba
|
b3f823b2c1820fe335869d719b78a80f83bc8121
|
refs/heads/master
| 2021-09-17T11:04:21.757951 | 2018-07-01T06:09:01 | 2018-07-01T06:09:01 | 111,057,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from writings.models import *
class WritingTaskAdmin(admin.ModelAdmin):
list_display = ('title', 'author', 'publish_date', 'mentor_end_date', 'editor', 'finaleditor', 'pay', 'state', )
admin.site.register(Book)
admin.site.register(Img)
admin.site.register(WritingTask,WritingTaskAdmin)
|
UTF-8
|
Python
| false | false | 390 |
py
| 60 |
admin.py
| 36 | 0.769231 | 0.769231 | 0 | 10 | 38 | 116 |
kkang2097/GP-Derivatives-Variational-Inference
| 962,072,710,117 |
32c283364a814de9c7b731d0680afbebfc796781
|
359dcdb32288a300d3dcd9402532e4433c1b0c81
|
/experiments/rover/random_search.py
|
f5193a9c0d51dc158be05cb9d98361228f81704e
|
[] |
no_license
|
https://github.com/kkang2097/GP-Derivatives-Variational-Inference
|
7d94cec6171a20587887282724dd87ec37f2131f
|
0699c5ef20132f92e0bd4f41525eb09f6fd2c118
|
refs/heads/main
| 2023-08-20T10:43:31.166748 | 2021-10-25T20:46:21 | 2021-10-25T20:46:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from rover import rover_obj
import sys
if __name__ == '__main__':
dim = 200
max_evals = 2000
lb = -5 * np.ones(dim)
ub = 5 * np.ones(dim)
batch_size = 5
num_epochs = 30
from datetime import datetime
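# Derive both the RNG seed and a unique output-file barcode from the current timestamp.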
now = datetime.now()
seed = int("%d%.2d%.2d%.2d%.2d"%(now.month,now.day,now.hour,now.minute,now.second))
barcode = "%d%.2d%.2d%.2d%.2d%.2d"%(now.year,now.month,now.day,now.hour,now.minute,now.second)
np.random.seed(seed)
X = np.random.uniform(lb,ub,(max_evals,dim))
fX = [rover_obj(x) for x in X]
d ={}
d['X'] = X
d['fX'] = fX
d['mode'] = "Random Search"
outfilename = f"./output/data_rover_Random_Search_{max_evals}_evals_{barcode}.pickle"
import pickle
pickle.dump(d,open(outfilename,"wb"))
|
UTF-8
|
Python
| false | false | 769 |
py
| 62 |
random_search.py
| 54 | 0.617685 | 0.590377 | 0 | 31 | 23.709677 | 96 |
alejandropages/CSCE411
| 6,493,990,597,713 |
513bfc7ceb13936d7dae28e79e949a6cb1c15d5f
|
65522d34316c37a35093c543ef8e10f25ad5fe4d
|
/Assignment3/Stage1/Stage1_Stepb/dataParser/parser.py
|
4fe9f5a4ef8e3325c22480d15a359f4bffc64678
|
[] |
no_license
|
https://github.com/alejandropages/CSCE411
|
218599ccad399539aa426f150da4d4703186327c
|
665648b6fa39c454a650cbaddfda801c0ff441d1
|
refs/heads/master
| 2020-04-02T02:56:12.055957 | 2018-12-04T14:38:17 | 2018-12-04T14:38:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def main():
# data = loadData("data3.txt")
# data = loadData("data2.txt")
data = loadData("data.txt")
for entry in data:
print(entry["id"] + ": " + entry["name"])
if "state" in entry:
print(entry["state"] + " - " + entry["city"])
# for message in entry["messages"]:
# print(message)
return
def loadData(filepath):
people = list()
sections = _loadSections(filepath)
for section in sections:
# print("section:")
# print(section)
person = dict()
person["id"] = _extractValue(section[0])
person["name"] = _extractValue(section[1])
# print(_extractValue(section[2]))
locationTokens = _extractValue(section[2]).split(",")
# print(locationTokens)
if len(locationTokens) == 3:
if locationTokens[1].strip() != "":
person["state"] = locationTokens[1].strip()
person["city"] = locationTokens[0].strip()
person["messages"] = _buildMessages(section)
people.append(person)
return people
def _buildMessages(section):
messages = list()
message = dict()
isFirstMessage = True
for i in range(3, len(section)):
line = section[i]
value = _extractValue(line)
# print(value)
if _isTimestamp(i):
# print("Timestamp Line")
tokens = value.split(" ")
if not isFirstMessage:
# print("Not first message")
messages.append(message)
message = dict()
message["date"] = _convertDateFormat(tokens[0])
message["time"] = tokens[1]
isFirstMessage = False
else:
# print("Message Line")
message["value"] = value
if "date" in message:
messages.append(message)
return messages
def _convertDateFormat(date):
newDate = ""
tokens = date.split("/")
newDate += tokens[2] + "-"
newDate += tokens[0] + "-"
newDate += tokens[1]
return newDate
def _extractValue(line):
start = line.index(":") + 2
return line[start : len(line)]
def _isTimestamp(number):
return not (number % 2 == 0)
def _loadSections(filepath):
f = open(filepath, 'r')
NEW_SECTION_HEADER = "ID:"
sections = list()
section = list()
isFirstSection = True
for line in list(f):
strippedLine = line.strip()
if _lineIsIrrelevant(strippedLine):
continue
linePrefix = line[0:3]
if linePrefix == NEW_SECTION_HEADER:
if isFirstSection:
section.append(strippedLine)
isFirstSection = False
elif not isFirstSection:
sections.append(section)
section = list()
section.append(strippedLine)
else:
section.append(strippedLine)
if section[0] != "":
sections.append(section)
return sections
def _lineIsIrrelevant(line):
return (line == "") or (line[0:12] == "Process time")
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 3,120 |
py
| 39 |
parser.py
| 20 | 0.542949 | 0.534936 | 0 | 116 | 25.887931 | 61 |
vertexproject/synapse
| 5,454,608,493,252 |
a303a6f7bcd00b094a57de4e575d0abd286bb345
|
c5f7019c52cd91a3d9505943b9d866539f2fb0bc
|
/synapse/lib/cli.py
|
0155ccf59d03a6c1d0ced10b751218664e8d7fef
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/vertexproject/synapse
|
ce31699fcb10cb2c870d448915f4d4524247e2d0
|
1808dff78921b4bfdb451a12ee5d03427a5295b9
|
refs/heads/master
| 2023-09-03T23:48:26.584015 | 2023-08-31T20:34:35 | 2023-08-31T20:34:35 | 37,228,107 | 307 | 63 |
Apache-2.0
| false | 2023-09-14T21:53:32 | 2015-06-10T23:29:41 | 2023-09-14T20:07:23 | 2023-09-14T21:53:31 | 27,459 | 310 | 69 | 20 |
Python
| false | false |
import os
import json
import signal
import asyncio
import logging
import traceback
import collections
import regex
from prompt_toolkit import PromptSession, print_formatted_text
from prompt_toolkit.formatted_text import FormattedText
from prompt_toolkit.history import FileHistory
from prompt_toolkit.patch_stdout import patch_stdout
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.base as s_base
import synapse.lib.output as s_output
import synapse.lib.parser as s_parser
import synapse.lib.grammar as s_grammar
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
class Cmd:
'''
Base class for modular commands in the synapse CLI.
'''
_cmd_name = 'fixme'
_cmd_syntax = ()
def __init__(self, cli, **opts):
self._cmd_cli = cli
self._cmd_opts = opts
async def runCmdLine(self, line):
'''
Run a line of command input for this command.
Args:
line (str): Line to execute
Examples:
Run the foo command with some arguments:
await foo.runCmdLine('foo --opt baz woot.com')
'''
opts = self.getCmdOpts(line)
return await self.runCmdOpts(opts)
def getCmdItem(self):
'''
Get a reference to the object we are commanding.
'''
return self._cmd_cli.item
def getCmdOpts(self, text):
'''
Use the _cmd_syntax def to split/parse/normalize the cmd line.
Args:
text (str): Command to process.
Notes:
This is implemented independent of argparse (et al) due to the
need for syntax aware argument splitting. Also, allows different
split per command type
Returns:
dict: An opts dictionary.
'''
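# Illustrative example (not taken from this file): a command declaring
#     _cmd_syntax = (
#         ('name', {'type': 'valu'}),
#         ('--tags', {'type': 'list', 'defval': []}),
#         ('--force', {}),  # no type -> treated as a 'flag' switch
#     )
# would parse "foo bar --tags a,b --force" into roughly
# {'name': 'bar', 'tags': ['a', 'b'], 'force': True}.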
off = 0
_, off = s_grammar.nom(text, off, s_grammar.whites)
name, off = s_grammar.meh(text, off, s_grammar.whites)
_, off = s_grammar.nom(text, off, s_grammar.whites)
opts = {}
args = collections.deque([synt for synt in self._cmd_syntax if not synt[0].startswith('-')])
switches = {synt[0]: synt for synt in self._cmd_syntax if synt[0].startswith('-')}
# populate defaults and lists
for synt in self._cmd_syntax:
snam = synt[0].strip('-')
defval = synt[1].get('defval')
if defval is not None:
opts[snam] = defval
if synt[1].get('type') == 'list':
opts[snam] = []
def atswitch(t, o):
# check if we are at a recognized switch. if not
# assume the data is part of regular arguments.
if not text.startswith('-', o):
return None, o
name, x = s_grammar.meh(t, o, s_grammar.whites)
swit = switches.get(name)
if swit is None:
return None, o
return swit, x
while off < len(text):
_, off = s_grammar.nom(text, off, s_grammar.whites)
swit, off = atswitch(text, off)
if swit is not None:
styp = swit[1].get('type', 'flag')
snam = swit[0].strip('-')
if styp == 'valu':
valu, off = s_parser.parse_cmd_string(text, off)
opts[snam] = valu
elif styp == 'list':
valu, off = s_parser.parse_cmd_string(text, off)
if not isinstance(valu, list):
valu = valu.split(',')
opts[snam].extend(valu)
elif styp == 'enum':
vals = swit[1].get('enum:vals')
valu, off = s_parser.parse_cmd_string(text, off)
if valu not in vals:
raise s_exc.BadSyntax(mesg='%s (%s)' % (swit[0], '|'.join(vals)),
text=text)
opts[snam] = valu
else:
opts[snam] = True
continue
if not args:
raise s_exc.BadSyntax(mesg='trailing text: [%s]' % (text[off:],),
text=text)
synt = args.popleft()
styp = synt[1].get('type', 'valu')
# a glob type eats the remainder of the string
if styp == 'glob':
opts[synt[0]] = text[off:]
break
# eat the remainder of the string as separate vals
if styp == 'list':
valu = []
while off < len(text):
item, off = s_parser.parse_cmd_string(text, off)
valu.append(item)
opts[synt[0]] = valu
break
valu, off = s_parser.parse_cmd_string(text, off)
opts[synt[0]] = valu
return opts
def getCmdBrief(self):
'''
Return the single-line description for this command.
'''
return self.getCmdDoc().strip().split('\n', 1)[0].strip()
def getCmdName(self):
return self._cmd_name
def getCmdDoc(self):
'''
Return the help/doc output for this command.
'''
if not self.__doc__: # pragma: no cover
return ''
return self.__doc__
def printf(self, mesg, addnl=True, color=None):
return self._cmd_cli.printf(mesg, addnl=addnl, color=color)
async def runCmdOpts(self, opts):
'''
Perform the command actions. Must be implemented by Cmd implementers.
Args:
opts (dict): Options dictionary.
'''
raise s_exc.NoSuchImpl(mesg='runCmdOpts must be implemented by subclasses.',
name='runCmdOpts')
_setre = regex.compile(r'\s*set\s+editing-mode\s+vi\s*')
def _inputrc_enables_vi_mode():
'''
Emulate a small bit of readline behavior.
Returns:
(bool) True if current user enabled vi mode ("set editing-mode vi") in .inputrc
'''
for filepath in (os.path.expanduser('~/.inputrc'), '/etc/inputrc'):
try:
with open(filepath) as f:
for line in f:
if _setre.fullmatch(line):
return True
except IOError:
continue
return False
class Cli(s_base.Base):
'''
A modular / event-driven CLI base object.
'''
histfile = 'cmdr_history'
async def __anit__(self, item, outp=None, **locs):
await s_base.Base.__anit__(self)
if outp is None:
outp = s_output.OutPut()
self.outp = outp
self.locs = locs
self.cmdtask = None # type: asyncio.Task
self.sess = None
self.vi_mode = _inputrc_enables_vi_mode()
self.item = item # whatever object we are commanding
self.echoline = False
self.colorsenabled = False
if isinstance(self.item, s_base.Base):
self.item.onfini(self._onItemFini)
self.locs['syn:local:version'] = s_version.verstring
if isinstance(self.item, s_telepath.Proxy):
version = self.item._getSynVers()
if version is None: # pragma: no cover
self.locs['syn:remote:version'] = 'Remote Synapse version unavailable'
else:
self.locs['syn:remote:version'] = '.'.join([str(v) for v in version])
self.cmds = {}
self.cmdprompt = 'cli> '
self.initCmdClasses()
def initCmdClasses(self):
self.addCmdClass(CmdHelp)
self.addCmdClass(CmdQuit)
self.addCmdClass(CmdLocals)
async def _onItemFini(self):
if self.isfini:
return
self.printf('connection closed...')
await self.fini()
async def addSignalHandlers(self):
'''
Register SIGINT signal handler with the ioloop to cancel the currently running cmdloop task.
'''
def sigint():
self.printf('<ctrl-c>')
if self.cmdtask is not None:
self.cmdtask.cancel()
self.loop.add_signal_handler(signal.SIGINT, sigint)
def get(self, name, defval=None):
return self.locs.get(name, defval)
def set(self, name, valu):
self.locs[name] = valu
async def prompt(self, text=None):
'''
Prompt for user input from stdin.
'''
if self.sess is None:
history = None
histfp = s_common.getSynPath(self.histfile)
# Ensure the file is read/writeable
try:
with s_common.genfile(histfp):
pass
history = FileHistory(histfp)
except OSError: # pragma: no cover
logger.warning(f'Unable to create file at {histfp}, cli history will not be stored.')
self.sess = PromptSession(history=history)
if text is None:
text = self.cmdprompt
with patch_stdout():
retn = await self.sess.prompt_async(text, vi_mode=self.vi_mode, enable_open_in_editor=True)
return retn
def printf(self, mesg, addnl=True, color=None):
if not self.colorsenabled:
return self.outp.printf(mesg, addnl=addnl)
# print_formatted_text can't handle \r
mesg = mesg.replace('\r', '')
if color is not None:
mesg = FormattedText([(color, mesg)])
return print_formatted_text(mesg, end='\n' if addnl else '')
def addCmdClass(self, ctor, **opts):
'''
Add a Cmd subclass to this cli.
'''
item = ctor(self, **opts)
name = item.getCmdName()
self.cmds[name] = item
def getCmdNames(self):
'''
Return a list of all the known command names for the CLI.
'''
return list(self.cmds.keys())
def getCmdByName(self, name):
'''
Return a Cmd instance by name.
'''
return self.cmds.get(name)
def getCmdPrompt(self):
'''
Get the command prompt.
Returns:
str: Configured command prompt
'''
return self.cmdprompt
async def runCmdLoop(self):
'''
Run commands from a user in an interactive fashion until fini() or EOFError is raised.
'''
while not self.isfini:
self.cmdtask = None
try:
line = await self.prompt()
if not line:
continue
line = line.strip()
if not line:
continue
coro = self.runCmdLine(line)
self.cmdtask = self.schedCoro(coro)
await self.cmdtask
except KeyboardInterrupt:
if self.isfini:
return
self.printf('<ctrl-c>')
except (s_exc.CliFini, EOFError):
await self.fini()
except Exception:
s = traceback.format_exc()
self.printf(s)
finally:
if self.cmdtask is not None:
self.cmdtask.cancel()
try:
self.cmdtask.result()
except asyncio.CancelledError:
# Wait a beat to let any remaining nodes to print out before we print the prompt
await asyncio.sleep(1)
except Exception:
pass
async def runCmdLine(self, line):
'''
Run a single command line.
Args:
line (str): Line to execute.
Examples:
Execute the 'woot' command with the 'help' switch:
await cli.runCmdLine('woot --help')
Returns:
object: Arbitrary data from the cmd class.
'''
if self.echoline:
self.outp.printf(f'{self.cmdprompt}{line}')
ret = None
name = line.split(None, 1)[0]
cmdo = self.getCmdByName(name)
if cmdo is None:
self.printf('cmd not found: %s' % (name,))
return
try:
ret = await cmdo.runCmdLine(line)
except s_exc.CliFini:
await self.fini()
except asyncio.CancelledError:
self.printf('Cmd cancelled')
except s_exc.ParserExit as e:
pass # avoid duplicate print
except Exception as e:
exctxt = traceback.format_exc()
self.printf(exctxt)
self.printf('error: %s' % e)
return ret
class CmdQuit(Cmd):
'''
Quit the current command line interpreter.
Example:
quit
'''
_cmd_name = 'quit'
async def runCmdOpts(self, opts):
self.printf('o/')
raise s_exc.CliFini()
class CmdHelp(Cmd):
'''
List commands and display help output.
Example:
help foocmd
'''
_cmd_name = 'help'
_cmd_syntax = (
('cmds', {'type': 'list'}), # type: ignore
)
async def runCmdOpts(self, opts):
cmds = opts.get('cmds')
# if they didn't specify one, just show the list
if not cmds:
cmds = sorted(self._cmd_cli.getCmdNames())
padsize = max([len(n) for n in cmds])
for name in cmds:
padname = name.ljust(padsize)
cmdo = self._cmd_cli.getCmdByName(name)
brief = cmdo.getCmdBrief()
self.printf('%s - %s' % (padname, brief))
return
for name in cmds:
cmdo = self._cmd_cli.getCmdByName(name)
if cmdo is None:
self.printf('=== NOT FOUND: %s' % (name,))
continue
self.printf('=== %s' % (name,))
self.printf(cmdo.getCmdDoc())
return
class CmdLocals(Cmd):
'''
List the current locals for a given CLI object.
'''
_cmd_name = 'locs'
async def runCmdOpts(self, opts):
ret = {}
for k, v in self._cmd_cli.locs.items():
if isinstance(v, (int, str)):
ret[k] = v
else:
ret[k] = repr(v)
mesg = json.dumps(ret, indent=2, sort_keys=True)
self.printf(mesg)
|
UTF-8
|
Python
| false | false | 14,389 |
py
| 508 |
cli.py
| 421 | 0.520954 | 0.519494 | 0 | 530 | 26.149057 | 104 |
5783354/awokado
| 11,287,174,089,944 |
4a4393a7ae99b936b5171ed8bb00ed590e6e75dd
|
41c7bb7e14475d92faa2a023ee489f73e4371d86
|
/awokado/response.py
|
029930a22b3e1174930814e2d64452fb0a3d0f7f
|
[
"MIT"
] |
permissive
|
https://github.com/5783354/awokado
|
4b024c4d96758545f7fb1c16deecc8528612c3b0
|
9454067f005fd8905409902fb955de664ba3d5b6
|
refs/heads/master
| 2022-12-21T13:00:02.853859 | 2021-05-21T12:22:21 | 2021-05-21T12:22:21 | 165,822,421 | 6 | 1 |
MIT
| false | 2022-12-08T07:44:33 | 2019-01-15T09:26:59 | 2021-05-21T12:22:26 | 2022-12-08T07:44:33 | 1,464 | 6 | 1 | 5 |
Python
| false | false |
from typing import Dict, Optional, List
if False:
from awokado.resource import BaseResource
class Response:
"""
Response class helps to collect your data
and prepare it in a readable format for the Frontend (or another API Client)
You can override it in your resource to change response format::
class MyResponse(Response):
PAYLOAD_KEYWORD = "data"
class MyBaseResource(BaseResource):
Response = MyResponse
Default serialization for list requests (``/v1/book/``)::
{
"payload": {
"book": [
{
"name": "My Book",
"authors": [1, 2]
}
]
},
"meta": {
"total": 1
}
}
Default serialization for single object (``/v1/book/123``)::
{
"book": [
{
"name": "My Book",
"authors": [1, 2]
}
]
}
"""
PAYLOAD_KEYWORD = "payload"
META_KEYWORD = "meta"
TOTAL_KEYWORD = "total"
def __init__(self, resource: "BaseResource", is_list: bool = False):
self.is_list = is_list
self.resource = resource
self.payload: Dict = {}
self.related_payload: Optional[Dict] = None
self.include_total = False
self.total = 0
if resource:
self.include_total = not resource.Meta.disable_total
def serialize(self) -> dict:
if self.related_payload and self.payload:
self.payload.update(self.related_payload)
if self.is_list:
return self._serialize_list()
else:
return self._serialize_single()
def set_parent_payload(self, parent_payload: Optional[List] = None) -> None:
if not parent_payload:
parent_payload = []
payload = {self.resource.Meta.name: parent_payload}
self.payload = payload
def set_related_payload(self, related_payload: Optional[Dict]) -> None:
self.related_payload = related_payload
def set_total(self, total_objects_count: int):
self.total = total_objects_count
def _serialize_single(self) -> dict:
if not self.payload:
self.set_parent_payload()
response: Dict = self.payload
return response
def _serialize_list(self) -> dict:
if not self.payload:
self.set_parent_payload()
response = {self.PAYLOAD_KEYWORD: self.payload}
if self.include_total:
response[self.META_KEYWORD] = {self.TOTAL_KEYWORD: self.total}
else:
response[self.META_KEYWORD] = None # type: ignore
return response
|
UTF-8
|
Python
| false | false | 2,724 |
py
| 80 |
response.py
| 64 | 0.552496 | 0.548458 | 0 | 105 | 24.942857 | 80 |
HeptaKos/bluetooth_speaker
| 3,195,455,708,620 |
9c6e3a47d4af43149d9f52b7a89d930a2b632d17
|
1588e3b48dfda7801430b71493ab039bb8c378dd
|
/spider/papapapa.py
|
39ef4e280b2e3e21c6f6c53f6b1f6f4d9be48de9
|
[] |
no_license
|
https://github.com/HeptaKos/bluetooth_speaker
|
4b2fe7032197d49c753bf4c67268f8323eec5cf6
|
2ddb31c34c142305cfc8be77bc41d80f116f30dc
|
refs/heads/master
| 2020-07-04T10:00:50.696272 | 2019-08-14T01:32:39 | 2019-08-14T01:32:39 | 202,249,694 | 1 | 0 | null | true | 2019-08-14T01:28:02 | 2019-08-14T01:28:01 | 2019-08-14T01:26:08 | 2019-08-14T01:26:07 | 0 | 0 | 0 | 0 | null | false | false |
import re
import requests
from urllib import error
from bs4 import BeautifulSoup
import os
import json
from PIL import Image
import os.path
import glob
num = 0
numPicture = 0
file = ''
List = []
def Find(url):
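# Baidu's flip image search appears to page results via the "pn" offset
# parameter, 60 results at a time, which is why t advances by 60 per request.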
global List
    print('Checking the total number of images, please wait.....')
t = 0
i = 1
s = 0
while t < 1000:
Url = url + str(t)
try:
Result = requests.get(Url, timeout=7)
except BaseException:
t = t + 60
continue
else:
result = Result.text
pic_url = re.findall('"objURL":"(.*?)",', result, re.S) # first use a regular expression to find the image URLs
s += len(pic_url)
if len(pic_url) == 0:
break
else:
List.append(pic_url)
t = t + 60
return s
def recommend(url):
Re = []
try:
html = requests.get(url)
except error.HTTPError as e:
return
else:
html.encoding = 'utf-8'
bsObj = BeautifulSoup(html.text, 'html.parser')
div = bsObj.find('div', id='topRS')
if div is not None:
listA = div.findAll('a')
for i in listA:
if i is not None:
Re.append(i.get_text())
return Re
def dowmloadPicture(html, keyword):
global num
# t =0
pic_url = re.findall('"objURL":"(.*?)",', html, re.S) # first use a regular expression to find the image URLs
print('Found images for keyword: ' + keyword + ', starting download...')
for each in pic_url:
print('Downloading image No.' + str(num + 1) + ', image URL: ' + str(each))
try:
if each is not None:
pic = requests.get(each, timeout=17)
else:
continue
except BaseException:
print('Error: the current image could not be downloaded')
continue
else:
string = file + r'\\' + keyword + '_' + str(num) + '.jpg'
fp = open(string, 'wb')
fp.write(pic.content)
fp.close()
num += 1
if num >= numPicture:
for jpgfile in glob.glob(file + "\\*.jpg"):
convertjpg(jpgfile, file)
return
def convertjpg(jpgfile,outdir,width=299,height=299):
try:
img = Image.open(jpgfile)
new_img=img.resize((width,height),Image.BILINEAR)
new_img.save(os.path.join(outdir,os.path.basename(jpgfile)))
except Exception as e:
print(e)
os.remove(jpgfile)
if __name__ == '__main__':  # main entry point
tm = int(input('Enter the number of images to download per category: '))
numPicture = tm
line_list = []
with open('list.json','r') as f:
line_list = json.load(f)
for word in line_list:
url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn='
tot = Find(url)
Recommend = recommend(url) # record the related search suggestions
print('Detection finished: category %s has %d images in total' % (word, tot))
file = word
y = os.path.exists(file)
if y == 1:
print('This folder already exists, please re-enter')
file = word + '2'
os.mkdir(file)
else:
os.mkdir(file)
t = 0
tmp = url
while t < numPicture:
try:
url = tmp + str(t)
result = requests.get(url, timeout=10)
print(url)
except error.HTTPError as e:
print('Network error, please check the connection and retry')
t = t + 60
else:
dowmloadPicture(result.text, word)
t = t + 60
numPicture = numPicture + tm
print('Current search finished, thanks for using this tool')
|
UTF-8
|
Python
| false | false | 3,779 |
py
| 2 |
papapapa.py
| 1 | 0.490106 | 0.479208 | 0 | 134 | 25.022388 | 96 |
maresh88/projects_tracker
| 8,856,222,601,820 |
acf5880683b888b7986acdb4a02b75480b5c9cbf
|
28aac09a102d68d88a7ca5c4fffb84bced7c5fab
|
/projects/migrations/0004_auto_20210525_1605.py
|
c2ad9ed5c6082ed40287365d12ad55d061d7255d
|
[] |
no_license
|
https://github.com/maresh88/projects_tracker
|
3f6fbb11398765ae46a6d4ef34fd1e0fe964cbdb
|
2c5bf6485b4f4766b20abde6f135550fe207746b
|
refs/heads/master
| 2023-05-07T11:47:14.932618 | 2021-06-01T08:34:33 | 2021-06-01T08:34:33 | 368,487,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.2.3 on 2021-05-25 13:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_comment_profile'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'ordering': ('created_at',)},
),
migrations.DeleteModel(
name='Profile',
),
]
|
UTF-8
|
Python
| false | false | 422 |
py
| 29 |
0004_auto_20210525_1605.py
| 23 | 0.561611 | 0.516588 | 0 | 20 | 20.1 | 50 |
jergusg/keyboard-tester-app
| 14,594,298,890,803 |
00a683f131b320bd6248a89469c2cc98771babfc
|
e7e79b41bf01c7f970a081387a0f7b7223ad450a
|
/python-generator/words.py
|
d81d8f4fedb851b79745f2ebbaf4acaeb29ecaf6
|
[] |
no_license
|
https://github.com/jergusg/keyboard-tester-app
|
92f0af1907945aa6376b685609c0dd288cb78943
|
ee9a1717f588a25b1e610194cbd2094e04caa53d
|
refs/heads/master
| 2020-03-18T00:00:13.458255 | 2018-06-05T14:36:21 | 2018-06-05T14:36:21 | 134,072,963 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import random
words = [
'organizácia',
'schopnosť',
'rozhodnutie',
'prehrávač',
'efektívnosť',
'inštrukcia',
'bývanie',
'chlieb',
'iniciatíva',
'sloboda',
'filozofia',
'príbeh',
'vysvetlenie',
'recept',
'aspekt',
'výsledok',
'rieka',
'pieseň',
'jednotka',
'kapitola',
'systém',
'zmätok',
'spôsob',
'atmosféra',
'význam',
'pamäť',
'kreslenie',
'teória',
'matematika',
'manažér',
'košík',
'riaditeľ',
'dievča',
'časopis',
'noviny',
'súvislosť',
'univerzita',
'rozloha',
'spoločnosť',
'aktivita',
]
wordlist0 = ['Výrobca',
'Internet',
'Žena',
'metóda',
'dieťa',
'realita',
'Fyzika',
'recept',
'Zbierka',
'bahno',
'rieka',
'cigareta',
'Vedomosti',
'mesiac',
'výber',
'Jazero'
]
random.seed(6)
# random shuffle
wordlist2 = random.sample(wordlist0, k=len(wordlist0))
|
UTF-8
|
Python
| false | false | 915 |
py
| 13 |
words.py
| 11 | 0.569966 | 0.56314 | 0 | 68 | 11.294118 | 54 |
manifolded/five-bit-poskitt
| 15,642,270,920,685 |
7721d8f67b249e520b03b2cf08ccfd1fad6c82d9
|
908541b1ec06d6117b3f133e659a26e4701f4f24
|
/cipher-grid/genCipherGrid.py
|
7d7bdc0c6e358959db68494ca0393914f9575808
|
[
"MIT"
] |
permissive
|
https://github.com/manifolded/five-bit-poskitt
|
d52c4aed7bf4e9a1e27dddf4f13140abb2d782c8
|
7fc5e5470b6158694ee62198473ead547051efa5
|
refs/heads/master
| 2021-01-22T02:13:07.618301 | 2020-09-17T05:11:51 | 2020-09-17T05:11:51 | 92,338,154 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/Users/keith/anaconda/bin/python2.7
from string import ascii_lowercase
from prettytable import PrettyTable
# pylint: disable=C0103
# http://www.sitesbay.com/python-program/python-print-alphabet-pattern-in-python
def ciphertext(offset):
ciphertext = [str(offset)]
for i in range(0, 26):
ciphertext.append(chr(65+(i+offset)%26))
return ciphertext
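# For example, ciphertext(1) returns ['1', 'B', 'C', ..., 'Z', 'A']: each row is
# the alphabet shifted by `offset` positions, prefixed with the row's offset label.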
def blanktext():
blanktext = [" "]
for i in range(0, 26):
blanktext.append(" ")
return blanktext
# construct the plaintext header
plaintext = [" "]
for i in range(0, 26):
plaintext.append(ascii_lowercase[i])
# construct the table/grid
x = PrettyTable(plaintext)
x.padding_width = 1 # One space between column edges and contents (default)
# make all columns centered
for colHeader in plaintext:
x.align[colHeader] = "c"
# construct the ciphertext body
x.add_row(blanktext())
for j in range(0, 26):
x.add_row(ciphertext(j+1))
print x.get_html_string(attributes = {"class": "grid-style"})
# print x
|
UTF-8
|
Python
| false | false | 1,003 |
py
| 7 |
genCipherGrid.py
| 4 | 0.6999 | 0.675972 | 0 | 39 | 24.717949 | 80 |
NeTatsu/video-diff
| 1,683,627,223,074 |
31a32943e6e75d49faa2d86c4de641d68fcbb447
|
b30c31f07a3eaa3d0822bcb15d5ea9024929c8ed
|
/Python/ReadVideo.py
|
3af9c809c8c6c4bbc2d121c6afd7bf2f84832c25
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/NeTatsu/video-diff
|
238f9a980c0193fc8459719c04d3fec54f06a3f4
|
c2eb75373d20aefc82a0d8d198eddd7eb9b9675a
|
refs/heads/master
| 2023-01-24T19:49:05.492759 | 2020-12-08T07:42:47 | 2020-12-08T07:42:47 | 269,405,610 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import os
import traceback
import shutil
import sys
# Find max number of matched features with Simulated annealing
import common
import config
if config.OCV_OLD_PY_BINDINGS == True:
import cv
if config.USE_EVANGELIDIS_ALGO == False:
import MatchFrames
import SimAnneal
captureQ = None
frameCountQ = None
captureR = None
frameCountR = None
"""
widthQ = None
widthR = None
heightQ = None
heightR = None
"""
resVideoQ = (-1, -1)
resVideoR = (-1, -1)
# Do performance benchmarking
def Benchmark():
global captureQ, frameCountQ
global captureR, frameCountR
# Doing benchmarking
while True:
if config.OCV_OLD_PY_BINDINGS:
frame1 = captureQ.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
else:
frame1 = captureQ.get(cv2.CAP_PROP_POS_FRAMES)
common.DebugPrint("Alex: frame1 = %d" % frame1)
MatchFrames.counterQ = int(frame1) #0
common.DebugPrint("Alex: MatchFrames.counterQ = %d" % MatchFrames.counterQ)
retQ, imgQ = captureQ.read()
if retQ == False:
break
if False:
grayQ = common.ConvertImgToGrayscale(imgQ)
MatchFrames.Main_img1(imgQ, MatchFrames.counterQ)
# 36.2 secs (38.5 secs with Convert to RGB)
common.DebugPrint("Alex: time after Feature Extraction of all frames of " \
"video 1 = %s" % GetCurrentDateTimeStringWithMilliseconds())
while True:
if config.OCV_OLD_PY_BINDINGS:
frameR = captureR.get(cv2.cv.CV_CAP_PROP_POS_FRAMES);
else:
frameR = captureR.get(cv2.CAP_PROP_POS_FRAMES);
common.DebugPrint("Alex: frameR = %d" % frameR);
MatchFrames.counterR = int(frameR); #0
common.DebugPrint("Alex: counterR = %d" % (MatchFrames.counterR));
retR, imgR = captureR.read();
if retR == False:
break;
if False:
grayR = common.ConvertImgToGrayscale(imgR);
MatchFrames.Main_img2(imgR, MatchFrames.counterR);
# Note: 47.2 secs (56.7 secs with Convert to RGB)
common.DebugPrint("Alex: time after Feature Extraction of all frames of " \
"video 2 and (FLANN?) matching once for each frame = %s" % \
common.GetCurrentDateTimeStringWithMilliseconds());
quit();
def OpenVideoCapture(videoPathFileName, videoType): # videoType = 0 --> query (input), 1 --> reference
# OpenCV can read AVIs (if no ffmpeg support installed it can't read MP4, nor 3GP, nor FLVs with MPEG compression)
# From http://answers.opencv.org/question/6/how-to-readwrite-video-with-opencv-in-python/
if False:
"""
We get the following error when trying to open .3gp or .flv.
OpenCV Error: Bad flag (parameter or structure field)
(Unrecognized or unsupported array type)
in unknown function,
file ..\..\..\src\opencv\modules\core\src\array.cpp, line 2482
"""
capture = cv2.VideoCapture("2010_06_22_16_05_29_1.3gp");
#capture = cv2.VideoCapture("1WF2gHYmuFg.flv")
"""
Unfortunately, normally cv2.VideoCapture() continues even if it does not
find videoPathFileName
"""
assert os.path.isfile(videoPathFileName);
# From http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-videocapture
capture = cv2.VideoCapture(videoPathFileName);
# Inspired from https://stackoverflow.com/questions/16703345/how-can-i-use-opencv-python-to-read-a-video-file-without-looping-mac-os
if config.OCV_OLD_PY_BINDINGS:
frameCount = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT));
else:
frameCount = int(capture.get(cv2.CAP_PROP_FRAME_COUNT));
if config.OCV_OLD_PY_BINDINGS:
capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, config.initFrame[videoType]);
else:
capture.set(cv2.CAP_PROP_POS_FRAMES, config.initFrame[videoType]);
#captureQ.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameCountQ / 2)
if config.OCV_OLD_PY_BINDINGS:
width = capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH);
height = capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT);
fps = capture.get(cv2.cv.CV_CAP_PROP_FPS);
codec = capture.get(cv2.cv.CV_CAP_PROP_FOURCC);
else:
width = capture.get(cv2.CAP_PROP_FRAME_WIDTH);
height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT);
fps = capture.get(cv2.CAP_PROP_FPS);
codec = capture.get(cv2.CAP_PROP_FOURCC);
assert width < 32767; # we use np.int16
assert height < 32767; # we use np.int16
"""
common.DebugPrint("Video '%s' has resolution %dx%d, %d fps and " \
"%d frames" % \
(videoPathFileName,
capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH), \
capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT), \
capture.get(cv2.cv.CV_CAP_PROP_FPS), \
frameCount));
"""
duration = frameCount / fps;
print("Video '%s' has resolution %dx%d, %.2f fps and " \
"%d frames, duration %.2f secs, codec=%s" % \
(videoPathFileName, width, height, fps, \
frameCount, duration, codec));
steps = [config.counterQStep, config.counterRStep];
usedFrameCount = frameCount / steps[videoType]; #!!!!TODO: take into account also initFrame
assert not((videoType == 0) and (usedFrameCount <= 10));
common.DebugPrint("We use video '%s', with %d frames, from which we use ONLY %d\n" % \
(videoPathFileName, frameCount, usedFrameCount));
"""
CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
CV_CAP_PROP_FPS Frame rate.
"""
resolution = (width, height);
return capture, frameCount, resolution;
"""
# cv2.VideoCapture() continues even if it does not find videoPathFileNameB
assert os.path.isfile(videoPathFileNameB)
captureR = cv2.VideoCapture(videoPathFileNameB)
#print "Alex: dir(captureQ.read) = %s" % (str(dir(captureQ.read)))
#print "Alex: help(captureQ.read) = %s" % (str(help(captureQ.read)))
if config.OCV_OLD_PY_BINDINGS:
frameCountR = int(captureR.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
else:
frameCountR = int(captureR.get(cv2.CAP_PROP_FRAME_COUNT))
common.DebugPrint("Alex: frameCountR = %d" % frameCountR)
"""
"""
Note: the extension of the videoPathFileName needs to be the same as the fourcc,
otherwise, gstreamer, etc can return rather criptic error messages.
"""
def WriteVideoCapture(videoPathFileName, folderName):
# OpenCV can read only AVIs - not 3GP, nor FLVs with MPEG compression
# From http://answers.opencv.org/question/6/how-to-readwrite-video-with-opencv-in-python/
#assert os.path.isfile(videoPathFileName);
#assert False; # UNFINISHED
# From http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#videowriter-videowriter
#capture = cv2.VideoWriter(videoPathFileName); # Gives error: <<TypeError: Required argument 'fourcc' (pos 2) not found>>
# Inspired from http://stackoverflow.com/questions/14440400/creating-a-video-using-opencv-2-4-0-in-python
#writer = cv.CreateVideoWriter("out.avi", CV_FOURCC("M", "J", "P", "G"), fps, frame_size, True)
if False:
writer = cv.CreateVideoWriter("out.avi",
cv.CV_FOURCC("M", "J", "P", "G"),
fps, frameSize, True);
else:
videoWriter = None;
folderContent = os.listdir(folderName);
sortedFolderContent = sorted(folderContent);
for fileName in sortedFolderContent:
pathFileName = folderName + "/" + fileName;
if os.path.isfile(pathFileName) and \
fileName.lower().endswith("_good.png"):
common.DebugPrint("ComputeHarlocs(): Loading %s" % pathFileName);
img = cv2.imread(pathFileName);
assert img is not None;
if videoWriter is None:
common.DebugPrint("img.shape = %s" % str(img.shape));
# From http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_video_display/py_video_display.html#saving-a-video
# See also http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
# WRITES 0 BYTES IN THE VIDEO: vidFourcc = cv2.VideoWriter_fourcc('M','J','P','G');
# See also http://www.fourcc.org/codecs.php
vidFourcc = cv2.VideoWriter_fourcc(*'XVID');
videoWriter = cv2.VideoWriter(filename=videoPathFileName, \
fourcc=vidFourcc, fps=10, \
frameSize=(img.shape[1], img.shape[0]));
if not videoWriter:
common.DebugPrint("Error in creating video writer");
sys.exit(1);
#cv.WriteFrame(writer, img);
videoWriter.write(img);
videoWriter.release();
common.DebugPrint("Finished writing the video");
return;
height, width, layers = img1.shape;
video = cv2.VideoWriter("video.avi", -1, 1, (width, height));
video.write(img1);
video.release();
resolution = (width, height);
# From http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#videowriter-write
capture.write(im);
return;
if config.OCV_OLD_PY_BINDINGS:
capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, 0);
else:
capture.set(cv2.CAP_PROP_POS_FRAMES, 0);
if config.OCV_OLD_PY_BINDINGS:
frameCount = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT));
else:
frameCount = int(capture.get(cv2.CAP_PROP_FRAME_COUNT));
"""
def ReadFrame(capture, ):
SynchroEvangelidis(captureQ, captureR);
return;
# Allocate numFeaturesMatched
numFeaturesMatched = [None] * numFramesQ
for i in range(numFramesQ):
numFeaturesMatched[i] = [-2000000000] * numFramesR
while True:
if config.OCV_OLD_PY_BINDINGS:
frameQ = captureQ.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
else:
frameQ = captureQ.get(cv2.CAP_PROP_POS_FRAMES)
common.DebugPrint("Alex: frameQ = %d" % frameQ)
counterQ = int(frameQ) #0
common.DebugPrint("Alex: counterQ = %d" % counterQ)
ret1, imgQ = captureQ.read()
if False and config.SAVE_FRAMES:
fileName = config.IMAGES_FOLDER + "/imgQ_%05d.png" % counterQ
if not os.path.exists(fileName):
#print "dir(imgQ) = %s"% str(dir(imgQ));
#imgQCV = cv.fromarray(imgQ);
#cv2.imwrite(fileName, imgQCV);
cv2.imwrite(fileName, imgQ);
#if ret1 == False: #MatchFrames.counterQ == 3:
if (ret1 == False) or (counterQ > numFramesQ):
break;
#I don't need to change to gray image if I do NOT do
# explore_match() , which requires gray to
# concatenate the 2 frames together.
if False:
#if True:
#common.ConvertImgToGrayscale(imgQ)
#gray1 = common.ConvertImgToGrayscale(imgQ)
imgQ = common.ConvertImgToGrayscale(imgQ)
ComputeFeatures1(imgQ, counterQ) #TODO: counterQ already visible in module MatchFrames
# We set the video stream captureR at the beginning
if config.OCV_OLD_PY_BINDINGS:
captureR.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, 0) #900)
else:
captureR.set(cv2.CAP_PROP_POS_FRAMES, 0) #900)
# Start time profiling for the inner loop
t1 = float(cv2.getTickCount())
#TODO: counterQ already visible in module MatchFrames
TemporalAlignment(counterQ, frameQ, captureR, \
numFramesR, numFeaturesMatched, fOutput)
# Measuring how much it takes the inner loop
t2 = float(cv2.getTickCount())
myTime = (t2 - t1) / cv2.getTickFrequency()
common.DebugPrint(
"Avg time it takes to complete a match (and to perform " \
"INITIAL Feat-Extract) = %.6f [sec]" % \
(myTime / (numFramesR / config.counterRStep)) )
counterQ += config.counterQStep
# If we try to seek to a frame out-of-bounds frame it gets to the last one
if config.OCV_OLD_PY_BINDINGS:
captureQ.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, counterQ)
else:
captureQ.set(cv2.CAP_PROP_POS_FRAMES, counterQ)
common.DebugPrint("numFeaturesMatched = %s" % str(numFeaturesMatched))
"""
def Main(videoPathFileNameQ, videoPathFileNameR):
global captureQ, frameCountQ
global captureR, frameCountR
global resVideoQ, resVideoR
if config.USE_EVANGELIDIS_ALGO == False:
if not os.path.exists(config.IMAGES_FOLDER):
os.makedirs(config.IMAGES_FOLDER);
if not os.path.exists(config.FRAME_PAIRS_FOLDER):
os.makedirs(config.FRAME_PAIRS_FOLDER);
if not os.path.exists(config.FRAME_PAIRS_MATCHES_FOLDER):
os.makedirs(config.FRAME_PAIRS_MATCHES_FOLDER);
totalT1 = float(cv2.getTickCount());
#if False:
if True:
#print "dir(cv) = %s" % str(dir(cv))
if config.OCV_OLD_PY_BINDINGS == True:
common.DebugPrint("dir(cv) = %s" % str(dir(cv)));
common.DebugPrint("dir(cv2) = %s" % str(dir(cv2)));
#print "cv2._INPUT_ARRAY_GPU_MAT = %s" % str(cv2._INPUT_ARRAY_GPU_MAT)
#sys.stdout.flush()
if config.USE_EVANGELIDIS_ALGO == False:
fOutput = open("output.txt", "w")
print >>fOutput, "Best match for frames from (input/current) video A w.r.t. reference video B:"
captureQ, frameCountQ, resVideoQ = OpenVideoCapture(videoPathFileNameQ, 0);
captureR, frameCountR, resVideoR = OpenVideoCapture(videoPathFileNameR, 1);
common.DebugPrint("Main(): frameCountQ = %d" % frameCountQ);
common.DebugPrint("Main(): frameCountR = %d" % frameCountR);
"""
In case the videos have different resolutions an error will actually take
place much longer when using Evangelidis' algorithm, in spatial
alignment, more exactly in Matlab.interp2():
File "/home/asusu/drone-diff/Backup/2014_03_25/Matlab.py", line 313, in interp2
V4 = np.c_[V[1:, 1:] * xM[:-1, :-1] * yM[:-1, :-1], nanCol1];
ValueError: operands could not be broadcast together with shapes (719,1279) (239,319)
"""
assert resVideoQ == resVideoR;
#numInliersMatched = None
if config.USE_EVANGELIDIS_ALGO == False:
SimAnneal.LIMIT = frameCountR;
SimAnneal.captureR = captureR;
#SimAnneal.lenA = frameCountR;
print("ReadVideo.Main(): time before PreMain() = %s" % \
common.GetCurrentDateTimeStringWithMilliseconds());
if config.USE_EVANGELIDIS_ALGO == False:
#MatchFrames.PreMain(nFramesQ=1000, nFramesR=1000)
MatchFrames.PreMain(nFramesQ=frameCountQ, nFramesR=frameCountR);
common.DebugPrint("Main(): time after PreMain() = %s" % \
common.GetCurrentDateTimeStringWithMilliseconds());
if False:
Benchmark();
################ Here we start the (main part of the) algorithm
# Distinguish between Alex's alignment algo and Evangelidis's algo
if config.USE_EVANGELIDIS_ALGO:
#import synchro_script
#synchro_script.SynchroEvangelidis(captureQ, captureR);
import VideoAlignmentEvangelidis
VideoAlignmentEvangelidis.AlignVideos(captureQ, captureR);
else:
MatchFrames.ProcessInputFrames(captureQ, captureR, fOutput);
if config.USE_GUI:
cv2.destroyAllWindows();
if not config.USE_EVANGELIDIS_ALGO:
fOutput.close();
captureQ.release();
captureR.release();
totalT2 = float(cv2.getTickCount());
myTime = (totalT2 - totalT1) / cv2.getTickFrequency();
#common.DebugPrint("ReadVideo.Main() took %.6f [sec]" % (myTime));
print("ReadVideo.Main() took %.6f [sec]" % (myTime));
if __name__ == '__main__':
"""
# Inspired from \OpenCV2-Python-Tutorials-master\source\py_tutorials\py_core\py_optimization
# normally returns True - relates to using the SIMD extensions of x86: SSX, AVX
common.DebugPrint("cv2.useOptimized() is %s" % str(cv2.useOptimized()));
if False:
cv2.setUseOptimized(True);
cv2.useOptimized();
"""
#Main(None, None);
#WriteVideoCapture(videoPathFileName="MIT_drive.xvid", folderName=sys.argv[1]);
WriteVideoCapture(videoPathFileName="MIT_drive.avi", folderName=sys.argv[1]);
|
UTF-8
|
Python
| false | false | 16,701 |
py
| 12 |
ReadVideo.py
| 10 | 0.630501 | 0.614993 | 0 | 465 | 34.913978 | 141 |
CoronaCircles/coronacircles-django
| 11,905,649,372,177 |
5c14329221a81b786397f5498d495138fe73a623
|
7a4761243b563e203b507facefdf3382ce63dbb5
|
/main_app/migrations/0003_auto_20200413_0115.py
|
4ba8756df00a5758425620f63c2453d37d686d3a
|
[] |
no_license
|
https://github.com/CoronaCircles/coronacircles-django
|
f8dcc8a1c0ebd027fe0c96f920b0c12c00c6070e
|
f3091f239307bd25557944e5c51ed26546e308d7
|
refs/heads/master
| 2022-04-19T03:20:21.498595 | 2020-04-20T13:25:34 | 2020-04-20T13:25:34 | 254,603,787 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.2.12 on 2020-04-12 23:15
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('main_app', '0002_auto_20200413_0112'),
]
operations = [
migrations.AlterField(
model_name='event',
name='creation_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='user',
name='creation_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
UTF-8
|
Python
| false | false | 629 |
py
| 16 |
0003_auto_20200413_0115.py
| 9 | 0.605723 | 0.554849 | 0 | 24 | 25.208333 | 74 |
JKChenFZ/EAS-ETL
| 16,569,983,828,473 |
1971ecf2a7d0c7bbdfefd886004a997137e58c83
|
ce29771a146cce0121a73a14033bae9e9d1fb4d1
|
/run_crontab_load.py
|
b636e8015a968b7dc4a0ebfd7da66cdb1ce24010
|
[] |
no_license
|
https://github.com/JKChenFZ/EAS-ETL
|
874c31b46e4541b062ef8cd07f13e54a3eadb8d0
|
f8aa02fc5c4a645143761a2f804a9ce77539863a
|
refs/heads/master
| 2021-06-11T05:14:38.735418 | 2018-04-14T04:25:26 | 2018-04-14T04:25:26 | 128,496,060 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from load_history.crontab import EASCrontabUtil
import datetime
if __name__ == '__main__':
print('--------------Start-----------------------------')
print(datetime.datetime.utcnow())
loader = EASCrontabUtil()
loader.run_loader()
print('--------------Done------------------------------')
|
UTF-8
|
Python
| false | false | 309 |
py
| 9 |
run_crontab_load.py
| 7 | 0.475728 | 0.475728 | 0 | 9 | 33.222222 | 61 |
ThenTech/NIIP-Labo
| 12,601,434,048,430 |
101a43c815115ad028c1e393bbacce3b1ff03581
|
ff4d3189252012640fe264a49012403820157dd8
|
/Lab3/Opdracht_1/broker/mqtt/mqtt_packet.py
|
2d1b4fc657311eb060f81628a6f50f76ad45d9aa
|
[] |
no_license
|
https://github.com/ThenTech/NIIP-Labo
|
60aaaa6a0b269949123f38d88583c12dd64f0284
|
e71b423c612f0847357976ce469668d00099e8b5
|
refs/heads/master
| 2022-08-21T15:58:31.858152 | 2020-05-28T10:09:01 | 2020-05-28T10:09:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from mqtt.bits import Bits
from mqtt.colours import *
from mqtt.mqtt_packet_types import *
from mqtt.mqtt_exceptions import *
from mqtt.mqtt_subscription import TopicSubscription
from mqtt.topic_matcher import TopicMatcher
class MQTTPacket:
PROTOCOL_NAME = b"MQTT"
def __init__(self, raw=b""):
super().__init__()
self.ptype = 0
self.pflag = 0
self.length = 0
self.packet_id = b""
self.payload = b""
if raw:
self._parse(raw)
def name(self):
return ControlPacketType.to_string(self.ptype)
def __str__(self):
attr = []
if self.packet_id:
attr.append("id={0}".format(self.packet_id))
if self.length:
attr.append("len={0}".format(self.length))
text = "<{0}{1}>" \
.format(self.name(), " " + ", ".join(attr) if attr else "")
return style(text, Colours.FG.BLUE)
@staticmethod
def _parse_type(raw):
# Extract bits, but keep MSB location: shift back to left
ptype, pflags = (Bits.get(raw[0], 4, 4) << 4), Bits.get(raw[0], 0, 4)
return ptype, pflags
@staticmethod
def _create_length_bytes(length):
if length <= 0x7F: # 127
return bytes((length,))
elif length > 268435455:
# Larger than 256Mb
raise MQTTPacketException("[MQTTPacket] Payload exceeds maximum length (256Mb)!")
len_bytes = bytearray()
while length > 0:
enc, length = length % 128, length // 128
if length:
enc |= 128
len_bytes.append(enc)
assert(len(len_bytes) <= 4)
return bytes(len_bytes)
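# Worked example of the MQTT "remaining length" encoding implemented above:
# a length of 321 is emitted as 321 % 128 = 65 with the continuation bit set
# (65 | 128 = 0xC1), followed by 321 // 128 = 2 (0x02), i.e. b"\xc1\x02".
# _get_length_from_bytes() below reverses this: (0xC1 & 127) + 2 * 128 = 321,
# and the cleared continuation bit on 0x02 terminates the loop.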
@staticmethod
def _get_length_from_bytes(data):
length, payload_offset = 0, 0
mult = 1
if data:
while True:
enc = data[payload_offset]
length += (enc & 127) * mult
mult *= 128
payload_offset += 1
if mult > 2097152:
# More than 4 bytes parsed, error
raise MQTTPacketException("[MQTTPacket] Malformed remaining length!")
if (enc & 128) == 0:
break
return length, payload_offset
def _extract_next_field(self, length=0, length_bytes=2):
"""For parsing only"""
if not length:
blength = Bits.unpack(self.payload[0:length_bytes])
else:
blength = length
length_bytes = 0
try:
data = self.payload[length_bytes:length_bytes+blength]
self.payload = self.payload[length_bytes+blength:]
return blength, data
except:
return 0, None
def _includes_packet_identifier(self):
# PUBLISH also contains id, but after topic, so not until payload itself
return self.ptype in ControlPacketType.CHECK_HAS_PACKET_ID
def _parse(self, raw):
# Get type
self.ptype, self.pflag = self._parse_type(raw)
# If the flags are malformed, disconnect the client
if not ControlPacketType.check_flags(self.ptype, self.pflag):
raise MQTTDisconnectError("[MQTTPacket::parse] Malformed packet flags for {0}! ({1})"
.format(self.name(), self.pflag))
# Parse length
self.length, offset = self._get_length_from_bytes(raw[1:])
# Everything else is payload
self.payload = raw[offset + 1:]
if self._includes_packet_identifier():
self.packet_id = self.payload[0:2]
self.payload = self.payload[2:]
@staticmethod
def from_bytes(raw, expected_type=None):
packet_type, packet_flags = MQTTPacket._parse_type(raw)
packet_adaptor = {
ControlPacketType.CONNECT : Connect,
ControlPacketType.PUBLISH : Publish,
ControlPacketType.SUBSCRIBE : Subscribe,
ControlPacketType.UNSUBSCRIBE : Unsubscribe,
ControlPacketType.PUBACK : MQTTPacket,
ControlPacketType.PUBREC : MQTTPacket,
ControlPacketType.PUBREL : MQTTPacket,
ControlPacketType.PUBCOMP : MQTTPacket,
ControlPacketType.PINGREQ : MQTTPacket,
ControlPacketType.DISCONNECT : MQTTPacket,
}
if expected_type and packet_type != expected_type:
return None
elif packet_type not in packet_adaptor:
raise MQTTPacketException("[MQTTPacket::from_bytes] Unimplemented packet received! ({0})"
.format(ControlPacketType.to_string(packet_type)))
return packet_adaptor.get(packet_type, MQTTPacket)(raw)
@classmethod
def create(cls, ptype, pflags, payload=bytes()):
packet = cls()
if ptype not in ControlPacketType.CHECK_VALID:
raise MQTTPacketException("[MQTTPacket::create] Invalid packet type '{0}'!"
.format(ControlPacketType.to_string(ptype)))
if pflags not in ControlPacketType.Flags.CHECK_VALID:
raise MQTTPacketException("[MQTTPacket::create] Invalid packet flags '{0}' (Expected '{1}' for type {2})!"
.format(pflags,
ControlPacketType.__VALID_FLAGS.get(ptype, "?") if ptype != ControlPacketType.PUBLISH else "*",
ControlPacketType.to_string(ptype)))
packet.ptype = ptype
packet.pflag = pflags
if not isinstance(payload, bytes):
if isinstance(payload, int):
# if payload is single numeric value
payload = bytes(tuple(payload))
elif isinstance(payload, (list, tuple)):
payload = bytes(payload)
else:
raise MQTTPacketException("[MQTTPacket::create] Invalid payload?")
if ptype in ControlPacketType.CHECK_HAS_PACKET_ID and len(payload) == 2:
# Assume payload is id
packet.packet_id = payload
packet.length = len(payload)
packet.payload = payload
return packet
@staticmethod
def create_connack(session_present, status_code):
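# CONNACK variable header: the first byte carries the session-present flag
# in bit 0, the second byte is the connect return (status) code.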
return MQTTPacket.create(ControlPacketType.CONNACK, ControlPacketType.Flags.CONNACK,
bytes((Bits.bit(0, session_present), status_code)))
@staticmethod
def create_publish(flags, packet_id, topic_name, payload):
if not isinstance(flags, ControlPacketType.PublishFlags):
raise MQTTPacketException("[MQTTPacket::create_publish] Invalid PublishFlags?")
packet = Publish()
packet.ptype = ControlPacketType.PUBLISH
packet.pflag = flags
packet.packet_id = packet_id
packet.topic = topic_name
packet.payload = payload
return packet
@staticmethod
def create_puback(packet_id):
packet_id = Bits.pad_bytes(packet_id, 2)
return MQTTPacket.create(ControlPacketType.PUBACK, ControlPacketType.Flags.PUBACK, packet_id)
@staticmethod
def create_pubrec(packet_id):
packet_id = Bits.pad_bytes(packet_id, 2)
return MQTTPacket.create(ControlPacketType.PUBREC, ControlPacketType.Flags.PUBREC, packet_id)
@staticmethod
def create_pubrel(packet_id):
packet_id = Bits.pad_bytes(packet_id, 2)
return MQTTPacket.create(ControlPacketType.PUBREL, ControlPacketType.Flags.PUBREL, packet_id)
@staticmethod
def create_pubcomp(packet_id):
packet_id = Bits.pad_bytes(packet_id, 2)
return MQTTPacket.create(ControlPacketType.PUBCOMP, ControlPacketType.Flags.PUBCOMP, packet_id)
@classmethod
def create_suback(cls, packet_id, topics_dict):
packet = cls()
packet.ptype = ControlPacketType.SUBACK
packet.pflag = ControlPacketType.Flags.SUBACK
packet.packet_id = packet_id
content = bytearray()
# [MQTT-3.8.4-2] Same Packet Identifier as the SUBSCRIBE Packet
content.extend(Bits.pad_bytes(packet_id, 2))
for sub in sorted(topics_dict.values()):
# Assume success
content.append(sub.qos if sub.qos in SUBACKReturnCode.CHECK_VALID else SUBACKReturnCode.FAILURE)
packet.payload = bytes(content)
packet.length = len(packet.payload)
return packet
@staticmethod
def create_unsuback(packet_id):
packet_id = Bits.pad_bytes(packet_id, 2)
return MQTTPacket.create(ControlPacketType.UNSUBACK, ControlPacketType.Flags.UNSUBACK, packet_id)
@staticmethod
def create_pingreq():
return MQTTPacket.create(ControlPacketType.PINGREQ, ControlPacketType.Flags.PINGREQ)
@staticmethod
def create_pingresp():
return MQTTPacket.create(ControlPacketType.PINGRESP, ControlPacketType.Flags.PINGRESP)
# TODO Other packets
def to_bin(self):
data = bytearray()
data.append(self.ptype | self.pflag)
if self.length >= 0:
# If length is given, encode it, else assume its already in payload
data.extend(self._create_length_bytes(self.length))
data.extend(self.payload)
return bytes(data)
class Connect(MQTTPacket):
class ConnectFlags:
def __init__(self, reserved=1, clean=0, will=0, will_qos=0, will_ret=0, passw=0, usr_name=0):
super().__init__()
self.reserved = reserved
self.clean = clean # If 0, store and restore the session with same client, else always create new session.
self.will = will # If 1, publish Will message on error/disconnect, else don't
self.will_qos = will_qos # If will==0 then 0, else if will==1 then qos in WillQoS.CHECK_VALID
self.will_ret = will_ret # If will==0 then 0, else if will==1 then if ret == 0: Publish Will msg as non-retained, else retained.
self.passw = passw # If usr_name==0, then 0, else if passw==1, password must be in payload, else not
self.usr_name = usr_name # If 1, user name must be in payload, else not
def byte(self):
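# CONNECT flags byte layout (MQTT 3.1.1), as packed below:
# bit 7 = User Name, bit 6 = Password, bit 5 = Will Retain, bits 4-3 = Will QoS,
# bit 2 = Will Flag, bit 1 = Clean Session, bit 0 = Reserved (checked in is_valid()).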
bits = Bits.bit(7, self.usr_name) \
| Bits.bit(6, self.passw) \
| Bits.bit(5, self.will_ret) \
| Bits.bit(3, self.will_qos, 2) \
| Bits.bit(2, self.will) \
| Bits.bit(1, self.clean) \
| Bits.bit(0, self.reserved)
return bytes([bits])
def is_valid(self):
return self.reserved == 0 \
and ( (self.will == 0 and self.will_qos == 0 and self.will_ret == 0) \
or (self.will == 1 and self.will_qos in WillQoS.CHECK_VALID)) \
and ( (self.usr_name == 1) \
or (self.usr_name == 0 and self.passw == 0))
@classmethod
def from_bytes(cls, raw):
raw = Bits.to_single_byte(raw)
return cls(Bits.get(raw, 0), Bits.get(raw, 1), Bits.get(raw, 2), Bits.get(raw, 3, 2),
Bits.get(raw, 5), Bits.get(raw, 6), Bits.get(raw, 7))
def __str__(self):
flags = []
flags.append("clean" if self.clean else "keep")
flags.append("will(QoS={0}, Retain={1})".format(self.will_qos, self.will_ret) \
if self.will else "no will")
if self.usr_name:
flags.append("user")
if self.passw:
flags.append("pass")
return style("<{0}>".format(", ".join(flags)), Colours.FG.CYAN)
def __init__(self, raw=b''):
super().__init__(raw=raw)
# Connect header
self.protocol_name_length = 0
self.protocol_name = b""
self.protocol_level = 0
self.connect_flags = None
self.keep_alive_s = 0
# Payload
self.packet_id = b""
self.will_topic = b""
self.will_msg = b""
self.username = b""
self.password = b""
if raw:
self._parse_payload()
def _parse_payload(self):
# To parse the payload for the Connect packet structure, at least 11 bytes are needed (10+)
if len(self.payload) < 12:
raise MQTTDisconnectError("[MQTTPacket::Connect] Malformed packet (too short)!")
self.protocol_name_length, self.protocol_name = self._extract_next_field()
if self.protocol_name_length != 4:
raise MQTTDisconnectError("[MQTTPacket::Connect] Malformed packet, unexpected protocol length '{0}'!"
.format(self.protocol_name_length))
if self.protocol_name != MQTTPacket.PROTOCOL_NAME:
# [MQTT-3.1.2-1] Invalid protocol, disconnect
raise MQTTDisconnectError("[MQTTPacket::Connect] Invalid protocol name '{0}'!".format(self.protocol_name))
self.protocol_level = Bits.unpack(self.payload[0:1])
self.connect_flags = Connect.ConnectFlags.from_bytes(self.payload[1:2])
if not self.connect_flags.is_valid():
raise MQTTDisconnectError("[MQTTPacket::Connect] Malformed packet flags!")
# Keep alive time, max val is 0xFFFF == 18 hours, 12 minutes and 15 seconds
self.keep_alive_s = Bits.unpack(self.payload[2:4])
self.payload = self.payload[4:]
# Client ID (1...23 length, or 0 length => assign unique)
# if len == 0: assign unique and check if clean flag == 0
# if clean flag == 0: respond with CONNACK return code 0x02 (Identifier rejected) and close conn
_, self.packet_id = self._extract_next_field()
# Will topic
if self.connect_flags.will:
_, self.will_topic = self._extract_next_field()
# Will message
_, self.will_msg = self._extract_next_field()
# [MQTT-3.1.2-9] Make sure will topic and msg exist
if not self.will_topic or not self.will_msg:
raise MQTTDisconnectError("[MQTTPacket::Connect] Will flag is 1, but no topic or message in packet!")
# [MQTT-3.1.2-14] Check Will Qos is valid
if self.connect_flags.will_qos not in WillQoS.CHECK_VALID:
raise MQTTDisconnectError("[MQTTPacket::Connect] Will flag is 1 and will QoS invalid! ({0})".format(self.connect_flags.will_qos))
else:
# [MQTT-3.1.2-13], [MQTT-3.1.2-15]
if self.connect_flags.will_qos != 0:
raise MQTTDisconnectError("[MQTTPacket::Connect] Will flag is 0, but the Will QoS is not equal to 0! ({0})".format(self.connect_flags.will_qos))
if self.connect_flags.will_ret != 0:
raise MQTTDisconnectError("[MQTTPacket::Connect] Will flag is 0, but the Will Retain is not equal to 0! ({0})".format(self.connect_flags.will_ret))
# User name
if self.connect_flags.usr_name:
_, self.username = self._extract_next_field()
# [MQTT-3.1.2-19] If username not present
if not self.username:
raise MQTTDisconnectError("[MQTTPacket::Connect] Username flag is 1, but no username given!")
# Password
if self.connect_flags.passw:
_, self.password = self._extract_next_field()
# [MQTT-3.1.2-21] If password not present
if not self.password:
raise MQTTDisconnectError("[MQTTPacket::Connect] Password flag is 1, but no password given!")
else:
# [MQTT-3.1.2-20] Password flag == 0, no password in payload allowed
_, pw = self._extract_next_field()
if pw:
raise MQTTDisconnectError("[MQTTPacket::Connect] Password flag is 0, but password given!")
else:
_, un = self._extract_next_field()
# [MQTT-3.1.2-18] User flag == 0, no username in payload allowed
if un != None and self.username != b"":
raise MQTTDisconnectError("[MQTTPacket::Connect] Username given while flag was set to 0 (MQTT-3.1.2-18)")
# [MQTT-3.1.2-22] No username, means no password allowed.
if self.connect_flags.passw:
raise MQTTDisconnectError("[MQTTPacket::Connect] Username flag is 0, but password flag is set!")
def is_valid_protocol_level(self):
"""TODO If False, respond with CONNACK 0x01 : Unacceptable protocol level and disconnect."""
return self.protocol_level == 4
def to_bin(self):
# TODO implement for MQTTClient
pass
def __str__(self):
attr = []
if self.protocol_name:
attr.append("prot={0}".format(self.protocol_name))
if self.protocol_level:
attr.append("plvl={0}".format(self.protocol_level))
if self.keep_alive_s:
attr.append("KeepAlive={0}s".format(self.keep_alive_s))
if self.packet_id:
attr.append("id='{0}'".format(Bits.bytes_to_str(self.packet_id)))
if self.will_topic:
attr.append("wtop='{0}'".format(Bits.bytes_to_str(self.will_topic)))
if self.will_msg:
attr.append("wmsg={0}".format(self.will_msg))
if self.username:
attr.append("usr={0}".format(self.username))
if self.password:
attr.append("psw={0}".format(self.password))
text = style("<{0}".format(self.name()), Colours.FG.BLUE)
if self.connect_flags:
text += style(" conn=", Colours.FG.BLUE)
text += str(self.connect_flags)
if attr:
text2 = " " if not self.connect_flags else ", "
text2 += ", ".join(attr)
text += style(text2, Colours.FG.BLUE)
text += style(">", Colours.FG.BLUE)
return text
class Subscribe(MQTTPacket):
def __init__(self, raw=b''):
super().__init__(raw=raw)
self.topics = {} # { topic: TopicSubscription(order, topic, qos) }
if raw:
self._parse_payload()
def _parse_payload(self):
if len(self.payload) < 3:
# Also covers [MQTT-3.8.3-3]: At least one topic is required.
raise MQTTDisconnectError("[MQTTPacket::Subscribe] Malformed packet (too short)!")
# Already got packet_id, if there was one
subscription_order = 0
# Payload contains one or more topics followed by a QoS
while len(self.payload) > 0:
# Get topic filter
topic_len, topic = self._extract_next_field()
if topic_len < 1:
# [MQTT-4.7.3-1] Topic needs to be at least 1 byte long
raise MQTTPacketException("[MQTTPacket::Subscribe] Topic must be at least 1 character long!")
elif b"\x00" in topic:
# [MQTT-4.7.3-2] Topic cannot contain null characters
raise MQTTPacketException("[MQTTPacket::Subscribe] Topic may not contain null characters!")
# Get Requested QOS
qos_len, qos = self._extract_next_field(1)
qos = Bits.unpack(qos)
if qos not in WillQoS.CHECK_VALID:
raise MQTTDisconnectError("[MQTTPacket::Subscribe] Malformed QoS!")
# [MQTT-2.3.1-1] If qos > 0 then packet_id (!= 0) is required
if qos > 0 and (not self.packet_id or (self.packet_id and Bits.unpack(self.packet_id) == 0)):
raise MQTTPacketException("[MQTTPacket::Subscribe] Topic QoS level > 0, but no or zeroed Packet ID given!")
# WARNING The order is important, SUBACK needs to send in same order
sub = TopicSubscription(subscription_order, Bits.bytes_to_str(topic), qos)
subscription_order += 1
self.topics[sub.topic] = sub
def to_bin(self):
# TODO implement for MQTTClient
pass
def __str__(self):
attr = []
if self.packet_id:
attr.append("id={0}".format(self.packet_id))
if self.topics:
attr.append("topics=[{0}]".format(", ".join(map(str, self.topics.values()))))
return style("<{0}{1}>" \
.format(self.name(),
" " + ", ".join(attr) if attr else ""),
Colours.FG.BLUE)
class Unsubscribe(MQTTPacket):
def __init__(self, raw=b''):
super().__init__(raw=raw)
self.topics = [] # [ topics ]
if raw:
self._parse_payload()
def _parse_payload(self):
if len(self.payload) < 3:
# Also covers [MQTT-3.10.3-2]: At least one topic is required.
raise MQTTDisconnectError("[MQTTPacket::Unsubscribe] Malformed packet (too short)!")
# [MQTT-2.3.1-1] If qos > 0 then packet_id (!= 0) is required
# `self.pflag.qos > 0 and` => Cannot check flags here, no flags!
# Unsubscribe always has packet id!
if (not self.packet_id or (self.packet_id and Bits.unpack(self.packet_id) == 0)):
raise MQTTPacketException("[MQTTPacket::Unsubscribe] QoS level > 0, but no or zeroed Packet ID given!")
# Payload contains one or more topics followed by a QoS
while len(self.payload) > 0:
# Get topic filter
topic_len, topic = self._extract_next_field()
if topic_len < 1:
# [MQTT-4.7.3-1] Topic needs to be at least 1 byte long
raise MQTTPacketException("[MQTTPacket::Unsubscribe] Topic must be at least 1 character long!")
elif b"\x00" in topic:
# [MQTT-4.7.3-2] Topic cannot contain null characters
raise MQTTPacketException("[MQTTPacket::Unsubscribe] Topic may not contain null characters!")
self.topics.append(Bits.bytes_to_str(topic))
def to_bin(self):
# TODO implement for MQTTClient
pass
def __str__(self):
attr = []
if self.packet_id:
attr.append("id={0}".format(self.packet_id))
if self.topics:
attr.append("topics={0}".format(self.topics))
return style("<{0}{1}>" \
.format(self.name(),
" " + ", ".join(attr) if attr else ""),
Colours.FG.BLUE)
class Publish(MQTTPacket):
def __init__(self, raw=b''):
super().__init__(raw=raw)
self.pflag = ControlPacketType.PublishFlags.from_byte(self.pflag)
self.topic = b""
if raw:
self._parse_payload()
def _parse_payload(self):
if len(self.payload) < 4:
raise MQTTDisconnectError("[MQTTPacket::Publish] Malformed packet (too short)!")
topic_len, id_len = 0, 0
topic_len, self.topic = self._extract_next_field()
if topic_len < 1:
# [MQTT-4.7.3-1] Topic needs to be at least 1 byte long
raise MQTTPacketException("[MQTTPacket::Publish] Topic must be at least 1 character long!")
# [MQTT-3.3.2-2] Topic cannot contain wildcards
# [MQTT-4.7.3-2] Topic cannot contain null characters
top = Bits.bytes_to_str(self.topic)
if TopicMatcher.HASH in top or TopicMatcher.PLUS in top or b'\x00' in self.topic:
raise MQTTPacketException("[MQTTPacket::Publish] Topic may not contain wildcards or null characters!")
if self.pflag.qos in (WillQoS.QoS_1, WillQoS.QoS_2):
# TODO overrides client_id?
id_len, self.packet_id = self._extract_next_field(length=2)
# [MQTT-2.3.1-1] If qos > 0 then packet_id (!= 0) is required
if not self.packet_id or (self.packet_id and Bits.unpack(self.packet_id) == 0):
raise MQTTPacketException("[MQTTPacket::Publish] QoS level > 0, but no or zeroed Packet ID given!")
elif self.pflag.qos == WillQoS.QoS_0 and self.packet_id:
# [MQTT-2.3.1-5]
raise MQTTPacketException("[MQTTPacket::Publish] QoS level == 0, but Packed ID given! ({0})".format(self.packet_id))
        if len(self.payload) != self.length - topic_len - id_len:
            print("[MQTTPacket::Publish] Expected size = {0} vs actual = {1}"
                  .format(self.length - topic_len - id_len, len(self.payload)))
def to_bin(self):
data = bytearray()
data.append(self.ptype | self.pflag.to_bin())
msg = bytearray()
msg.extend(Bits.pack(len(self.topic), 2))
msg.extend(self.topic)
if self.pflag.qos in (WillQoS.QoS_1, WillQoS.QoS_2):
msg.extend(Bits.pad_bytes(self.packet_id, 2))
if self.payload:
msg.extend(self.payload)
self.length = len(msg)
data.extend(self._create_length_bytes(self.length))
data.extend(msg)
return bytes(data)
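    # Illustrative sketch, not part of the original code: assuming self.ptype holds the
    # standard PUBLISH type nibble (0x30), a QoS 1 PUBLISH to topic "a/b" with packet id
    # 0x0001 and payload b"hi" would serialise via to_bin() above roughly as
    # 0x32 0x09 | 0x00 0x03 'a' '/' 'b' | 0x00 0x01 | 'h' 'i'
    # (fixed header byte with flags, remaining length, topic length + topic, packet id, payload).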
def __str__(self):
attr = []
attr.append("id={0}".format(self.packet_id or "?"))
if self.topic:
attr.append("topic='{0}'".format(Bits.bytes_to_str(self.topic)))
if self.payload:
attr.append("msg={0}".format(self.payload if len(self.payload) < 100 else \
"({0} bytes)".format(len(self.payload))))
return style("<{0}".format(self.name()), Colours.FG.BLUE) \
+ str(self.pflag) \
+ (style(" " + ", ".join(attr), Colours.FG.BLUE) if attr else "") \
+ style(">", Colours.FG.BLUE)
|
UTF-8
|
Python
| false | false | 25,624 |
py
| 76 |
mqtt_packet.py
| 46 | 0.573017 | 0.557914 | 0.000117 | 643 | 38.8507 | 163 |
woniuxiaoan/naas
| 7,988,639,215,109 |
b4b569ff29435ef72edc40e0048c7541e121332f
|
43992ff03c5401b12eaa6b2095dcbebd6aecdb84
|
/naas/agent/version.py
|
7016d823c81d69cbb67b5c0abf57d20ce03aa6e2
|
[] |
no_license
|
https://github.com/woniuxiaoan/naas
|
d8c0b85cc4c8316e383aa73655979addffd58631
|
d1f16c06c393053194a64ff87c8db1f1c7fbb50d
|
refs/heads/master
| 2016-06-12T04:41:01.438056 | 2016-04-13T05:23:59 | 2016-04-13T05:23:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""NAAS Agent Version Define."""
NAAS_AGENT_VENDOR = "nass"
NAAS_AGENT_PRODUCT = "agent"
NAAS_AGENT_VERSION = "1.0"
def version_string():
return "%s:%s:%s"\
% (NAAS_AGENT_VENDOR, NAAS_AGENT_PRODUCT, NAAS_AGENT_VERSION)
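# Illustrative usage sketch, not part of the original module: with the constants
# above, version_string() evaluates to "nass:agent:1.0".
if __name__ == "__main__":
    print(version_string())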
|
UTF-8
|
Python
| false | false | 233 |
py
| 98 |
version.py
| 87 | 0.652361 | 0.643777 | 0 | 9 | 24.888889 | 69 |
fin/cocoaether
| 12,034,498,380,432 |
e4668da27cf503c06d077cdba6884af0f23879b6
|
957c7f222073f262cf3cc66ace77c470325abcbb
|
/plugin/src/xcController.py
|
54159c89169001fb4a4279331ceec1be04cd04ed
|
[] |
no_license
|
https://github.com/fin/cocoaether
|
69f086042ab4eb26ce6825358009f12a59876166
|
aeb7d462e71e68d1ad61e551863b40b65666ae7c
|
refs/heads/master
| 2020-04-27T21:04:13.382431 | 2011-02-03T15:10:15 | 2011-02-03T15:10:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# xcController.py
# aethercocoa
#
# Created by fin on 6/21/10.
# Copyright (c) 2010 __MyCompanyName__. All rights reserved.
#
from objc import YES, NO, IBAction, IBOutlet
from Foundation import *
from AppKit import *
import Cocoa
import objc
class xcController(NSWindowController):
computers = objc.IBOutlet()
computers_view = objc.IBOutlet()
webview = objc.IBOutlet()
webViewDelegate = objc.IBOutlet()
def awakeFromNib(self):
resourcePath = NSBundle.mainBundle().resourcePath().replace('/','//').replace(' ', '%20')
self.webview.mainFrame().loadHTMLString_baseURL_(
NSString.stringWithContentsOfFile_(
NSBundle.mainBundle().pathForResource_ofType_("webview", "html")
),
NSURL.URLWithString_("file://%s/" % resourcePath)
)
print dir(self)
self.webview.setUIDelegate_(self.webViewDelegate);
self.webview.setEditingDelegate_(self.webViewDelegate)
self.webViewDelegate.setDataSource_(self.computers);
|
UTF-8
|
Python
| false | false | 1,040 |
py
| 10 |
xcController.py
| 8 | 0.663462 | 0.652885 | 0 | 32 | 31.46875 | 97 |
jirikuncar/analysis-preservation.cern.ch
| 10,445,360,468,846 |
f7a7c538051bc00a61222d68c3bf72de11ba0220
|
786f8a61774581de0d09b2e84be724d2e2fda6ff
|
/cap/modules/access/views.py
|
495121483e0d8e0286838e796652679cc94d78af
|
[] |
no_license
|
https://github.com/jirikuncar/analysis-preservation.cern.ch
|
a17e28ad73b12654fd08ba8ca763fb780a9544ca
|
50e7de692c82cfe3724da8b523fd7bdbabf13cc9
|
refs/heads/master
| 2021-01-23T00:56:31.976634 | 2016-08-25T12:18:28 | 2016-08-25T12:18:28 | 67,034,322 | 0 | 0 | null | true | 2016-08-31T12:14:08 | 2016-08-31T12:14:06 | 2016-08-31T12:14:07 | 2016-08-30T14:04:27 | 3,425 | 0 | 0 | 0 |
JavaScript
| null | null |
"""Access blueprint in order to dispatch the login request."""
from __future__ import absolute_import, print_function
from flask import Blueprint
access_blueprint = Blueprint('cap_access', __name__,
url_prefix='/access',
template_folder='templates')
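# Illustrative usage sketch; the application object below is an assumption, not taken
# from this module. The blueprint is typically attached to a Flask app as
#     app.register_blueprint(access_blueprint)
# after which its routes are served under the '/access' URL prefix.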
|
UTF-8
|
Python
| false | false | 311 |
py
| 3 |
views.py
| 2 | 0.607717 | 0.607717 | 0 | 9 | 33.555556 | 62 |
Floweryu/HappyPython
| 1,915,555,451,408 |
e8cca0880572cb568331516a429845e6ffcdecb1
|
332b1394845226008d5dc3bae888b10b6a384edd
|
/WebSpider/VideoSpider/baotu.py
|
049647ff1a1a11687ccb7e1bd3c9b6bf49cd392c
|
[] |
no_license
|
https://github.com/Floweryu/HappyPython
|
397f748aaefe75319061582d4ae348db46cd9713
|
3807214895d166402e143c2c193b770e17f4ce1c
|
refs/heads/master
| 2022-03-25T23:51:04.451175 | 2019-12-18T07:54:54 | 2019-12-18T07:54:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
# _author_='Zhang JunFeng'
import requests
from lxml import etree
response = requests.get("https://ibaotu.com/shipin/")
html = etree.HTML(response.text)
title_list = html.xpath('//span[@class="video-title"]/text()')
src_list = html.xpath('//div[@class="video-play"]/video/@src')
for title, src in zip(title_list, src_list):
response = requests.get("http:" + src)
filename = "video\\" + title + ".mp4"
print("正在保存视频文件:" + filename)
with open(filename, "wb") as f:
f.write(response.content)
|
UTF-8
|
Python
| false | false | 555 |
py
| 58 |
baotu.py
| 28 | 0.649907 | 0.646182 | 0 | 16 | 32.625 | 62 |
abhisek176/py-cloudy
| 1,245,540,516,982 |
b29f80ca4539b50e8e168e3c695dd3ac30e5fab8
|
d0fff767c26e9d07ffee5b1b76070c460f1f27dd
|
/cloudy/mcmc.py
|
8efe95efb3c66fd6b684f012fce41270445fcc94
|
[] |
no_license
|
https://github.com/abhisek176/py-cloudy
|
d65f6f297f58a0c0c6e696162881fd8941812d32
|
024a57c5bcdabae671ecae8d018a43f638ae1432
|
refs/heads/main
| 2023-04-29T06:06:33.728847 | 2021-05-24T01:55:37 | 2021-05-24T01:55:37 | 370,198,449 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import astropy.table as tab
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import emcee
import corner
#----data
def get_true_model(model_Q, Q= 18):
"""
    :param model_Q: path of the model file whose Q suffix is replaced by the requested Q
    :param Q: UVB Q value used to build the model filename (default 18)
    :return: the row of ion column densities at n_H = 1e-4 cm^-3
"""
model = model_Q.split('_Q')[0] + '_Q{}.fits'.format(Q)
data = tab.Table.read(model)
true_ion_col = data [data['hden'] == 1e-4]
# print(true_ion_col)
return true_ion_col
#----model interpolation
def get_interp_func(model_Q, ions_to_use):
number_of_ions = len(ions_to_use)
model = tab.Table.read(model_Q)
sorted_model = model[ions_to_use]
hden_array = np.array(model['hden'])
    model_tuple = ()
    for j in range(number_of_ions):
        model_tuple += (sorted_model[ions_to_use[j]],)
    # interpolating in log-log scale
    logf = interp1d(np.log10(hden_array), np.log10(model_tuple), fill_value='extrapolate')
return logf
#----for mcmc
def log_likelihood(theta, interp_logf, obs_ion_col, col_err, reference_log_metal = -1.0):
"""
    Log-likelihood assuming Gaussian-distributed errors.
    :param theta: parameters [lognH, logZ]
    :param interp_logf: interpolator returning log10 ion column densities as a function of log10 nH
    :param obs_ion_col: observed log10 ion column densities
    :param col_err: errors on the observed log10 column densities
    :return: the Gaussian log-likelihood
"""
lognH, logZ = theta
# get metal ion column density for n_H and Z = 0.1
col = 10 ** interp_logf(lognH)
# scale the column densities by the metallicity Z
metal_scaling_linear = 10 ** logZ / 10 ** reference_log_metal
model_col = np.log10(col * metal_scaling_linear)
lnL = -0.5 * np.sum(np.log(2 * np.pi * col_err ** 2) + (obs_ion_col - model_col) ** 2 / col_err ** 2)
return lnL
def log_prior(theta):
lognH, logZ = theta
# flat prior
if -6 < lognH < -2 and -2 < logZ < 1 :
return 0.0
return -np.inf
def log_posterior(theta, interp_func, data_col, sigma_col):
log_p = log_prior(theta) + \
log_likelihood(theta, interp_logf = interp_func, obs_ion_col = data_col, col_err = sigma_col)
return log_p
def run_mcmc(model_Q, ions_to_use, true_Q =18, figname = 'test.pdf', same_error = False):
# run_mcmc(model_Q= model, ions_to_use= ions)
# ------------------ here is a way to run code
truths = [-4, -1] # (lognH, logZ) true values
number_of_ions = len(ions_to_use)
data_col_all = get_true_model(model_Q, Q=true_Q)
# converting astropy table row to a list
data_col = []
for name in ions_to_use:
data_col.append(data_col_all[name][0])
np.random.seed(0)
if same_error:
sigma_col = 0.2 * np.ones(number_of_ions)
else:
sigma_col = np.random.uniform(0.1, 0.3, number_of_ions)
print(np.log10(data_col), sigma_col)
interp_logf = get_interp_func(model_Q, ions_to_use)
# Here we'll set up the computation. emcee combines multiple "walkers",
# each of which is its own MCMC chain. The number of trace results will
# be nwalkers * nsteps
ndim = 2 # number of parameters in the model
nwalkers = 50 # number of MCMC walkers
nsteps = 5000 # number of MCMC steps to take
    # draw starting positions for the walkers uniformly over the prior ranges
    n_guess = np.random.uniform(-5, -2, nwalkers)
    z_guess = np.random.uniform(-2, 1, nwalkers)
    starting_guesses = np.vstack((n_guess, z_guess)).T  # shape (nwalkers, ndim)
# Here's the function call where all the work happens:
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=(interp_logf, np.log10(data_col), sigma_col))
sampler.run_mcmc(starting_guesses, nsteps, progress=True)
# find out number of steps
tau = sampler.get_autocorr_time() # number of steps needed to forget the starting position
#print(tau)
    thin = int(np.mean(tau) / 2)  # roughly half the mean autocorrelation time, used below
    #thin = 100
    flat_samples = sampler.get_chain(discard=thin * 20, thin= 5, flat=True)
    # we discard the initial steps, roughly 10 autocorrelation times (thin * 20)
    # then we thin by a fixed factor of 5 for plotting => one does not have to do this step
labels = ['log nH', 'log Z']
uvb_q= int((model_Q.split('try_Q')[-1]).split('.fits')[0])
if uvb_q == true_Q:
fig = corner.corner(flat_samples, labels=labels, truths=truths, quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
else:
fig = corner.corner(flat_samples, labels=labels, quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
fig.savefig(figname)
for i in range(ndim):
mcmc = np.percentile(flat_samples[:, i], [16, 50, 84])
q = np.diff(mcmc)
print(labels[i], '=', mcmc[1], q[0], q[1])
return flat_samples, ndim
ions_to_use= ['C+3', 'N+3', 'Si+3', 'O+5', 'C+2']
true_Q =18
outpath = '/home/vikram/cloudy_run/figures'
outfile = outpath + '/NH14_out.fits'
uvb_array= [14, 15, 16, 17, 18, 19, 20]
out_tab = tab.Table()
for uvb_q in uvb_array:
model_Q = '/home/vikram/cloudy_run/anshuman/try_Q{}.fits'.format(uvb_q)
name = model_Q.split('/')[-2] + '_' + (model_Q.split('/')[-1]).split('.fits')[0]
figname = outpath + '/' + name + '.pdf'
flat_samples, ndim = run_mcmc(model_Q=model_Q, ions_to_use=ions_to_use, true_Q=true_Q, figname=figname)
# to efficiently save numpy array
save_file_name = outpath + '/' + name
np.save(save_file_name, flat_samples)
out =[[uvb_q]]
for i in range(ndim):
mcmc = np.percentile(flat_samples[:, i], [16, 50, 84])
q = np.diff(mcmc)
out.append([mcmc[1]])
out.append([q[0]])
out.append([q[1]])
print(out)
t = tab.Table(out, names = ('Q', 'nH', 'n16', 'n84', 'Z', 'Z16', 'Z84'))
out_tab = tab.vstack((out_tab, t))
out_tab.write(outfile, overwrite = True)
|
UTF-8
|
Python
| false | false | 5,848 |
py
| 28 |
mcmc.py
| 20 | 0.618844 | 0.593365 | 0 | 175 | 32.405714 | 117 |
Holinc19/Python-Demo
| 13,443,247,662,260 |
074be3fd8dce3deb2bb4d1c242b87ae7b549ce2c
|
b6bfa98239955e4fcbe92850be4ce4ea9802f9b6
|
/exception_demo.py
|
217124801bc40f6785bfccfba4fbea1ab48449e0
|
[
"MIT"
] |
permissive
|
https://github.com/Holinc19/Python-Demo
|
e5b99821b0133cc632c50b37ca3b755812f30d7d
|
8f771d29b4a73f9d76e2cd8bf30e7b3f9bf712dd
|
refs/heads/master
| 2020-03-26T17:41:15.811518 | 2018-08-18T00:04:11 | 2018-08-18T00:04:11 | 145,174,546 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Networkerror(RuntimeError):
def __init__(self, arg):
self.args = arg
try:
# 1 / 0
raise Networkerror("Bad hostname")
except Networkerror as err:
print("自定义异常")
print(err.args)
except Exception as e:
'''异常的父类,可以捕获所有的异常'''
print("0不能被除")
else:
'''保护不抛出异常的代码'''
print("没有异常")
finally:
print("最后总是要执行我")
|
UTF-8
|
Python
| false | false | 449 |
py
| 22 |
exception_demo.py
| 22 | 0.607843 | 0.59944 | 0 | 20 | 16.85 | 38 |
laranea/Decaf-Compiler
| 16,063,177,703,956 |
36dc41eb7a951f2370cfd3bd22786c99449e6264
|
4d7d8840908ab10a0a96b9fc445059f1e76a7ce1
|
/src/ast/Expressions/MethodInvocationExpr.py
|
a0e83a8f5a65c9c0f08ca841c424adb5f65805e9
|
[] |
no_license
|
https://github.com/laranea/Decaf-Compiler
|
8a7d53d624a1bf76ac7a650b4f86f85b83e13916
|
4766024b871a8f5c3ad476190ec4e88c90d03171
|
refs/heads/master
| 2021-05-20T12:41:28.001718 | 2017-09-24T18:17:19 | 2017-09-24T18:17:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Expr import *
from ast.ast_helpers import *
from ast.Type import *
from ast.Class import *
import ast.Config as Config
class MethodInvocationExpr(Expr):
def __init__(self, field, args, lines):
self.lines = lines
self.base = field.base
self.mname = field.fname
self.args = args
self.method = None
self.__typeof = None
def __repr__(self):
return "Method-call({0}, {1}, {2})".format(self.base, self.mname, self.args)
def typeof(self):
if (self.__typeof == None):
# resolve the method name first
btype = self.base.typeof()
if btype.isok():
if btype.kind not in ['user', 'class_literal']:
signal_type_error("User-defined class/instance type expected, found {0}".format(str(btype)), self.lines)
self.__typeof = Type('error')
else:
if btype.kind == 'user':
# user-defined instance type:
acc = 'instance'
else:
# user-defined class type
acc = 'static'
baseclass = btype.baseclass
argtypes = [a.typeof() for a in self.args]
if (all([a.isok() for a in argtypes])):
j = resolve_method(acc, baseclass, self.mname, argtypes, Config.current_class, self.lines)
if (j == None):
self.__typeof = Type('error')
else:
self.method = j
self.__typeof = j.rtype
else:
self.__typeof = Type('error')
return self.__typeof
|
UTF-8
|
Python
| false | false | 1,816 |
py
| 39 |
MethodInvocationExpr.py
| 38 | 0.464207 | 0.462004 | 0 | 46 | 38.5 | 124 |
Gitnameisname/DLED-NACA-4
| 4,758,823,802,927 |
9aea632a54645a5273db95fdc241252bfaf76444
|
e2ad80bba88ec0ae4047a4fe8a2a13ef3929ee15
|
/NACA4_Config.py
|
31cf2ba5e06132b60dfbc938f9ab146ebb72c030
|
[] |
no_license
|
https://github.com/Gitnameisname/DLED-NACA-4
|
2bb38a97916ef4c2e4e57d7c22fb66bcab19fc48
|
4458032996c334960fa3f72154f56263f5d091f3
|
refs/heads/master
| 2022-01-07T00:28:37.991949 | 2019-06-17T13:30:23 | 2019-06-17T13:30:23 | 191,868,159 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 12:28:14 2017
@author: Muuky
@author: K_LAB
"""
"""
Info
=====
Generate NACA 4 digit
"""
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
workdirect=os.getcwd()
codedirect=os.path.dirname(os.path.realpath(__file__))
sys.path.append(codedirect)
# C : Max. Camber
# LC : Loc. Max. Camber
# T : Max. Thickness
def NACA4(No_Point, C, LC, T,Savedirect,no_proc):
m = C/100
p= LC/10
t = T/100
name_file = "Temp Config"+str(no_proc)+".txt"
filedirect=os.path.join(Savedirect,name_file)
    # If a previous Temp Config file remains, remove it #
if os.path.isfile(filedirect):
os.remove(filedirect)
f=open(filedirect,'w')
f.write("TempAirfoil\n")
f.close()
    point = 1-(np.logspace(0.0,1.0,No_Point//2)-1)/18
    point = np.append(point,(np.logspace(1.0,0.0,No_Point//2)-1)/18)
point = np.flip(np.unique(point),0)
# Write Upper side point #
for x in point:
upper = naca4upper(x,m,p,t)
f=open(filedirect,"a")
f.write("{:10.5f}{:10.5f}\n".format(upper[0], upper[1]))
f.close()
# Write Bottom side point #
point=np.flip(point,0)
for x in point:
lower = naca4lower(x,m,p,t)
f=open(filedirect,"a")
f.write("{:10.5f}{:10.5f}\n".format(lower[0], lower[1]))
f.close()
def draw_NACA4(No_Point, C, LC, T,Savedirect):
m = C/100
p= LC/10
t = T/100
filedirect=os.path.join(Savedirect,"Predicted NACA.txt")
    # If a previous Temp Config file remains, remove it #
if os.path.isfile(filedirect):
os.remove(filedirect)
f=open(filedirect,'w')
f.write("Predicted NACA\n")
f.close()
    point = 1-(np.logspace(0.0,1.0,No_Point//2)-1)/18
    point = np.append(point,(np.logspace(1.0,0.0,No_Point//2)-1)/18)
point = np.flip(np.unique(point),0)
Up_point = np.zeros([0,2])
Lo_point = np.zeros([0,2])
# Write Upper side point #
for x in point:
upper = naca4upper(x,m,p,t)
f=open(filedirect,"a")
f.write("{:10.5f}{:10.5f}\n".format(upper[0], upper[1]))
f.close()
Up_point=np.append(Up_point,np.expand_dims(np.array(upper),axis=0),axis=0)
# Write Bottom side point #
point=np.flip(point,0)
for x in point:
lower = naca4lower(x,m,p,t)
f=open(filedirect,"a")
f.write("{:10.5f}{:10.5f}\n".format(lower[0], lower[1]))
f.close()
Lo_point=np.append(Lo_point,np.expand_dims(np.array(lower),axis=0),axis=0)
plt.close('all')
plt.plot(Up_point[:,0],Up_point[:,1],label='Upper')
plt.plot(Lo_point[:,0],Lo_point[:,1],label='Lower')
plt.grid(True)
plt.xlabel('x',fontsize=16)
plt.ylabel('y',fontsize=16)
plt.legend()
plt.title('Predicted Airfoil',loc='left',fontsize=20)
range_plot = 1.2
plt.xlim([-0.1,-0.1+range_plot])
plt.ylim([0.0-range_plot/2,0.0+range_plot/2])
plt.savefig(os.path.join(Savedirect,'Predicted Airfoil'))
return Lo_point
def cosine_spacing(num):
beta0 = np.linspace(0.0,1.0,num+1)
x = []
for beta in beta0:
x.append((0.5*(1.0-np.cos(beta))))
return x
def camber_line( x, m, p):
if (x>=0) & (x < p):
return (m/(p**2.))*(2.*p*x - x**2.0)
elif (x>=p) & (x<=1):
return (m/(1-p)**2)*(1 - 2.0*p + 2.0*p*x- x**2.0)
def dyc_over_dx(x, m, p):
if (x >= 0) & (x < p):
return (2.0*m/(p**2.))*(p - x)
elif (x >= p) & (x <= 1):
return (2.0*m/((1-p)**2))*(p - x)
def thickness(x, t):
term1 = 0.2969 * (np.sqrt(x))
term2 = -0.1260 * x
term3 = -0.3516 * x**2.0
term4 = 0.2843 * x**3.0
term5 = -0.1015 * x**4.0
return 5 * t * (term1 + term2 + term3 + term4 + term5)
def naca4upper(x, m, p, t):
dyc_dx = dyc_over_dx(x, m, p)
th = np.arctan(dyc_dx)
yt = thickness(x, t)
yc = camber_line(x, m, p)
xx = x - yt*np.sin(th)
yy = yc + yt*np.cos(th)
return (xx,yy)
def naca4lower(x,m,p,t,c=1):
dyc_dx = dyc_over_dx(x, m, p)
th = np.arctan(dyc_dx)
yt = thickness(x, t)
yc = camber_line(x, m, p)
xx = x + yt*np.sin(th)
yy = yc - yt*np.cos(th)
return (xx,yy)
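# Illustrative usage sketch; the output directory below is a placeholder, not taken from
# this file. For a NACA 2412 airfoil (2% max camber at 40% chord, 12% thickness) one
# would call, e.g.:
#     NACA4(No_Point=100, C=2, LC=4, T=12, Savedirect='/tmp', no_proc=0)
#     draw_NACA4(No_Point=100, C=2, LC=4, T=12, Savedirect='/tmp')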
|
UTF-8
|
Python
| false | false | 4,484 |
py
| 65 |
NACA4_Config.py
| 16 | 0.523417 | 0.475245 | 0 | 165 | 25.187879 | 82 |
martinmcbride/python-for-gcse-maths
| 489,626,314,013 |
f2de1631f07f5130274c3868b1ee0a016fc62ffc
|
c23600a1dc68f88ed57e42a09c2a683e99b3c3c0
|
/geometry/cairobase.py
|
086196e43a33d76c3fb78b24831162ee6622c02e
|
[
"MIT"
] |
permissive
|
https://github.com/martinmcbride/python-for-gcse-maths
|
097797182a1dac7c937c19426a5f1051e4da1554
|
502daf85d045036655be44864db86ba78c83f6b2
|
refs/heads/master
| 2020-04-03T09:52:34.700751 | 2016-06-10T23:23:41 | 2016-06-10T23:23:41 | 60,217,112 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
####
#
# cairobase.py
#
# Author martin.mcbride@axlesoft.com
# Copyright schoolcoders.com 2016
# MIT licence
#
####
import cairo
def save(draw, filename, width=500, height=500, fill=(1, 1, 1),
scale=25):
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context (surface)
ctx.rectangle(0, 0, width, height)
ctx.set_source_rgb(*fill)
ctx.fill()
ctx.translate(width/2, height/2)
ctx.scale(scale, -scale)
xr = width/scale
yr = height/scale
ctx.set_source_rgb(.8, .8, 1)
ctx.set_line_width(.1)
for n in range(int(-xr/2), int(xr/2)):
if n:
ctx.move_to(n, -yr/2)
ctx.line_to(n, yr/2)
ctx.stroke()
for n in range(int(-yr/2), int(yr/2)):
if n:
ctx.move_to(-xr/2, n)
ctx.line_to(xr/2, n)
ctx.stroke()
ctx.set_source_rgb(.4, .4, 1)
ctx.move_to(0, -yr/2)
ctx.line_to(0, yr/2)
ctx.stroke()
ctx.move_to(-xr/2, 0)
ctx.line_to(xr/2, 0)
ctx.stroke()
ctx.set_source_rgb(0,0,0)
ctx.set_line_width(.1)
draw(ctx)
surface.write_to_png(filename)
if __name__=='__main__':
save(lambda x: 0, 'grid.png')
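    # Illustrative sketch, not part of the original file: a custom draw callback receives
    # the prepared cairo context (origin at the centre, y axis pointing up), e.g.
    #     def draw_diagonal(ctx):
    #         ctx.move_to(-5, -5)
    #         ctx.line_to(5, 5)
    #         ctx.stroke()
    #     save(draw_diagonal, 'diagonal.png')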
|
UTF-8
|
Python
| false | false | 1,278 |
py
| 3 |
cairobase.py
| 2 | 0.525822 | 0.48748 | 0 | 49 | 25.081633 | 69 |
SCARLETRAIN511/Python
| 11,141,145,209,188 |
83c3011b36ddbe70ddb7ec672515f399588b6d41
|
0de8df3875beb874e9691e798aa7c2d199461519
|
/leetcode/marshallWace.py
|
2a9a16d9cdeaadf541b61ba1ca58b6e9798ddcb1
|
[] |
no_license
|
https://github.com/SCARLETRAIN511/Python
|
695e6b7e823183e1f59a32ac4d26cfcfb23f4412
|
4d8a6e03a81322cb215e3989840549faf98fd71e
|
refs/heads/master
| 2021-06-12T14:55:21.806772 | 2021-04-13T10:06:30 | 2021-04-13T10:06:30 | 180,573,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def solution(s):
# write your code in Python 3.6
letters = dict()
for i in s:
if i not in letters.keys():
letters[i] = 1
else:
letters[i] += 1
deleteNum = 0;
for j in letters.keys():
if letters[j] %2 != 0:
deleteNum += 1
return deleteNum;
def solution2(A):
maxNum = 0
maxPosNag = 0
for i in A:
if i >= maxNum:
print(maxNum)
maxNum = i
for j in A:
if -maxNum == j:
maxPosNag = maxNum
return maxPosNag
def solution3(N):
listStr = list(str(N))
isNeg = 0
if N < 0:
isNeg = 1
listStr = listStr[1:]
insertIndex = 0
#see if the num is negative or not
if not isNeg:
for i in range(len(listStr)):
if int(listStr[i]) > 5:
insertIndex += 1
else:
break
listStr.insert(insertIndex,"5")
strNum = ""
for i in listStr:
strNum += i
num = int(strNum)
else:
for i in range(len(listStr)):
if int(listStr[i])<5:
insertIndex += 1
else:
break
listStr.insert(insertIndex,"5")
strNum = ""
for i in listStr:
strNum += i
num = -int(strNum)
return num
if __name__ == "__main__":
print(solution("aaxxxa"))
print(solution2([3,2,-2,5,-3]))
print(solution3(-2698))
|
UTF-8
|
Python
| false | false | 1,533 |
py
| 238 |
marshallWace.py
| 230 | 0.451402 | 0.429224 | 0 | 70 | 20.914286 | 42 |
anup5889/AnalyzingNYCDataSet
| 14,216,341,785,306 |
f6f09213d7f5af1fe287ab868d8d9f9e2ad4bfbc
|
712acf1e7c48d839a4e211a73434f1ee17f71edf
|
/TitanicDataSetExercise/SimpleHueristic.py
|
6c9a497e46ef4f4dceef11afa21e128951d93f22
|
[] |
no_license
|
https://github.com/anup5889/AnalyzingNYCDataSet
|
c5d62852556178b1559735bb20feff584b99d74b
|
5136b410e735f936fb67d54a2bbc84f9a6f13314
|
refs/heads/master
| 2021-01-21T04:35:42.545849 | 2016-06-11T22:53:56 | 2016-06-11T22:53:56 | 40,094,916 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy
import pandas
import statsmodels.api as sm
def simple_heuristic(file_path):
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
passenger_id = passenger['PassengerId']
if passenger['Sex']=='male':
predictions[passenger_id]=0
else:
predictions[passenger_id]=1
        # Heuristic implemented above: female passengers are predicted to have
        # survived (1) and male passengers are predicted not to have survived (0).
return predictions
print simple_heuristic("titanic_data (1).csv")
|
UTF-8
|
Python
| false | false | 696 |
py
| 3 |
SimpleHueristic.py
| 2 | 0.600575 | 0.594828 | 0 | 24 | 27.916667 | 57 |
kielejocain/AM_2015_06_15
| 14,817,637,203,170 |
3b65995961e3224fe4cce017bfc25b8ddb243b3f
|
17e496f9b28dfcf25608994276d4fb36aaa2de9e
|
/StudentWork/bestevez32/Week_3/Name in Ascii.py
|
39adf69af10c1f81290b58dbffe0fc19570aec48
|
[] |
no_license
|
https://github.com/kielejocain/AM_2015_06_15
|
b29a6a9be3f20c11dfe723343913936441df4270
|
0b2955582a6fdbc98db2cb39e8f4f2f0c44ba65a
|
refs/heads/master
| 2021-07-16T12:51:30.485125 | 2020-06-16T23:28:59 | 2020-06-16T23:28:59 | 37,479,388 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
s = "BJE"
for c in s:
print c
print ord(c)
def string_to_ascii(s):
output = []
for c in s:
v = ord(c)
# opposite of chr
output.append(v)
print(bin(v))
return output
print(string_to_ascii("BJE"))
print(string_to_ascii("Brandon"))
|
UTF-8
|
Python
| false | false | 300 |
py
| 541 |
Name in Ascii.py
| 304 | 0.52 | 0.52 | 0 | 17 | 16.647059 | 33 |
Tashunya/Inspection2
| 9,216,999,867,488 |
0bd0782996ca39cf0f54d5a41ebcbf92fc1ee8ae
|
ac628ae0ec7c715197da432974a3705ef1a46ec0
|
/app/api_1_0/errors.py
|
c49ba269f23ad8f25753658866cae43868ccfc47
|
[] |
no_license
|
https://github.com/Tashunya/Inspection2
|
22c2540cc476563400bf52c558d3555efab84823
|
3a2d81bef8dfe58ef9de6757664cbb858e34f607
|
refs/heads/master
| 2023-03-05T03:15:55.211056 | 2022-04-24T16:25:52 | 2022-04-24T16:25:52 | 187,200,501 | 1 | 1 | null | false | 2023-02-15T21:52:20 | 2019-05-17T10:55:41 | 2023-01-11T14:56:15 | 2023-02-15T21:52:16 | 573 | 1 | 1 | 6 |
Python
| false | false |
"""
The module is used to handle errors for api
"""
from flask import jsonify
def forbidden(message):
"""
Returns error response if route is forbidden
:param message:
:return:
"""
response = jsonify({'error': 'forbidden', 'message': message})
response.status_code = 403
return response
def bad_request(message):
"""
Returns error response if request is incorrect
:param message:
:return:
"""
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
def unauthorized(message):
"""
Returns error response if user is unauthorized
:param message:
:return:
"""
response = jsonify({'error': 'unauthorized', 'message': message})
response.status_code = 401
return response
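# Illustrative usage sketch; the route, blueprint and request handling below are
# assumptions, not taken from this module. An API view can return these helpers
# directly (assuming flask.request is imported), e.g.:
#     @api.route('/records', methods=['POST'])
#     def create_record():
#         if not request.get_json(silent=True):
#             return bad_request('a JSON body is required')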
|
UTF-8
|
Python
| false | false | 819 |
py
| 44 |
errors.py
| 25 | 0.644689 | 0.6337 | 0 | 38 | 20.552632 | 69 |
maths22/trawl
| 1,099,511,638,771 |
8c6119262541edc685aa39542542c68d6702032d
|
a2156e3a66e16ece72fcabc2feb4dd215080aa5c
|
/phishing/forms.py
|
b20cfa9113e53e2dfca52df7b463f3d4177cbb0d
|
[] |
no_license
|
https://github.com/maths22/trawl
|
7837fa52161433d7d44c23ddcd26bcddf5ad32fc
|
1160c6fd816175f374289738b5553707cd3b9267
|
refs/heads/master
| 2020-03-15T22:28:19.320188 | 2018-06-02T03:12:06 | 2018-06-02T03:12:06 | 132,374,126 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import datetime
import boto3
from django import forms
from .models import Submission, MTurkUser, EvaluationTask
# import the logging library
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
create_hits_in_live = False
environments = {
"live": {
"endpoint": "https://mturk-requester.us-east-1.amazonaws.com",
"preview": "https://www.mturk.com/mturk/preview",
"manage": "https://requester.mturk.com/mturk/manageHITs",
"reward": "0.00"
},
"sandbox": {
"endpoint": "https://mturk-requester-sandbox.us-east-1.amazonaws.com",
"preview": "https://workersandbox.mturk.com/mturk/preview",
"manage": "https://requestersandbox.mturk.com/mturk/manageHITs",
"reward": "0.11"
},
}
mturk_environment = environments["live"] if create_hits_in_live else environments["sandbox"]
session = boto3.Session()
client = session.client(
service_name='mturk',
region_name='us-east-1',
endpoint_url=mturk_environment['endpoint'],
)
class CreateTemplate(forms.Form):
message_template = forms.CharField()
subject = forms.CharField()
worker_id = forms.CharField()
assignment_id = forms.CharField()
def execute(self):
message_template = self.cleaned_data['message_template']
subject = self.cleaned_data['subject']
worker_id = self.cleaned_data['worker_id']
assignment_id = self.cleaned_data['assignment_id']
try:
mt_usr = MTurkUser.objects.get(pk=worker_id)
except MTurkUser.DoesNotExist:
mt_usr = MTurkUser(workerId=worker_id)
mt_usr.save()
# todo validate
s = Submission(assignmentId=assignment_id,
creator=mt_usr,
payout=False,
when_submitted=datetime.datetime.now(),
subject=subject,
text=message_template)
s.save()
objects = Submission.objects.filter(task__isnull=True)
while len(objects) >= 3:
targets = objects.all()[:3]
et = EvaluationTask()
et.save()
for target in targets:
target.task = et
target.save()
self.registerMturk(et)
objects = Submission.objects.filter(task__isnull=True)
return s
def registerMturk(self, et):
et_id = et.id
url = "https://security.maths22.com/review?task=" + str(et_id)
question = """
<ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">
<ExternalURL>%s</ExternalURL>
<FrameHeight>800</FrameHeight>
</ExternalQuestion>
""" % url
# Create the HIT
response = client.create_hit(
MaxAssignments=10,
LifetimeInSeconds=60000,
AssignmentDurationInSeconds=6000,
Reward=mturk_environment['reward'],
Title='Mark emails as spam or not spam',
Keywords='reading, classification',
Description='Read some emails and decide if they are spam',
Question=question,
# QualificationRequirements=worker_requirements,
)
logger.warning(response)
hit_id = response['HIT']['HITId']
et.hit_id = hit_id
et.save()
|
UTF-8
|
Python
| false | false | 3,461 |
py
| 11 |
forms.py
| 8 | 0.589425 | 0.578734 | 0 | 108 | 31.037037 | 131 |
chan3256995/vueproject
| 17,746,804,871,809 |
7e6b45b7f78c989b21882816fee6d7fe8924aa9a
|
75c3ce2153613a0ff754f51062beec325aa2bb26
|
/xiaoEdaifa/backstage/urls.py
|
0b7210faa95464c4925223189119455b2ea00e81
|
[] |
no_license
|
https://github.com/chan3256995/vueproject
|
a3c600ea2880b694a53b6f346bcb840581a7d1fc
|
681d5a943f8699750ced49b40097bb7f24c810aa
|
refs/heads/master
| 2023-02-21T04:21:01.964410 | 2023-02-10T11:14:13 | 2023-02-10T11:14:13 | 198,947,244 | 0 | 0 | null | false | 2022-12-11T20:30:08 | 2019-07-26T04:39:25 | 2022-01-08T05:58:58 | 2022-12-11T20:30:07 | 4,053 | 0 | 0 | 40 |
JavaScript
| false | false |
"""xxdaina URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib.staticfiles import views as my_view
from django.views.static import serve
from backstage import views
from backstage import trade_views
from backstage import bl_site_views
from rest_framework import routers
from xiaoEdaifa import settings
router = routers.DefaultRouter()
router.register(r'orderGoods', views.OrderGoodsViewSet, base_name='orderGoods')
router.register(r'orders', views.OrderViewSet, base_name='orders')
router.register(r'nullOrders', views.NullOrderViewSet, base_name='nullOrders')
# Discount cards
router.register(r'discountCard', views.DiscountCardViewSet, base_name='discountCard')
router.register(r'users', views.UserViewSet, base_name='users')
# User identity verification
router.register(r'alipayAccountInfo', views.AlipayAccountInfoViewSet, base_name='alipayAccountInfo')
# Invitation registration info
router.register(r'inviteRegInfo', views.InviteRegisterInfoViewSet, base_name='inviteRegInfo')
router.register(r'returnPackageInfo', views.ReturnPackageInfoViewSet, base_name='returnPackageInfo')
router.register(r'taskThread', views.TaskThreadViewSet, base_name='taskThread')
router.register(r'goodsRefund', views.OrderGoodsRefundViewSet, base_name='goodsRefund')
router.register(r'tradeInfo', trade_views.TradeInfoViewSet, base_name='tradeInfo')
# router.register(r'tagPrint', trade_views.TagPrintViewSet, base_name='tradeInfo')
# ************************************* DouYin **************************************
router.register(r'getDouYinShopForCollect', views.DouYinShopViewSet, base_name='getDouYinShopForCollect')
# ************************************* DouYin **************************************
# ************************ Trouble order follow-up *******************************
# Trouble order list
router.register(r'troubleOrderList', trade_views.TroubleOrderView,base_name='troubleOrderList')
# ************************ Trouble order follow-up *******************************
urlpatterns = [
# url(r'^static/(?P<path>.*)$', my_view.serve),
url(r'static/(?P<path>.*)', serve, {'document_root': settings.STATIC_ROOT}),
    # Export orders in tag-printing status
url('outputExcel/', views.OutPutOrdersView.as_view()),
url('getZhaoYaoJingImage/', views.GetZhaoYaoJingImage.as_view()),
    # Export orders in paid status (also set their status to logistics printing [tag_type field = 1]) (tag_type defaults to null, 0 = failed, 1 = in progress)
url('outputNullOrder/', views.OutPutNullOrderView.as_view()),
    # Export orders in paid status (also set their status to logistics printing [tag_type field = 1]); if placing the order on the third-party site fails, call this endpoint to restore the order to paid status and set tag_type to 0
url('outputNullOrderOtherSiteException/', views.OutputNullOrderOtherSiteExceptionView.as_view()),
    # Export orders in paid status (also set their status to logistics printing [tag_type field = 1]); after placing the order on the third-party site succeeds, call this endpoint to set tag_type to 0
url('outputNullOrderOtherSiteSuccess/', views.OutputNullOrderOtherSiteSuccessView.as_view()),
    # (tag_type defaults to null, 0 = failed, 1 = in progress)
    # Export orders in paid status (also set [tag_type field = 1]); if placing the order on the third-party site fails, call this endpoint to restore the order to paid status and set tag_type to 0
url('outputOrderOtherSiteException/', views.OutputOrderOtherSiteExceptionView.as_view()),
    # Export orders in paid status (also set their status to logistics printing [tag_type field = 1]); after placing the order on the third-party site succeeds, call this endpoint to set tag_type to 0
url('outputOrderOtherSiteSuccess/', views.OutputOrderOtherSiteSuccessView.as_view()),
    # Recharge review approved
url('rechargePass/', trade_views.RechargePassView.as_view()),
    # Alipay account real-name information review approved
url('userAlipayAccountCheckPass/', trade_views.UserAlipayAccountCheckPassView.as_view()),
url('stopDeliverPass/', trade_views.StopDeliverPass.as_view()),
    # Add a discount card
url('add_discount_card/', trade_views.AddDiscountCardView.as_view()),
    # Tag print request: set the goods status to tag printing
url('tagPrint/', trade_views.TagPrintView.as_view()),
    # Purchasing in progress
url('purchaseGoods/', trade_views.PurchaseGoodsView.as_view()),
    # Purchase complete / goods picked up
url('purchaseGoodsComplete/', trade_views.PurchasedGoodsCompleteView.as_view()),
    # Logistics (express) label printing
url('logisticsPrint/', trade_views.LogisticsPrintView.as_view()),
    # Ship goods
url('deliverGoods/', trade_views.DeliverGoodsView.as_view()),
    # Ship an empty-package order
url('deliverNullOrder/', trade_views.DeliverNullOrderView.as_view()),
    # This endpoint only ships orders from the 315 logistics source
url('deliverFrom315/', trade_views.DeliverFrom315View.as_view()),
    # This endpoint only ships orders from the BL logistics source
url('deliverFromBL/', trade_views.DeliverFromBLView.as_view()),
    # Goods available tomorrow
url('tomorrowGoods/', trade_views.TomorrowGoodsView.as_view()),
url('autoScanYiNaHuoOrder/', trade_views.AutoScanYiNaHuoOrder.as_view()),
    # Use this endpoint to update goods in the purchasing status (e.g. change purchasing status to available tomorrow, available in 2-5 days, picked up, or other)
url('changePurchasingStatus/', trade_views.ChangePurchasingStatus.as_view()),
    # Upload an order number to mark all goods under that order as picked up
url('changePurchasingStatusByOrderNumber/', trade_views.ChangeAllOrderGoodsPurchasingStatusViews.as_view()),
url('notGoods/', trade_views.NotHasGoods.as_view()),
    # Reset goods marked as available tomorrow back to paid status
url('tomorrowStatusReset/', trade_views.TomorrowStatusResetView.as_view()),
    # Timer switch for resetting goods marked as available tomorrow back to paid status
url('timeSwitch/', trade_views.TimeSwitchView.as_view()),
    # Temporary handling code, e.g. bulk-updating database records
url('temp/', trade_views.Temp.as_view()),
    # Temporary handling code, e.g. bulk-updating database records
url('temp2/', trade_views.Temp2.as_view()),
    # Listen for payments made via the payment QR code
url('appclient/', trade_views.AppClient.as_view()),
url('addOrderToChuanMei/', trade_views.AddOrderToChuanMeiView.as_view()),
    # Put returned packages into inventory
url('addReturnPackages/', trade_views.AddReturnPackages.as_view()),
    # Add to a user's balance
url('addUserBalance/', trade_views.AddUserBalance.as_view()),
# *****************************bl********************
url('bl_tuihuotuikuan_apply/', bl_site_views.BLTuihuotuikApply.as_view()),
url('bl_get_order_info/', bl_site_views.BLGetOrderInfo.as_view()),
url('bl_get_account_record_by_order_number/', bl_site_views.BLGetAccountRecordByOrderNumber.as_view()),
# *****************************bl********************
    # ************************************* DouYin **************************************
url('addDouYinGoods/', trade_views.SaveDouYinGoods.as_view()),
    # ************************************* DouYin **************************************
    # *************************************** Trouble order follow-up *****************************
    #
url('troubleOrderAdd/', trade_views.AddTroubleOrderView.as_view()),
url('troubleOrderEdit/', trade_views.EditTroubleOrderView.as_view()),
url('troubleOrderDelete/', trade_views.DeleteTroubleOrderView.as_view()),
#***************************************问题单跟单*****************************
url(r'', include(router.urls))
]
|
UTF-8
|
Python
| false | false | 7,985 |
py
| 148 |
urls.py
| 87 | 0.667053 | 0.66198 | 0 | 138 | 48.992754 | 112 |
dmaclay/vumi
| 3,367,254,388,124 |
6b93ee2011e46651399ea1604dce3d825ec08206
|
1fb80842534d8c810610d2996a2950814810a3e0
|
/vumi/workers/smpp/client.py
|
7a2b11ef5fb2dfc569c5987dfc17d549fb7b0939
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/dmaclay/vumi
|
e31779d92478e3ff52f7cf1fd31f803f81098ba0
|
0d44e435bd01606f6fe168e7ed4906a63bca9003
|
refs/heads/master
| 2021-01-18T09:56:07.030517 | 2011-09-19T14:36:09 | 2011-09-19T14:36:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import json
import uuid
import redis
from twisted.python import log
from twisted.internet.protocol import Protocol, ReconnectingClientFactory
from twisted.internet.task import LoopingCall
import binascii
from smpp.pdu import unpack_pdu
from smpp.pdu_builder import (BindTransceiver,
DeliverSMResp,
SubmitSM,
SubmitMulti,
EnquireLink,
EnquireLinkResp,
QuerySM
)
from smpp.pdu_inspector import (MultipartMessage,
detect_multipart,
multipart_key
)
from vumi.utils import get_deploy_int
# TODO this will move to pdu_inspector in python-smpp
ESME_command_status_map = {
"ESME_ROK" : "No Error",
"ESME_RINVMSGLEN" : "Message Length is invalid",
"ESME_RINVCMDLEN" : "Command Length is invalid",
"ESME_RINVCMDID" : "Invalid Command ID",
"ESME_RINVBNDSTS" : "Incorrect BIND Status for given command",
"ESME_RALYBND" : "ESME Already in Bound State",
"ESME_RINVPRTFLG" : "Invalid Priority Flag",
"ESME_RINVREGDLVFLG" : "Invalid Registered Delivery Flag",
"ESME_RSYSERR" : "System Error",
"ESME_RINVSRCADR" : "Invalid Source Address",
"ESME_RINVDSTADR" : "Invalid Dest Addr",
"ESME_RINVMSGID" : "Message ID is invalid",
"ESME_RBINDFAIL" : "Bind Failed",
"ESME_RINVPASWD" : "Invalid Password",
"ESME_RINVSYSID" : "Invalid System ID",
"ESME_RCANCELFAIL" : "Cancel SM Failed",
"ESME_RREPLACEFAIL" : "Replace SM Failed",
"ESME_RMSGQFUL" : "Message Queue Full",
"ESME_RINVSERTYP" : "Invalid Service Type",
"ESME_RINVNUMDESTS" : "Invalid number of destinations",
"ESME_RINVDLNAME" : "Invalid Distribution List name",
"ESME_RINVDESTFLAG" : "Destination flag is invalid (submit_multi)",
"ESME_RINVSUBREP" : "Invalid 'submit with replace' request (i.e. submit_sm with replace_if_present_flag set)",
"ESME_RINVESMCLASS" : "Invalid esm_class field data",
"ESME_RCNTSUBDL" : "Cannot Submit to Distribution List",
"ESME_RSUBMITFAIL" : "submit_sm or submit_multi failed",
"ESME_RINVSRCTON" : "Invalid Source address TON",
"ESME_RINVSRCNPI" : "Invalid Source address NPI",
"ESME_RINVDSTTON" : "Invalid Destination address TON",
"ESME_RINVDSTNPI" : "Invalid Destination address NPI",
"ESME_RINVSYSTYP" : "Invalid system_type field",
"ESME_RINVREPFLAG" : "Invalid replace_if_present flag",
"ESME_RINVNUMMSGS" : "Invalid number of messages",
"ESME_RTHROTTLED" : "Throttling error (ESME has exceeded allowed message limits)",
"ESME_RINVSCHED" : "Invalid Scheduled Delivery Time",
"ESME_RINVEXPIRY" : "Invalid message validity period (Expiry time)",
"ESME_RINVDFTMSGID" : "Predefined Message Invalid or Not Found",
"ESME_RX_T_APPN" : "ESME Receiver Temporary App Error Code",
"ESME_RX_P_APPN" : "ESME Receiver Permanent App Error Code",
"ESME_RX_R_APPN" : "ESME Receiver Reject Message Error Code",
"ESME_RQUERYFAIL" : "query_sm request failed",
"ESME_RINVOPTPARSTREAM" : "Error in the optional part of the PDU Body.",
"ESME_ROPTPARNOTALLWD" : "Optional Parameter not allowed",
"ESME_RINVPARLEN" : "Invalid Parameter Length.",
"ESME_RMISSINGOPTPARAM" : "Expected Optional Parameter missing",
"ESME_RINVOPTPARAMVAL" : "Invalid Optional Parameter Value",
"ESME_RDELIVERYFAILURE" : "Delivery Failure (used for data_sm_resp)",
"ESME_RUNKNOWNERR" : "Unknown Error",
}
class EsmeTransceiver(Protocol):
def __init__(self, seq, config, vumi_options):
self.build_maps()
self.name = 'Proto' + str(seq)
log.msg('__init__', self.name)
self.defaults = {}
self.state = 'CLOSED'
log.msg(self.name, 'STATE :', self.state)
self.seq = seq
self.config = config
self.vumi_options = vumi_options
self.inc = int(self.config['smpp_increment'])
self.incSeq()
self.datastream = ''
self.__connect_callback = None
self.__submit_sm_resp_callback = None
self.__delivery_report_callback = None
self.__deliver_sm_callback = None
self._send_failure_callback = None
self.error_handlers = {
"ok": self.dummy_ok,
"mess_permfault": self.dummy_mess_permfault,
"mess_tempfault": self.dummy_mess_tempfault,
"conn_permfault": self.dummy_conn_permfault,
"conn_tempfault": self.dummy_conn_tempfault,
"conn_throttle": self.dummy_conn_throttle,
}
self.r_server = redis.Redis("localhost",
db=get_deploy_int(self.vumi_options['vhost']))
log.msg("Connected to Redis")
self.r_prefix = "%s@%s:%s" % (
self.config['system_id'],
self.config['host'],
self.config['port'])
log.msg("r_prefix = %s" % self.r_prefix)
# Dummy error handler functions, just log invocation
def dummy_ok(self, *args, **kwargs):
m = "%s.%s(*args=%s, **kwargs=%s)" % (
__name__,
"dummy_ok",
args,
kwargs)
#log.msg(m)
# Dummy error handler functions, just log invocation
def dummy_mess_permfault(self, *args, **kwargs):
m = "%s.%s(*args=%s, **kwargs=%s)" % (
__name__,
"dummy_mess_permfault",
args,
kwargs)
log.msg(m)
# Dummy error handler functions, just log invocation
def dummy_mess_tempfault(self, *args, **kwargs):
m = "%s.%s(*args=%s, **kwargs=%s)" % (
__name__,
"dummy_mess_tempfault",
args,
kwargs)
log.msg(m)
# Dummy error handler functions, just log invocation
def dummy_conn_permfault(self, *args, **kwargs):
m = "%s.%s(*args=%s, **kwargs=%s)" % (
__name__,
"dummy_conn_permfault",
args,
kwargs)
log.msg(m)
# Dummy error handler functions, just log invocation
def dummy_conn_tempfault(self, *args, **kwargs):
m = "%s.%s(*args=%s, **kwargs=%s)" % (
__name__,
"dummy_conn_tempfault",
args,
kwargs)
log.msg(m)
# Dummy error handler functions, just log invocation
def dummy_conn_throttle(self, *args, **kwargs):
m = "%s.%s(*args=%s, **kwargs=%s)" % (
__name__,
"dummy_conn_throttle",
args,
kwargs)
log.msg(m)
def build_maps(self):
self.ESME_command_status_dispatch_map = {
"ESME_ROK" : self.dispatch_ok,
"ESME_RINVMSGLEN" : self.dispatch_mess_permfault,
"ESME_RINVCMDLEN" : self.dispatch_mess_permfault,
"ESME_RINVCMDID" : self.dispatch_mess_permfault,
"ESME_RINVBNDSTS" : self.dispatch_conn_tempfault,
"ESME_RALYBND" : self.dispatch_conn_tempfault,
"ESME_RINVPRTFLG" : self.dispatch_mess_permfault,
"ESME_RINVREGDLVFLG" : self.dispatch_mess_permfault,
"ESME_RSYSERR" : self.dispatch_conn_permfault,
"ESME_RINVSRCADR" : self.dispatch_mess_permfault,
"ESME_RINVDSTADR" : self.dispatch_mess_permfault,
"ESME_RINVMSGID" : self.dispatch_mess_permfault,
"ESME_RBINDFAIL" : self.dispatch_conn_permfault,
"ESME_RINVPASWD" : self.dispatch_conn_permfault,
"ESME_RINVSYSID" : self.dispatch_conn_permfault,
"ESME_RCANCELFAIL" : self.dispatch_mess_permfault,
"ESME_RREPLACEFAIL" : self.dispatch_mess_permfault,
"ESME_RMSGQFUL" : self.dispatch_conn_throttle,
"ESME_RINVSERTYP" : self.dispatch_conn_permfault,
"ESME_RINVNUMDESTS" : self.dispatch_mess_permfault,
"ESME_RINVDLNAME" : self.dispatch_mess_permfault,
"ESME_RINVDESTFLAG" : self.dispatch_mess_permfault,
"ESME_RINVSUBREP" : self.dispatch_mess_permfault,
"ESME_RINVESMCLASS" : self.dispatch_mess_permfault,
"ESME_RCNTSUBDL" : self.dispatch_mess_permfault,
"ESME_RSUBMITFAIL" : self.dispatch_mess_tempfault,
"ESME_RINVSRCTON" : self.dispatch_mess_permfault,
"ESME_RINVSRCNPI" : self.dispatch_mess_permfault,
"ESME_RINVDSTTON" : self.dispatch_mess_permfault,
"ESME_RINVDSTNPI" : self.dispatch_mess_permfault,
"ESME_RINVSYSTYP" : self.dispatch_conn_permfault,
"ESME_RINVREPFLAG" : self.dispatch_mess_permfault,
"ESME_RINVNUMMSGS" : self.dispatch_mess_tempfault,
"ESME_RTHROTTLED" : self.dispatch_conn_throttle,
"ESME_RINVSCHED" : self.dispatch_mess_permfault,
"ESME_RINVEXPIRY" : self.dispatch_mess_permfault,
"ESME_RINVDFTMSGID" : self.dispatch_mess_permfault,
"ESME_RX_T_APPN" : self.dispatch_mess_tempfault,
"ESME_RX_P_APPN" : self.dispatch_mess_permfault,
"ESME_RX_R_APPN" : self.dispatch_mess_permfault,
"ESME_RQUERYFAIL" : self.dispatch_mess_permfault,
"ESME_RINVOPTPARSTREAM" : self.dispatch_mess_permfault,
"ESME_ROPTPARNOTALLWD" : self.dispatch_mess_permfault,
"ESME_RINVPARLEN" : self.dispatch_mess_permfault,
"ESME_RMISSINGOPTPARAM" : self.dispatch_mess_permfault,
"ESME_RINVOPTPARAMVAL" : self.dispatch_mess_permfault,
"ESME_RDELIVERYFAILURE" : self.dispatch_mess_tempfault,
"ESME_RUNKNOWNERR" : self.dispatch_mess_tempfault,
}
def command_status_dispatch(self, pdu):
method = self.ESME_command_status_dispatch_map.get(
pdu['header']['command_status'],
self.dispatch_ok)
handler = method()
if pdu['header']['command_status'] != "ESME_ROK":
log.msg("ERROR handler:%s pdu:%s" % (handler, pdu))
return handler
'''This maps SMPP error states to VUMI error states
For now assume VUMI understands:
connection -> temp fault or permanent fault
message -> temp fault or permanent fault
and the need to throttle the traffic on the connection
'''
def dispatch_ok(self):
return self.error_handlers.get("ok")
def dispatch_conn_permfault(self):
return self.error_handlers.get("conn_permfault")
def dispatch_mess_permfault(self):
return self.error_handlers.get("mess_permfault")
def dispatch_conn_tempfault(self):
return self.error_handlers.get("conn_tempfault")
def dispatch_mess_tempfault(self):
return self.error_handlers.get("mess_tempfault")
def dispatch_conn_throttle(self):
return self.error_handlers.get("conn_throttle")
def update_error_handlers(self, handler_dict={}):
self.error_handlers.update(handler_dict)
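    # Illustrative sketch, not part of the original code: a transport wanting custom
    # throttling behaviour could replace the dummy handler above with, e.g.
    #     esme.update_error_handlers({"conn_throttle": my_throttle_handler})
    # where my_throttle_handler is an assumed callable accepting **kwargs (pdu=...).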
def getSeq(self):
return self.seq[0]
def incSeq(self):
self.seq[0] += self.inc
def popData(self):
data = None
if(len(self.datastream) >= 16):
command_length = int(binascii.b2a_hex(self.datastream[0:4]), 16)
if(len(self.datastream) >= command_length):
data = self.datastream[0:command_length]
self.datastream = self.datastream[command_length:]
return data
def handleData(self, data):
pdu = unpack_pdu(data)
log.msg('INCOMING <<<<', binascii.b2a_hex(data))
log.msg('INCOMING <<<<', pdu)
error_handler = self.command_status_dispatch(pdu)
error_handler(pdu=pdu)
if pdu['header']['command_id'] == 'bind_transceiver_resp':
self.handle_bind_transceiver_resp(pdu)
if pdu['header']['command_id'] == 'submit_sm_resp':
self.handle_submit_sm_resp(pdu)
if pdu['header']['command_id'] == 'submit_multi_resp':
self.handle_submit_multi_resp(pdu)
if pdu['header']['command_id'] == 'deliver_sm':
self.handle_deliver_sm(pdu)
if pdu['header']['command_id'] == 'enquire_link':
self.handle_enquire_link(pdu)
if pdu['header']['command_id'] == 'enquire_link_resp':
self.handle_enquire_link_resp(pdu)
log.msg(self.name, 'STATE :', self.state)
def loadDefaults(self, defaults):
self.defaults = dict(self.defaults, **defaults)
def setConnectCallback(self, connect_callback):
self.__connect_callback = connect_callback
def setSubmitSMRespCallback(self, submit_sm_resp_callback):
self.__submit_sm_resp_callback = submit_sm_resp_callback
def setDeliveryReportCallback(self, delivery_report_callback):
self.__delivery_report_callback = delivery_report_callback
def setDeliverSMCallback(self, deliver_sm_callback):
self.__deliver_sm_callback = deliver_sm_callback
def setSendFailureCallback(self, send_failure_callback):
self._send_failure_callback = send_failure_callback
def connectionMade(self):
self.state = 'OPEN'
log.msg(self.name, 'STATE :', self.state)
pdu = BindTransceiver(self.getSeq(), **self.defaults)
log.msg(pdu.get_obj())
self.incSeq()
self.sendPDU(pdu)
def connectionLost(self, *args, **kwargs):
self.state = 'CLOSED'
log.msg(self.name, 'STATE :', self.state)
try:
self.lc_enquire.stop()
del self.lc_enquire
log.msg(self.name, 'stop & del enquire link looping call')
except:
pass
#try:
#self.lc_query.stop()
#del self.lc_query
#print self.name, 'stop & del query sm looping call'
#except:
#pass
def disconnect(self):
"""
        Attempt graceful disconnect
"""
pass
def forceConnectionFailure(self):
"""
For when the tcp socket stream gets corrupted
or something equally unrecoverable
"""
pass
def dataReceived(self, data):
self.datastream += data
data = self.popData()
while data != None:
self.handleData(data)
data = self.popData()
def sendPDU(self, pdu):
data = pdu.get_bin()
log.msg('OUTGOING >>>>', unpack_pdu(data))
self.transport.write(data)
def handle_bind_transceiver_resp(self, pdu):
if pdu['header']['command_status'] == 'ESME_ROK':
self.state = 'BOUND_TRX'
self.lc_enquire = LoopingCall(self.enquire_link)
self.lc_enquire.start(55.0)
self.__connect_callback(self)
log.msg(self.name, 'STATE :', self.state)
def handle_submit_sm_resp(self, pdu):
self.pop_unacked()
message_id = pdu.get('body', {}).get(
'mandatory_parameters', {}).get('message_id')
self.__submit_sm_resp_callback(
sequence_number=pdu['header']['sequence_number'],
command_status=pdu['header']['command_status'],
command_id=pdu['header']['command_id'],
message_id=message_id)
if pdu['header']['command_status'] == 'ESME_ROK':
pass
def handle_submit_multi_resp(self, pdu):
if pdu['header']['command_status'] == 'ESME_ROK':
pass
def _decode_message(self, message, data_coding):
codec = {
1: 'ascii',
3: 'latin1',
8: 'utf-16be', # Actually UCS-2, but close enough.
}.get(data_coding, None)
if codec is None or message is None:
log.msg("WARNING: Not decoding message with data_coding=%s" % (
data_coding,))
return message
return message.decode(codec)
def handle_deliver_sm(self, pdu):
if pdu['header']['command_status'] == 'ESME_ROK':
sequence_number = pdu['header']['sequence_number']
message_id = str(uuid.uuid4())
pdu_resp = DeliverSMResp(sequence_number,
**self.defaults)
self.sendPDU(pdu_resp)
delivery_report = re.search(
# SMPP v3.4 Issue 1.2 pg. 167 is wrong on id length
'id:(?P<id>\S{,65}) +sub:(?P<sub>...)'
+ ' +dlvrd:(?P<dlvrd>...)'
+ ' +submit date:(?P<submit_date>\d*)'
+ ' +done date:(?P<done_date>\d*)'
+ ' +stat:(?P<stat>[A-Z]{7})'
+ ' +err:(?P<err>...)'
+ ' +[Tt]ext:(?P<text>.{,20})'
+ '.*',
pdu['body']['mandatory_parameters']['short_message'] or ''
)
if delivery_report:
self.__delivery_report_callback(
destination_addr=pdu['body']['mandatory_parameters']['destination_addr'],
source_addr=pdu['body']['mandatory_parameters']['source_addr'],
delivery_report=delivery_report.groupdict()
)
elif detect_multipart(pdu):
redis_key = "%s#multi_%s" % (
self.r_prefix, multipart_key(detect_multipart(pdu)))
log.msg("Redis multipart key: %s" % (redis_key))
value = json.loads(self.r_server.get(redis_key) or 'null')
log.msg("Retrieved value: %s" % (repr(value)))
multi = MultipartMessage(value)
multi.add_pdu(pdu)
completed = multi.get_completed()
if completed:
self.r_server.delete(redis_key)
log.msg("Reassembled Message: %s" % (completed['message']))
# and we can finally pass the whole message on
self.__deliver_sm_callback(
destination_addr=completed['to_msisdn'],
source_addr=completed['from_msisdn'],
short_message=completed['message'],
message_id=message_id,
)
else:
self.r_server.set(redis_key, json.dumps(multi.get_array()))
else:
pdu_mp = pdu['body']['mandatory_parameters']
decoded_msg = self._decode_message(pdu_mp['short_message'],
pdu_mp['data_coding'])
self.__deliver_sm_callback(
destination_addr=pdu_mp['destination_addr'],
source_addr=pdu_mp['source_addr'],
short_message=decoded_msg,
message_id=message_id,
)
def handle_enquire_link(self, pdu):
if pdu['header']['command_status'] == 'ESME_ROK':
sequence_number = pdu['header']['sequence_number']
pdu_resp = EnquireLinkResp(sequence_number)
self.sendPDU(pdu_resp)
def handle_enquire_link_resp(self, pdu):
if pdu['header']['command_status'] == 'ESME_ROK':
pass
def get_unacked_count(self):
return int(self.r_server.llen("%s#unacked" % self.r_prefix))
def push_unacked(self, sequence_number=-1):
self.r_server.lpush("%s#unacked" % self.r_prefix, sequence_number)
log.msg("%s#unacked pushed to: %s" % (
self.r_prefix, self.get_unacked_count()))
def pop_unacked(self):
self.r_server.lpop("%s#unacked" % self.r_prefix)
log.msg("%s#unacked popped to: %s" % (
self.r_prefix, self.get_unacked_count()))
def submit_sm(self, **kwargs):
if self.state in ['BOUND_TX', 'BOUND_TRX']:
sequence_number = self.getSeq()
pdu = SubmitSM(sequence_number, **dict(self.defaults, **kwargs))
self.incSeq()
self.sendPDU(pdu)
self.push_unacked(sequence_number)
return sequence_number
return 0
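    # Illustrative sketch; the keyword names are standard SubmitSM mandatory parameters
    # from python-smpp, not taken from this file. Once bound, a message could be sent as
    #     seq = esme.submit_sm(destination_addr='27831112222', short_message='hello')
    # which returns the sequence number that was pushed onto the unacked list above.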
def submit_multi(self, dest_address=[], **kwargs):
if self.state in ['BOUND_TX', 'BOUND_TRX']:
sequence_number = self.getSeq()
pdu = SubmitMulti(sequence_number, **dict(self.defaults, **kwargs))
for item in dest_address:
if isinstance(item, str):
# assume strings are addresses not lists
pdu.addDestinationAddress(
item,
dest_addr_ton=self.defaults['dest_addr_ton'],
dest_addr_npi=self.defaults['dest_addr_npi'],
)
elif isinstance(item, dict):
if item.get('dest_flag') == 1:
pdu.addDestinationAddress(
item.get('destination_addr', ''),
dest_addr_ton=item.get('dest_addr_ton',
self.defaults['dest_addr_ton']),
dest_addr_npi=item.get('dest_addr_npi',
self.defaults['dest_addr_npi']),
)
elif item.get('dest_flag') == 2:
pdu.addDistributionList(item.get('dl_name'))
self.incSeq()
self.sendPDU(pdu)
return sequence_number
return 0
def enquire_link(self, **kwargs):
if self.state in ['BOUND_TX', 'BOUND_TRX']:
sequence_number = self.getSeq()
pdu = EnquireLink(sequence_number, **dict(self.defaults, **kwargs))
self.incSeq()
self.sendPDU(pdu)
return sequence_number
return 0
def query_sm(self, message_id, source_addr, **kwargs):
if self.state in ['BOUND_TX', 'BOUND_TRX']:
sequence_number = self.getSeq()
pdu = QuerySM(sequence_number,
message_id=message_id,
source_addr=source_addr,
**dict(self.defaults, **kwargs))
self.incSeq()
self.sendPDU(pdu)
return sequence_number
return 0
class EsmeTransceiverFactory(ReconnectingClientFactory):
def __init__(self, config, vumi_options):
self.config = config
self.vumi_options = vumi_options
if int(self.config['smpp_increment']) \
< int(self.config['smpp_offset']):
raise Exception("increment may not be less than offset")
if int(self.config['smpp_increment']) < 1:
raise Exception("increment may not be less than 1")
if int(self.config['smpp_offset']) < 1:
raise Exception("offset may not be less than 1")
self.esme = None
self.__connect_callback = None
self.__disconnect_callback = None
self.__submit_sm_resp_callback = None
self.__delivery_report_callback = None
self.__deliver_sm_callback = None
self.seq = [int(self.config['smpp_offset'])]
log.msg("Set sequence number: %s" % (self.seq))
self.initialDelay = 30.0
self.maxDelay = 45
self.defaults = {
'host': '127.0.0.1',
'port': 2775,
'dest_addr_ton': 0,
'dest_addr_npi': 0,
}
def loadDefaults(self, defaults):
self.defaults = dict(self.defaults, **defaults)
def setLastSequenceNumber(self, last):
self.seq = [last]
log.msg("Set sequence number: %s" % (self.seq))
def setConnectCallback(self, connect_callback):
self.__connect_callback = connect_callback
def setDisconnectCallback(self, disconnect_callback):
self.__disconnect_callback = disconnect_callback
def setSubmitSMRespCallback(self, submit_sm_resp_callback):
self.__submit_sm_resp_callback = submit_sm_resp_callback
def setDeliveryReportCallback(self, delivery_report_callback):
self.__delivery_report_callback = delivery_report_callback
def setDeliverSMCallback(self, deliver_sm_callback):
self.__deliver_sm_callback = deliver_sm_callback
def setSendFailureCallback(self, send_failure_callback):
self._send_failure_callback = send_failure_callback
def startedConnecting(self, connector):
        log.msg('Started to connect.')
def buildProtocol(self, addr):
        log.msg('Connected')
self.esme = EsmeTransceiver(self.seq, self.config, self.vumi_options)
self.esme.loadDefaults(self.defaults)
self.esme.setConnectCallback(
connect_callback=self.__connect_callback)
self.esme.setSubmitSMRespCallback(
submit_sm_resp_callback=self.__submit_sm_resp_callback)
self.esme.setDeliveryReportCallback(
delivery_report_callback=self.__delivery_report_callback)
self.esme.setDeliverSMCallback(
deliver_sm_callback=self.__deliver_sm_callback)
self.resetDelay()
return self.esme
def clientConnectionLost(self, connector, reason):
        log.msg('Lost connection. Reason: %s' % (reason,))
self.__disconnect_callback()
ReconnectingClientFactory.clientConnectionLost(
self, connector, reason)
def clientConnectionFailed(self, connector, reason):
        log.msg('Connection failed. Reason: %s' % (reason,))
ReconnectingClientFactory.clientConnectionFailed(
self, connector, reason)
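

# --- Hedged usage sketch (added by this edit, not part of the original module).
# One plausible way to wire the factory into Twisted's reactor. The full set of
# config keys EsmeTransceiver expects (e.g. bind credentials) is not visible in
# this excerpt, so the dict below is an assumption for illustration only.
if __name__ == '__main__':
    from twisted.internet import reactor

    example_config = {
        'smpp_increment': 1,   # required by the factory's sequence-number checks
        'smpp_offset': 1,
        'host': '127.0.0.1',   # assumed: the SMSC to connect to
        'port': 2775,
    }

    def log_deliver_sm(**kwargs):
        # Minimal callback: just log whatever the SMSC delivers.
        log.msg("deliver_sm received: %r" % (kwargs,))

    factory = EsmeTransceiverFactory(example_config, vumi_options={})
    factory.setDeliverSMCallback(deliver_sm_callback=log_deliver_sm)
    factory.setDisconnectCallback(disconnect_callback=lambda: log.msg("disconnected"))
    reactor.connectTCP(example_config['host'], example_config['port'], factory)
    reactor.run()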
|
UTF-8
|
Python
| false | false | 26,369 |
py
| 74 |
client.py
| 57 | 0.552391 | 0.55004 | 0 | 638 | 40.330721 | 120 |
carsonk/betrayal-utils
| 2,448,131,367,199 |
dcb3f927e6e57d9d5a9bcbe6f598a622d29ff198
|
4799f7af76c51c7cd081f0ee73d4e4c47306a73b
|
/game_tracker/migrations/0003_auto_20151229_2229.py
|
ac387e0234281dc04efd38d1da15109a42782a27
|
[] |
no_license
|
https://github.com/carsonk/betrayal-utils
|
02983c290255f290a925397eaa2f20f183db8747
|
61018c29d874b54863bf0f7829e74cb9b847a867
|
refs/heads/master
| 2021-01-10T13:30:39.581594 | 2015-12-31T03:20:54 | 2015-12-31T03:20:54 | 48,719,284 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 03:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game_tracker', '0002_character_name'),
]
operations = [
migrations.RenameField(
model_name='character',
old_name='knowledge',
new_name='knowledge_index',
),
migrations.RenameField(
model_name='character',
old_name='might',
new_name='might_index',
),
migrations.RenameField(
model_name='character',
old_name='sanity',
new_name='sanity_index',
),
migrations.RenameField(
model_name='character',
old_name='speed',
new_name='speed_index',
),
migrations.AddField(
model_name='character',
name='knowledge_options',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='character',
name='might_options',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='character',
name='sanity_options',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='character',
name='speed_options',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
]
|
UTF-8
|
Python
| false | false | 1,731 |
py
| 11 |
0003_auto_20151229_2229.py
| 7 | 0.53264 | 0.514731 | 0 | 59 | 28.338983 | 63 |
charles-freitas/2018.2-ProgComp
| 773,094,160,284 |
f25ba493a65667dd248b5a492541679b971e04be
|
bcbaec1422a84aebdd1c307114b43c33fb12fa1a
|
/20181018 - Cartela Bingo/cartela_bingo_v2.py
|
f19aed56989ea7a0ec9386118cf5b9d6e88338e6
|
[] |
no_license
|
https://github.com/charles-freitas/2018.2-ProgComp
|
80a5581745840c153783632c053b3f550d3e4751
|
b8a5f509522a572d243ac5976aac2e3b42145fb9
|
refs/heads/master
| 2018-11-02T08:45:30.432189 | 2018-11-02T00:09:41 | 2018-11-02T00:09:41 | 145,893,635 | 0 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
lista_B = []
lista_I = []
lista_N = []
lista_G = []
lista_O = []
for contador in range(1, 6):
    # Generate a random integer between 1 and 15
    numero_B = random.randint(1,15)
    # Add the generated number to the list only if it is not already in it
    # (duplicates are simply skipped, so a column may end up with fewer than 5 numbers)
    if numero_B not in lista_B: lista_B.append(numero_B)
    # Generate a random integer between 16 and 30
    numero_I = random.randint(16,30)
    # Add the generated number to the list only if it is not already in it
    if numero_I not in lista_I: lista_I.append(numero_I)
    # Generate a random integer between 31 and 45
    numero_N = random.randint(31,45)
    # Add the generated number to the list only if it is not already in it
    if numero_N not in lista_N: lista_N.append(numero_N)
    # Generate a random integer between 46 and 60
    numero_G = random.randint(46,60)
    # Add the generated number to the list only if it is not already in it
    if numero_G not in lista_G: lista_G.append(numero_G)
    # Generate a random integer between 61 and 75
    numero_O = random.randint(61,75)
    # Add the generated number to the list only if it is not already in it
    if numero_O not in lista_O: lista_O.append(numero_O)

# Printing lista_B
print(lista_B)
# Printing lista_I
print(lista_I)
# Printing lista_N
print(lista_N)
# Printing lista_G
print(lista_G)
# Printing lista_O
print(lista_O)
|
UTF-8
|
Python
| false | false | 1,349 |
py
| 58 |
cartela_bingo_v2.py
| 56 | 0.708145 | 0.680995 | 0 | 40 | 32.125 | 68 |
briandrawert/stochss
| 11,338,713,688,004 |
b288a8e251ef8a7efd66296f851aabbb111517f7
|
5d1655135be351c42cd0f856fe94a410296a61ec
|
/app/backend/bin/sccpy.py
|
3c8ea3217b7a3c366f2f394c057ca12ed9f56308
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/briandrawert/stochss
|
5570cfcbd4614a17f5ffd72d1c285d6f61c2741a
|
61cebc8cc4c5d00225845c60442906cf7a0bc7e1
|
refs/heads/master
| 2021-01-16T19:36:10.527465 | 2018-04-26T14:50:20 | 2018-04-26T14:50:20 | 12,758,360 | 0 | 0 |
NOASSERTION
| true | 2018-09-28T14:58:01 | 2013-09-11T14:26:25 | 2018-04-26T14:51:04 | 2018-09-28T14:58:01 | 347,007 | 0 | 0 | 0 |
Python
| false | null |
#!/usr/bin/env python
import sys
import logging
import os
import argparse
import boto
import boto.s3
from boto.s3.lifecycle import Lifecycle, Expiration
def get_scp_command(user, ip, keyfile, target, source):
return 'scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {keyfile} {source} {user}@{ip}:{target}'.format(
keyfile=keyfile, user=user, ip=ip,
source=source, target=target)
def get_arg_parser():
parser = argparse.ArgumentParser(description="SCCPY : Secure Copy Tool\
Tool for uploading job output tar to Amazon S3 (for EC2 agent) or\
scp-ing to queue head node (for Flex Agent)")
parser.add_argument('-f', '--file', help="File to upload", action="store", dest="filename")
parser.add_argument('--ec2', nargs=1, metavar=('BUCKET_NAME'),
help='Upload to Amazon S3', action='store', dest='ec2_config')
parser.add_argument('--flex', nargs=3, metavar=('QUEUE_HEAD_IP', 'QUEUE_HEAD_USERNAME', 'QUEUE_HEAD_KEYFILE'),
help='Upload to Flex Cloud Queue Head', action='store', dest='flex_config')
return parser
class StorageAgent(object):
def upload_file(self, filename):
raise NotImplementedError
class AmazonS3Agent(StorageAgent):
def __init__(self, bucket_name):
self.bucket_name = bucket_name
def upload_file(self, filename):
try:
lifecycle = Lifecycle()
lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
expiration=Expiration(days=10))
conn = boto.connect_s3()
            if conn.lookup(self.bucket_name):  # bucket exists
bucket = conn.get_bucket(self.bucket_name)
else:
# create a bucket
bucket = conn.create_bucket(self.bucket_name, location=boto.s3.connection.Location.DEFAULT)
bucket.configure_lifecycle(lifecycle)
from boto.s3.key import Key
k = Key(bucket)
k.key = filename
k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
k.set_acl('public-read-write')
        except Exception as e:
sys.stdout.write("AmazonS3Agent failed with exception:\n{0}".format(str(e)))
sys.stdout.flush()
raise e
def percent_cb(self, complete, total):
sys.stdout.write('.')
sys.stdout.flush()
class FlexStorageAgent(StorageAgent):
OUTPUT_DIR = '~/stochss/app/backend/tmp/flex/output/'
def __init__(self, queue_head_ip, queue_head_username, queue_head_keyfile):
self.queue_head_ip = queue_head_ip
self.queue_head_username = queue_head_username
self.queue_head_keyfile = queue_head_keyfile
def upload_file(self, filename):
try:
scp_command = get_scp_command(user=self.queue_head_username, ip=self.queue_head_ip,
keyfile=self.queue_head_keyfile,
target=self.OUTPUT_DIR, source=filename)
sys.stdout.write(scp_command)
sys.stdout.flush()
if os.system(scp_command) != 0:
raise Exception('FlexStorageAgent: scp failed')
        except Exception as e:
sys.stdout.write("FlexStorageAgent failed with exception:\n{0}".format(str(e)))
sys.stdout.flush()
raise e
if __name__ == '__main__':
parser = get_arg_parser()
parsed_args = parser.parse_args(sys.argv[1:])
if parsed_args.filename == None or not os.path.exists(parsed_args.filename):
raise Exception('Please pass valid filename existing locally!')
if parsed_args.ec2_config != None:
if len(parsed_args.ec2_config) != 1:
raise Exception('Need 1 argument for --ec2 option.')
s3_bucket_name = parsed_args.ec2_config[0]
a = AmazonS3Agent(bucket_name=s3_bucket_name)
a.upload_file(filename=parsed_args.filename)
elif parsed_args.flex_config != None:
if len(parsed_args.flex_config) != 3:
raise Exception('Need 3 arguments for --flex option.')
queue_head_ip = parsed_args.flex_config[0]
queue_head_username = parsed_args.flex_config[1]
queue_head_keyfile = parsed_args.flex_config[2]
f = FlexStorageAgent(queue_head_ip=queue_head_ip,
queue_head_keyfile=queue_head_keyfile, queue_head_username=queue_head_username)
f.upload_file(filename=parsed_args.filename)
else:
raise Exception('Invalid option chosen!')
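
# --- Hedged usage examples (added by this edit; the bucket name, host and key path
# are illustrative, while the flags themselves come from the argparse definition above):
#   Upload a job tarball to Amazon S3 (EC2 agent):
#       python sccpy.py -f output.tar.gz --ec2 my-stochss-bucket
#   Copy a job tarball to the Flex cloud queue head:
#       python sccpy.py -f output.tar.gz --flex 10.0.0.5 ubuntu /path/to/key.pem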
|
UTF-8
|
Python
| false | false | 4,679 |
py
| 260 |
sccpy.py
| 178 | 0.60483 | 0.596922 | 0 | 124 | 36.725806 | 127 |
pinieco23/energiaEvoluciona
| 3,556,232,950,842 |
97511df42be334771897a2e3317650b9b2b47040
|
ae267c6177190ba25e737a966dbc0a48caf0b628
|
/energias/admin.py
|
ba1ec34900119ec28f43224f6400437124df7788
|
[] |
no_license
|
https://github.com/pinieco23/energiaEvoluciona
|
eb56aa70a0d52b3b4b36cb93f952a647ce7b51fb
|
cd3a9622cfa3cfa0900235a2372772b223c43386
|
refs/heads/master
| 2023-04-07T12:25:56.529014 | 2019-02-15T20:04:07 | 2019-02-15T20:04:07 | 355,335,372 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from energias.models import contenedor_1, contenedor_2, contenedor_3,imagenes_c3, experto, redes, contenedor_5, comision, fuentes
admin.site.register(contenedor_1)
admin.site.register(contenedor_2)
admin.site.register(contenedor_3)
admin.site.register(imagenes_c3)
admin.site.register(experto)
admin.site.register(contenedor_5)
admin.site.register(redes)
admin.site.register(comision)
admin.site.register(fuentes)
|
UTF-8
|
Python
| false | false | 449 |
py
| 18 |
admin.py
| 12 | 0.817372 | 0.7951 | 0 | 14 | 31.142857 | 129 |
swastishreya/HelloFriend
| 3,521,873,216,327 |
8bb06f8aa4f9d8cde68240d9d308cd5a38d4fe53
|
13eea0f00071355d93806d359e2b6a0270774802
|
/backend/hello_friend_db_api/tests/__init__.py
|
8b7d702d42a230f69cdac31667760b6cab82830e
|
[] |
no_license
|
https://github.com/swastishreya/HelloFriend
|
8ddf1ffafe868916cb0e87458b1125b6099710c9
|
16064de5f0615a279b58c1a547cdf10ec7a91c67
|
refs/heads/main
| 2023-04-30T06:18:06.989752 | 2021-05-19T15:04:43 | 2021-05-19T15:04:43 | 359,912,927 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from hello_friend_db_api.tests.model_test import *
from hello_friend_db_api.tests.view_test import *
|
UTF-8
|
Python
| false | false | 100 |
py
| 19 |
__init__.py
| 13 | 0.79 | 0.79 | 0 | 2 | 49.5 | 50 |
ntnu-ai-lab/HUNT4-HAR
| 6,305,012,029,900 |
9733935d31ead85278e179aebe98d432aafdb11b
|
f31b8b5a0777c25f55d9156b44043b821f225728
|
/HAR_PostProcessing/playground/weekdays.py
|
c95f43349737c18532f56002b7e563af0a929bcb
|
[] |
no_license
|
https://github.com/ntnu-ai-lab/HUNT4-HAR
|
49805508aa312ab2f079b6cdabafa4d8085fc680
|
9bd72ebe21b1b6234297536981a5d737962ff470
|
refs/heads/master
| 2023-03-20T04:57:04.879176 | 2019-09-27T07:46:38 | 2019-09-27T07:46:38 | 136,932,423 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pandas as pd
import Dictinaries
data_raw = pd.read_csv("../output/1176_summary.csv", parse_dates=[0])
data = data_raw[['date','lying']]
data = data.set_index('date')
data = data.divide(60 * 60)
x_lab_data = data
x_lab_data['weekday'] = data.index.weekday
x_lab_data['datestr'] = data.index.strftime('%d.%m.%Y')
x_lab_data['final'] = x_lab_data.weekday.map(Dictinaries.weekdays_norsk)
x_lab_data['final'] = x_lab_data['datestr'].map(str) + " " + x_lab_data['final']
print(x_lab_data['final'])
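
# --- Hedged sketch (added by this edit): the local Dictinaries module is not shown
# in this file. A minimal stand-in, assuming weekdays_norsk maps pandas' weekday
# index (0 = Monday) to Norwegian day names, could look like:
#
#   # Dictinaries.py
#   weekdays_norsk = {0: 'Mandag', 1: 'Tirsdag', 2: 'Onsdag', 3: 'Torsdag',
#                     4: 'Fredag', 5: 'Lørdag', 6: 'Søndag'}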
|
UTF-8
|
Python
| false | false | 571 |
py
| 68 |
weekdays.py
| 63 | 0.660245 | 0.642732 | 0 | 22 | 25 | 80 |
ursgal/ursgal
| 12,146,167,518,942 |
741639dffa43ed790f6d15b93308a21a5737555b
|
5fe963f6ecd0db1e7800c78d29737f4bab92ea8c
|
/ursgal/resources/platform_independent/arc_independent/get_ftp_files_1_0_0/get_ftp_files_1_0_0.py
|
b16e3dade0fd85eeae5193c6e8bb0ed787f5a662
|
[
"MIT",
"LGPL-3.0-only"
] |
permissive
|
https://github.com/ursgal/ursgal
|
746c30d7eeb7e83dc520adc9b47c74b6ee1f5503
|
25ed2fc75cbb4bd6656aa11df95023cb6acd1929
|
refs/heads/dev
| 2022-10-13T06:28:02.572027 | 2022-07-08T12:57:39 | 2022-07-08T12:57:39 | 44,454,082 | 39 | 54 |
MIT
| false | 2023-09-04T14:04:38 | 2015-10-17T20:49:38 | 2023-07-30T06:35:21 | 2023-09-04T14:04:38 | 13,693 | 41 | 31 | 4 |
Python
| false | false |
#!/usr/bin/env python
# encoding: utf-8
"""
Retrieve data from ftp server
usage:
    get_ftp_files_1_0_0.py <ftp_address> <login> <password> <filter_extension>
"""
# import glob
import ftplib
from ftplib import FTP
import os
import tempfile
def main(
ftp_url=None,
folder=None,
login=None,
password=None,
include_ext=None,
output_folder=None,
max_number_of_files=None,
blocksize=None,
):
# retrieve files via ftp
assert ftp_url is not None, "[ -<FTP>-- ] Require ftp_url not None to run ;)"
print(
"[ -<FTP>-- ] Downloading files from {0}, this can take a while...".format(
ftp_url
)
)
if include_ext is None:
include_ext = set()
# statinfo = os.stat( target_path )
# 'size' : statinfo.st_size
ftp = FTP(ftp_url.replace("ftp://", ""))
ftp.login(
user=login,
passwd=password,
)
if folder is not None:
ftp.cwd("/" + folder + "/")
# does not hurt, just to be sure ...
if output_folder is None:
output_folder = tempfile.gettempdir()
downloaded_files = []
def download_file(source, target, file_size):
print(
"[ -<FTP>-- ] Downloading: {0} into {1} with file size of {2:1.1f} MB".format(
source,
target,
file_size / 1e6,
)
)
with open(target, "wb") as io:
            # honour the blocksize passed to main(), falling back to 1024 bytes
            ftp.retrbinary("RETR " + source, io.write,
                           blocksize=blocksize if blocksize else 1024)
return
def walk_deeper(folder=None, output_root=None, downloaded_files=None):
if folder is None:
folder = ""
if downloaded_files is None:
downloaded_files = []
for file_or_directory in ftp.nlst(folder):
# print( file_or_directory )
try:
ftp_size = ftp.size(file_or_directory)
is_file = True
                # this raises exception ftplib.error_perm on peptideatlas.org
            except ftplib.error_perm:
                is_file = False
                # recurse into the sub-directory, collecting into the same list
                walk_deeper(file_or_directory, output_root=output_root,
                            downloaded_files=downloaded_files)
if is_file:
allowed_file = False
for extension in include_ext:
if file_or_directory.upper().endswith(extension.upper()):
allowed_file = True
break
# for extension in exclude_ext:
# if file_or_directory.upper().endswith(extension.upper()):
# allowed_file = False
# this DOES not work ... :)
if allowed_file:
dirname = os.path.dirname(file_or_directory)
file_path_on_host = os.path.join(output_root, file_or_directory)
folder_path_on_host = os.path.join(output_root, dirname)
if os.path.exists(file_path_on_host):
if ftp_size != os.stat(file_path_on_host).st_size:
print(
"[ -<FTP>-- ] Downloading again: {0} because download was incomplete!".format(
file_path_on_host
)
)
download_file(
file_or_directory, file_path_on_host, ftp_size
)
else:
print(
"[ -<FTP>-- ] File: {0} already downloaded!".format(
file_path_on_host
)
)
downloaded_files.append(file_path_on_host)
else:
if os.path.exists(folder_path_on_host) is False:
print(
"[ -<FTP>-- ] Created directory: {0}".format(
folder_path_on_host
)
)
os.makedirs(folder_path_on_host)
download_file(file_or_directory, file_path_on_host, ftp_size)
downloaded_files.append(file_path_on_host)
return downloaded_files
downloaded_files = walk_deeper(
output_root=output_folder, downloaded_files=downloaded_files
)
ftp.quit()
return downloaded_files
if __name__ == "__main__":
main(
ftp_url="ftp.pride.ebi.ac.uk",
folder="/pride/data/archive/2013/08/PXD000278",
include_ext=[".txt"],
output_folder="/tmp",
# max_number_of_files = 1,
blocksize=None,
)
|
UTF-8
|
Python
| false | false | 4,743 |
py
| 288 |
get_ftp_files_1_0_0.py
| 216 | 0.473329 | 0.466582 | 0 | 145 | 31.710345 | 110 |
bowdenk7/yago
| 1,322,849,936,898 |
13a7ae241b4fc407a88ed7a0a42c4c1c28b1e178
|
68029c4d4282ea55a14280ab33a32de31a3ed6d9
|
/feed/admin.py
|
c7420597399aef2a6369ba8f9132b272042b2ed2
|
[] |
no_license
|
https://github.com/bowdenk7/yago
|
7c020b589a57864e5c2caaa526178444ccce1eea
|
40df55022b16d5eae12f2e888a904aecab6dfd43
|
refs/heads/master
| 2020-12-25T14:08:24.341516 | 2015-04-17T00:40:12 | 2015-04-17T00:40:12 | 30,390,747 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from feed.models import VenueClassification, Venue, District
admin.site.register(VenueClassification)
admin.site.register(Venue)
admin.site.register(District)
|
UTF-8
|
Python
| false | false | 195 |
py
| 32 |
admin.py
| 28 | 0.835897 | 0.835897 | 0 | 9 | 20.777778 | 60 |
xmxoxo/Text-Opinion-Mining
| 17,325,898,107,360 |
e705383c046951987ce2fed82a18ab21191fe2f5
|
462232447fc046828a26dbc8a2225835bc812c1e
|
/modelscore.py
|
10961847e252039a0d5993267d0ee7a4550ae888
|
[] |
no_license
|
https://github.com/xmxoxo/Text-Opinion-Mining
|
74e6bef618a8c55fa20a114f8b68b426269e984f
|
3b6b2a14070eb3cf9446260f87d21a32ef5ed185
|
refs/heads/master
| 2023-02-22T22:58:04.496770 | 2021-01-29T01:52:50 | 2021-01-29T01:52:50 | 204,389,839 | 40 | 18 | null | true | 2019-08-26T03:38:39 | 2019-08-26T03:38:39 | 2019-08-23T08:42:42 | 2019-08-23T08:42:41 | 0 | 0 | 0 | 0 | null | false | false |
#!/usr/bin/env python3
#coding:utf-8
# update : 2019/8/30 8:31
# version: 0.2.0
__author__ = 'xmxoxo<xmxoxo@qq.com>'
'''
Model evaluation tool  modelscore.py
"Opinion mining on e-commerce reviews" competition: https://zhejianglab.aliyun.com/entrance/231731/introduction

IV. Scoring criteria
1. Within the same ID, quadruples are matched one by one; a quadruple counts as correct
   only if all four fields (AspectTerm, OpinionTerm, Category, Polarity) are correct.
2. Let P be the total number of predicted quadruples, G the total number of labelled
   quadruples, and S the number of correct quadruples:
   (1) Precision = S / P
   (2) Recall    = S / G
   (3) F1-score  = (2 * Precision * Recall) / (Precision + Recall)

Command-line usage:
    python modelscore.py -h
    python modelscore.py -soruce <source labels file> -result <prediction result file>
Arguments:
    source labels file:     the original labels file, default ./TRAIN/Train_labels.csv
    prediction result file: the file written by the model, default ./output/Result.csv
Quick evaluation:          python modelscore.py
Evaluate specific files:   python modelscore.py -soruce ./data/labels.csv -result ./output1/Result.csv
'''
import os
import sys
import pandas as pd
import argparse
# Compute the evaluation score
def getscore (lstS,lstP):
    y_test = list(lstS)
    classs_predictions = list(lstP)
    ret = ""
    # Total number of predicted quadruples, denoted P
    P = len(classs_predictions)
    # Total number of gold-standard quadruples, denoted G
    G = len(y_test)
    # Within the same ID, quadruples are matched one by one; a quadruple is correct
    # only if AspectTerm, OpinionTerm, Category and Polarity are all correct.
    # Number of correct quadruples, denoted S:
    S = 0
    setRet = set()
    for x in classs_predictions:
        if x in y_test:
            setRet.add(x)
            S += 1
    S1 = len(setRet)
    ret += 'unique correct: %d, correct count: %d\n' % (S1,S)
    S = S1
    ret += 'P:%d G:%d S:%d\n' % (P,G,S)
    if P == 0:
        Precision,Recall,f1_score = 0,0,0
    else:
        # (1) Precision = S / P
        Precision = S/P
        # (2) Recall = S / G
        Recall = S/G
        # (3) F1-score = (2 * Precision * Recall) / (Precision + Recall)
        # Guard against division by zero when no prediction is correct
        if Precision + Recall == 0:
            f1_score = 0
        else:
            f1_score = (2*Precision*Recall)/(Precision+Recall)
    ret += "Precision: %2.3f\n" % ( Precision)
    ret += "Recall: %2.3f\n" % ( Recall)
    ret += "F1-score: %2.3f\n" % ( f1_score)
    return ret
#-----------------------------------------
# Main routine: model evaluation
def modelscore (args):
    sorucefile = args.soruce
    predictfile = args.result
    if not os.path.isfile(sorucefile):
        print('Source labels file not found: %s' % sorucefile )
        sys.exit()
    if not os.path.isfile(predictfile):
        print('Prediction result file not found: %s' % predictfile )
        sys.exit()

    # Read in the data
    df_source = pd.read_csv(sorucefile)
    df_predict = pd.read_csv(predictfile,header = None)

    # Column handling
    # id,AspectTerms,A_start,A_end,OpinionTerms,O_start,O_end,Categories,Polarities
    lstColumns = ['id','AspectTerms','OpinionTerms','Categories','Polarities']
    df_source = df_source[lstColumns]
    df_predict.columns = lstColumns

    # 2019/8/30 TODO: add data analysis
    # Concatenate all fields into one text key (full quadruple match)
    df_source['txt'] = df_source['id'].astype(str) + df_source['AspectTerms'] + \
                df_source['OpinionTerms']+df_source['Categories']+df_source['Polarities']
    df_predict['txt'] = df_predict['id'].astype(str) + df_predict['AspectTerms'] + \
                df_predict['OpinionTerms']+df_predict['Categories']+df_predict['Polarities']

    # Concatenate id + AspectTerms + OpinionTerms only (extraction match)
    df_source['txt1'] = df_source['id'].astype(str) + \
                df_source['AspectTerms'] + df_source['OpinionTerms']
    df_predict['txt1'] = df_predict['id'].astype(str) + \
                df_predict['AspectTerms'] + df_predict['OpinionTerms']

    print('Data records overview'.center(30,'='))
    print(df_source.head(10))
    print('-'*30)
    print(df_predict.head(10))
    print('-'*30)

    ret = ""
    ret += 'Extraction model score'.center(30,'=') + '\n'
    ret += getscore (df_source['txt1'],df_predict['txt1'] )
    ret += 'Full model score'.center(30,'=') + '\n'
    ret += getscore (df_source['txt'],df_predict['txt'] )
    print(ret)
    print('-'*30)
# Command-line parsing
def main_cli ():
    parser = argparse.ArgumentParser(
        description='Model evaluation for the "opinion mining on e-commerce reviews" '
                    'competition; computes the scores of a model')
    parser.add_argument('-soruce', type=str, default="./TRAIN/Train_labels.csv",
        help='source labels file, default ./TRAIN/Train_labels.csv')
    parser.add_argument('-result', type=str, default='./output/Result.csv',
        help='prediction result file, default ./output/Result.csv')
    args = parser.parse_args()
    modelscore(args)

if __name__ == '__main__':
    main_cli()
|
UTF-8
|
Python
| false | false | 4,929 |
py
| 68 |
modelscore.py
| 7 | 0.60572 | 0.585676 | 0 | 147 | 26.829932 | 84 |
sandeepkumar8713/pythonapps
| 6,365,141,564,207 |
e61b336a2e5024b7151bd88eed475c34fc301019
|
8a83bb7acb9b62183fca817e1f196dd8075630a4
|
/22_secondFolder/26_min_diff_subset.py
|
eba3c37b37d37a631194de25459884ff11cfd4af
|
[] |
no_license
|
https://github.com/sandeepkumar8713/pythonapps
|
ff5ad3da854aa58e60f2c14d27359f8b838cac57
|
5dcb5ad4873124fed2ec3a717bfa379a4bbd197d
|
refs/heads/main
| 2023-09-01T04:12:03.865755 | 2023-08-31T07:04:58 | 2023-08-31T07:04:58 | 234,762,925 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# https://leetcode.com/discuss/interview-experience/343949/Google-or-L4-or-Bay-Area-or-July-2019
# Question : Given an array of intergers divide them into n subsets such that difference
# between sum of each subset is minimum.
#
# Example : input [1,1,4,2,8] and n=2
# output = [1,1,4,2] and [8]
#
# input [1,1,4,2,8] and n=3
# output = [1,1,2] and [4] and [8]
#
# Question Type : ShouldSee
# Used : sort input array . pull max out, push in set1, then pull max out, push in set2 and then pull max out,
# push in set3. Now, pull max out, push in least sum set. Repeat this until input array is empty.
# Complexity : O(n log n + n)
# TODO : add code
#
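
# --- Hedged sketch (added by this edit; the original file leaves the code as a TODO).
# A greedy implementation of the approach described above: sort the numbers in
# descending order and always assign the next number to the subset whose running
# sum is currently smallest (tracked with a min-heap). This is a heuristic that
# follows the description, not a provably optimal partition.
import heapq


def min_diff_subsets(nums, n):
    # One heap entry per subset: (current sum, subset index)
    heap = [(0, i) for i in range(n)]
    heapq.heapify(heap)
    subsets = [[] for _ in range(n)]
    for value in sorted(nums, reverse=True):
        current_sum, index = heapq.heappop(heap)   # subset with the smallest sum
        subsets[index].append(value)
        heapq.heappush(heap, (current_sum + value, index))
    return subsets


if __name__ == '__main__':
    print(min_diff_subsets([1, 1, 4, 2, 8], 2))   # -> [[8], [4, 2, 1, 1]]
    print(min_diff_subsets([1, 1, 4, 2, 8], 3))   # -> [[8], [4], [2, 1, 1]]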
|
UTF-8
|
Python
| false | false | 657 |
py
| 757 |
26_min_diff_subset.py
| 741 | 0.680365 | 0.625571 | 0 | 16 | 40.0625 | 110 |
ehan831/coding_study
| 4,191,888,100,780 |
bf9dc115380b3662f1373eec2c9968dd40167d7e
|
0ca0471d6457d8dcacc5f3433af586bed44cb7af
|
/python/aBasic/c_module_class/myfile.py
|
b2f0df7fa561e59c216034d7c419b07a2ba8ff1d
|
[] |
no_license
|
https://github.com/ehan831/coding_study
|
61f47a8b5a7fe448fc71a868637590821d988729
|
14958b6b4642e6488156091293e854cc36cf9411
|
refs/heads/master
| 2022-12-21T20:29:10.265425 | 2019-09-05T04:07:22 | 2019-09-05T04:07:22 | 181,843,058 | 0 | 0 | null | false | 2022-12-16T00:45:37 | 2019-04-17T07:50:37 | 2019-09-05T04:07:56 | 2022-12-16T00:45:33 | 116,081 | 0 | 0 | 7 |
Jupyter Notebook
| false | false |
# 1. To reference an entire module, use import
# import mymodule
#
# today = mymodule.get_weather()
# print("Today's weather is", today)
# print(mymodule.get_date(), 'is the day of the week.')

# 2. Give the module an alias
# import mymodule as my
# today = my.get_weather()
# print("Today's weather is", today)
# print(my.get_date(), 'is the day of the week.')

# 3. Import only the parts you need from the module
from mymodule import get_weather
today = get_weather()
print("Today's weather is", today)

from mymodule import get_date as gd
print(gd(), 'is the day of the week.')
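
# --- Hedged sketch (added by this edit): mymodule itself is not shown here. A
# minimal stand-in that makes the calls above work could look like:
#
#   # mymodule.py
#   def get_weather():
#       return 'sunny'        # illustrative return value
#
#   def get_date():
#       return 'Monday'       # illustrative return value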
|
UTF-8
|
Python
| false | false | 558 |
py
| 433 |
myfile.py
| 280 | 0.667431 | 0.66055 | 0 | 21 | 19.761905 | 38 |
Khelgrin/pytest_tutorial
| 5,566,277,618,359 |
131038ca922801dda325c37f583c0e2aabc35b32
|
188e0d3fad2fa1afad7e7e6eb92152440a8544db
|
/tests/fileReader_tests/test_FileReader_Mocking.py
|
6840c6bf646a35b9a83fa352fcb349ef26b3c48c
|
[] |
no_license
|
https://github.com/Khelgrin/pytest_tutorial
|
3a186875e36c557ffac22a2c1a40804c287a3ad2
|
c1dd2efe0b3591cfc3e5b204446a366eff020287
|
refs/heads/master
| 2022-04-11T01:05:02.239298 | 2020-04-03T23:15:59 | 2020-04-03T23:27:27 | 252,861,711 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Filereader import read_from_file
from unittest.mock import MagicMock
import pytest
# def test_can_call_read_from_file():
# read_from_file("myfile")
@pytest.fixture()
def mock_open(monkeypatch):
mockfile = MagicMock()
mockfile.readline = MagicMock(return_value="test line")
mock_open = MagicMock(return_value=mockfile)
monkeypatch.setattr("builtins.open", mock_open)
return mock_open
def test_returns_correct_string(monkeypatch, mock_open):
mock_exist = MagicMock(return_value=True)
monkeypatch.setattr("os.path.exists", mock_exist)
result = read_from_file("blah")
mock_open.assert_called_once_with("blah", "r")
assert result == "test line"
def test_throws_exception_with_no_file(monkeypatch, mock_open):
mock_exist = MagicMock(return_value=False)
monkeypatch.setattr("os.path.exists", mock_exist)
with pytest.raises(Exception):
result = read_from_file("blah")
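
# --- Hedged sketch (added by this edit): the Filereader module under test is not
# shown here. An implementation consistent with these tests (checks os.path.exists,
# opens the file with mode "r", returns the first line, raises if missing) could be:
#
#   # Filereader.py
#   import os
#
#   def read_from_file(filename):
#       if not os.path.exists(filename):
#           raise Exception("file does not exist: %s" % filename)
#       infile = open(filename, "r")
#       return infile.readline()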
|
UTF-8
|
Python
| false | false | 934 |
py
| 9 |
test_FileReader_Mocking.py
| 7 | 0.714133 | 0.714133 | 0 | 28 | 32.357143 | 63 |