prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90)
---|---|---
#
import numpy
import pandas
from sklearn.preprocessing import StandardScaler
# trash and should be removed
class PctTransformer:
def __init__(self):
self.f_row = None
def fit(self, data):
pass
def transform(self, data):
self.f_row = data[[0], :]
# percentage change between consecutive rows: data[t] / data[t-1] - 1
data_pct = data[1:, :] / data[:-1, :] - 1
return data_pct
def inverse_transform(self, data_pct):
data_cp = data_pct + 1
data_cp = numpy.concatenate((self.f_row, data_cp), axis=0)
data_cp = data_cp.cumprod(axis=0)
return data_cp
class LogPctTransformer:
def __init__(self):
self.full_set = None
# self.check_row = None
self.last_row = None
self.shape = None
pass
def fit(self, data):
self.full_set = data.copy()
# self.check_row = numpy.log(data[[1], :]) - numpy.log(data[[0], :])
self.last_row = data[[data.shape[0] - 1], :]
self.shape = data.shape
pass
def transform(self, data):
data_log_pct = numpy.log(data) - numpy.log(numpy.roll(data, shift=1, axis=0))
data_log_pct[0, :] = numpy.nan
return data_log_pct
"""
def inverse_transform(self, data):
if self.shape[0] == data.shape[0] and self.shape[1] == data.shape[1]:
if pandas.isna(data).all(axis=1)[0]:
# suppose it is train
result = self._inverse_transform(data, self.full_set)
else:
# suppose it is test
result = self._inverse_transform(data, self.last_row)
else:
# suppose it is test
result = self._inverse_transform(data, self.last_row)
return result
def _inverse_transform(self, data, first_row):
current_row = first_row
rows_stack = [current_row]
for j in numpy.arange(data.shape[0]):
if j != 0:
current_row = numpy.exp((data[j, :] + numpy.log(rows_stack[-1])))
rows_stack.append(current_row)
result = numpy.concatenate(rows_stack, axis=0)
return result
"""
def inverse_transform(self, data):
if self.shape[0] == data.shape[0]:
rows_stack = []
for j in range(data.shape[0]):
if pandas.isna(data[j, :]).any():
rows_stack.append(numpy.array([numpy.nan] * data.shape[1]).reshape(1, -1))
else:
current_row = self.full_set[j - 1, :] * numpy.exp(data[j, :]).reshape(1, -1)
rows_stack.append(current_row)
result = numpy.concatenate(rows_stack, axis=0)
return result
else:
current_row = self.full_set[-1, :].reshape(1, -1)
rows_stack = [current_row]
for j in numpy.arange(data.shape[0]):
if j != 0:
current_row = numpy.exp((data[j, :] + numpy.log(rows_stack[-1]))).reshape(1, -1)
rows_stack.append(current_row)
result = numpy.concatenate(rows_stack, axis=0)
return result
class __LogPctTransformer:
def __init__(self):
self.first_row = None
# self.check_row = None
self.last_row = None
self.shape = None
pass
def fit(self, data):
self.first_row = data[[0], :]
# self.check_row = numpy.log(data[[1], :]) - numpy.log(data[[0], :])
self.last_row = data[[data.shape[0] - 1], :]
self.shape = data.shape
pass
def transform(self, data):
data_log_pct = numpy.log(data) - numpy.log(numpy.roll(data, shift=1, axis=0))
data_log_pct[0, :] = numpy.nan
return data_log_pct
def inverse_transform(self, data):
if self.shape[0] == data.shape[0] and self.shape[1] == data.shape[1]:
"""
if (self.check_row == data[[1], :]).all():
result = self._inverse_transform(data, self.first_row)
else:
first_row = numpy.ones(shape=(1, data.shape[1]))
result = self._inverse_transform(data, first_row)
"""
if
|
pandas.isna(data)
|
pandas.isna
|
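The completion above fills in `pandas.isna`, which `LogPctTransformer.inverse_transform` uses to detect the NaN row that `transform` leaves at index 0. A minimal round-trip sketch of that log-return idea with toy numbers (not taken from the dataset):

```python
import numpy as np

# Toy prices: two assets over three periods.
prices = np.array([[100.0, 50.0], [110.0, 55.0], [99.0, 60.5]])

# Log returns, as in LogPctTransformer.transform: row 0 has no predecessor, so NaN.
log_pct = np.log(prices) - np.log(np.roll(prices, shift=1, axis=0))
log_pct[0, :] = np.nan

# Each later row is recovered from the previous price row, mirroring inverse_transform.
recovered = prices[:-1, :] * np.exp(log_pct[1:, :])
assert np.allclose(recovered, prices[1:, :])
```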
"""pandasなどなど関連。"""
from __future__ import annotations
import gc
import html
import logging
import typing
import warnings
import numpy as np
import pandas as pd
import sklearn.utils
import pytoolkit as tk
logger = logging.getLogger(__name__)
def label_encoding(values: pd.Series | np.ndarray, values_set: typing.Iterable):
"""ラベルエンコーディング。"""
return pd.Series(values).map({v: i for i, v in enumerate(values_set)})
def target_encoding(
values: pd.Series | np.ndarray,
values_train: pd.Series | np.ndarray,
target_train: np.ndarray,
min_samples_leaf: int = 3,
smoothing: float = 1.0,
):
"""ターゲットエンコーディング。"""
d = make_target_encoding_map(
values_train, target_train, min_samples_leaf, smoothing
)
return pd.Series(values).map(d)
def make_target_encoding_map(
values_train: pd.Series | np.ndarray,
target_train: np.ndarray,
min_samples_leaf: int = 3,
smoothing: float = 1.0,
) -> dict[typing.Any, np.float32]:
"""ターゲットエンコーディングの変換用dictの作成。"""
df_tmp = pd.DataFrame()
df_tmp["values"] = values_train
df_tmp["target"] = target_train
g = df_tmp.groupby("values")["target"]
s = g.mean()
c = g.count()
prior = df_tmp["target"].mean()
smoove = 1 / (1 + np.exp(-(c - min_samples_leaf) / smoothing))
smoothed = prior * (1 - smoove) + s.values * smoove
smoothed[c <= min_samples_leaf] = prior
d = dict(zip(s.index.values, np.float32(smoothed)))
return d
def safe_apply(s: pd.Series, fn) -> pd.Series:
"""nan以外にのみapply"""
return s.apply(lambda x: x if pd.isnull(x) else fn(x))
def add_col(
df: pd.DataFrame, column_name: str, values: typing.Sequence[typing.Any]
) -> None:
"""上書きしないようにチェックしつつ列追加。"""
if column_name in df:
raise ValueError(f"Column '{column_name}' already exists.")
df[column_name] = values
def add_cols(
df: pd.DataFrame, column_names: list[str], values: typing.Sequence[typing.Any]
) -> None:
"""上書きしないようにチェックしつつ列追加。"""
for column_name in column_names:
if column_name in df:
raise ValueError(f"Column '{column_name}' already exists.")
df[column_names] = values
def group_columns(
df: pd.DataFrame, cols: typing.Sequence[str] = None
) -> dict[str, list[str]]:
"""列を型ごとにグルーピングして返す。
Args:
df: DataFrame
cols: 対象の列名の配列
Returns:
種類ごとの列名のlist
- "binary": 二値列
- "numeric": 数値列
- "categorical": カテゴリ列(など)
- "unknown": その他
"""
binary_cols = []
numeric_cols = []
categorical_cols = []
unknown_cols = []
for c in cols or df.columns.values:
if pd.api.types.is_bool_dtype(df[c].dtype):
binary_cols.append(c)
elif pd.api.types.is_numeric_dtype(df[c].dtype):
numeric_cols.append(c)
elif pd.api.types.is_categorical_dtype(
df[c].dtype
) or
|
pd.api.types.is_object_dtype(df[c].dtype)
|
pandas.api.types.is_object_dtype
|
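The prompt above defines `make_target_encoding_map` with count-based smoothing. A small self-contained sketch of that smoothing with made-up values (rare categories fall back to the global mean, frequent ones approach their own mean):

```python
import numpy as np
import pandas as pd

values_train = pd.Series(["a", "a", "a", "a", "b"])
target_train = np.array([1, 1, 0, 1, 0])
min_samples_leaf, smoothing = 3, 1.0

df_tmp = pd.DataFrame({"values": values_train, "target": target_train})
g = df_tmp.groupby("values")["target"]
s, c = g.mean(), g.count()
prior = df_tmp["target"].mean()
smoove = 1 / (1 + np.exp(-(c - min_samples_leaf) / smoothing))
smoothed = prior * (1 - smoove) + s.values * smoove
smoothed[c <= min_samples_leaf] = prior          # "b" has only 1 sample -> prior
print(dict(zip(s.index.values, np.float32(smoothed))))
```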
import os
from functools import lru_cache
from glob import glob
from time import time
import numpy as np
import pandas as pd
import torch
import yaml
from fire import Fire
from glog import logger
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset import IdRndDataset
from metrics import accuracy, spoof_metric
from pred import TestAntispoofDataset
pd.options.display.float_format = lambda x: '{:.0f}'.format(x) if round(x, 0) == x else '{:,.4f}'.format(x)
@lru_cache(maxsize=10)
def make_test_dataset(n_fold=1):
with open('config.yaml') as cfg:
config = yaml.load(cfg)['test']
config['n_fold'] = n_fold
dataset = IdRndDataset.from_config(config)
files = dataset.imgs
labels = dataset.labels
paths = [{'id': labels[idx],
'path': files[idx],
'frame': np.float32(0),
}
for idx in range(len(files))]
test_dataset = TestAntispoofDataset(paths=paths)
return test_dataset
def parse_tb(path):
_dir = os.path.dirname(path)
files = sorted(glob(f'{_dir}/*tfevents*'))
if not files:
return {}
# fixme: it should pick proper metric file
ea = EventAccumulator(files[0])
ea.Reload()
res = {}
for k in ('train_acc', 'train_loss', 'val_acc', 'val_loss'):
try:
vals = [x.value for x in ea.Scalars(k)]
f = np.min if 'loss' in k else np.max
res[k] = f(vals)
except Exception:
logger.exception(f'Can not process {k} from {files[0]}')
res[k] = None
return res
def evaluate(model, dataset, batch_size):
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=8)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
labels, preds, times = [], [], []
with torch.no_grad():
for gt, frames, batch in tqdm(dataloader):
batch = batch.to(device)
t1 = time()
logits = model(batch)
t2 = time()
proba = torch.softmax(logits, dim=1).cpu().numpy()
labels.extend(gt)
preds.extend(proba)
times.append(t2 - t1)
preds, labels = map(np.array, (preds, labels))
return {'test_accuracy': accuracy(pred=preds, labels=labels),
'test_metric': spoof_metric(pred=preds, labels=labels),
'inference_time': np.mean(times),
}
def explore_models(models, batch_size):
logger.info(f'There are {len(models)} models to evaluate')
for m in models:
t0 = time()
model = torch.jit.load(m).to('cuda:0')
t1 = time()
d = {'load_time': t1 - t0,
'name': m
}
*_, n_fold = os.path.basename(m).split('_')
n_fold, _ = n_fold.split('.')
n_fold = int(n_fold)
metrics = evaluate(model=model,
batch_size=batch_size,
dataset=make_test_dataset(n_fold))
# tb_data = parse_tb(m)
d.update(metrics)
yield d
def main(pattern="./**/*_?.trcd", batch_size=64):
models = glob(pattern, recursive=False)
data = []
for x in explore_models(models, batch_size=batch_size):
data.append(x)
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
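The prompt above ends by building `pd.DataFrame(data)` from the dicts yielded by `explore_models`. A short sketch of that pattern with invented metrics (dict keys become columns, missing keys become NaN):

```python
import pandas as pd

rows = [
    {"name": "model_1.trcd", "load_time": 0.8, "test_accuracy": 0.97, "inference_time": 0.012},
    {"name": "model_2.trcd", "load_time": 0.7, "test_accuracy": 0.95},
]
df = pd.DataFrame(rows)
print(df[["name", "test_accuracy"]])
```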
"""
Listed-company announcement query
Source: [巨潮资讯网 (cninfo)](http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice-sse#)
Notes
The actual announcement time is used:
e.g. for a query date of 2018-12-15 the actual announcement time is 2018-12-14 16:00:00
"""
import asyncio
from aiohttp.client_exceptions import ContentTypeError
import math
import time
import aiohttp
import logbook
import pandas as pd
import requests
from logbook.more import ColorizedStderrHandler
from sqlalchemy import func
from cnswd.sql.base import get_engine, get_session
from cnswd.sql.info import Disclosure
logger = logbook.Logger('公司公告')
URL = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
COLUMNS = ['序号', '股票代码', '股票简称', '公告标题', '公告时间', '下载网址']
HEADERS = {
'Host': 'www.cninfo.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'Keep-Alive',
'Referer': 'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice',
}
CATEGORIES = {
'全部': None,
'年报': 'category_nbbg_szsh',
'半年报': 'category_bndbg_szsh',
'一季报': 'category_yjdbg_szsh',
'三季报': 'category_sjdbg_szsh',
'业绩预告': 'category_yjygjxz_szsh',
'权益分派': 'category_qyfpxzcs_szsh',
'董事会': 'category_dshgg_szsh',
'监事会': 'category_jshgg_szsh',
'股东大会': 'category_gddh_szsh',
'日常经营': 'category_rcjy_szsh',
'公司治理': 'category_gszl_szsh',
'中介报告': 'category_zj_szsh',
'首发': 'category_sf_szsh',
'增发': 'category_zf_szsh',
'股权激励': 'category_gqjl_szsh',
'配股': 'category_pg_szsh',
'解禁': 'category_jj_szsh',
'债券': 'category_zq_szsh',
'其他融资': 'category_qtrz_szsh',
'股权变动': 'category_gqbd_szsh',
'补充更正': 'category_bcgz_szsh',
'澄清致歉': 'category_cqdq_szsh',
'风险提示': 'category_fxts_szsh',
'特别处理和退市': 'category_tbclts_szsh',
}
PLATES = {
'sz': ('szse', '深市'),
'shmb': ('sse', '沪市')
}
def _get_total_record_num(data):
"""公告总数量"""
return math.ceil(int(data['totalRecordNum']) / 30)
def _to_dataframe(data):
def f(page_data):
res = []
for row in page_data['announcements']:
to_add = (
row['announcementId'],
row['secCode'],
row['secName'],
row['announcementTitle'],
pd.Timestamp(row['announcementTime'], unit='ms'),
'http://www.cninfo.com.cn/' + row['adjunctUrl'],
)
res.append(to_add)
df = pd.DataFrame.from_records(res, columns=COLUMNS)
return df
dfs = []
for page_data in data:
try:
dfs.append(f(page_data))
except Exception:
pass
return pd.concat(dfs)
async def _fetch_disclosure_async(session, plate, category, date_str, page):
assert plate in PLATES.keys(), f'Accepted plates: {PLATES}'
assert category in CATEGORIES.keys(), f'Accepted categories: {CATEGORIES}'
market = PLATES[plate][1]
sedate = f"{date_str}+~+{date_str}"
kwargs = dict(
tabName='fulltext',
seDate=sedate,
category=CATEGORIES[category],
plate=plate,
column=PLATES[plate][0],
pageNum=page,
pageSize=30,
)
# Requesting too frequently tends to get the connection closed
async with session.post(URL, data=kwargs, headers=HEADERS) as r:
msg = f"{market} {date_str} 第{page}页 响应状态:{r.status}"
logger.info(msg)
await asyncio.sleep(1)
try:
return await r.json()
except ContentTypeError:
return {}
async def _fetch_one_day(session, plate, date_str):
"""获取深交所或上交所指定日期所有公司公告"""
data = await _fetch_disclosure_async(session, plate, '全部', date_str, 1)
page_num = _get_total_record_num(data)
if page_num == 0:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
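Two pandas-facing details from the prompt above, sketched with made-up data: the page count is `ceil(totalRecordNum / 30)`, and each page of announcements is flattened into tuples before `pd.DataFrame.from_records`. The column names below are English stand-ins for the Chinese `COLUMNS`:

```python
import math
import pandas as pd

total_record_num = 95
pages = math.ceil(total_record_num / 30)   # -> 4 pages of up to 30 rows each

records = [
    ("10001", "000001", "Demo Co.", "Annual report",
     pd.Timestamp("2018-12-14 16:00:00"), "http://www.cninfo.com.cn/demo.pdf"),
]
columns = ["id", "code", "name", "title", "time", "url"]
df = pd.DataFrame.from_records(records, columns=columns)
print(pages, len(df))
```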
"""
Generate ensemble submission by majority vote.
Authors:
<NAME> and <NAME>
"""
import argparse
import glob
import pandas as pd
parser = argparse.ArgumentParser('Get args for ensemble script')
parser.add_argument('--split',
type=str,
default='dev',
choices=('dev', 'test'),
help='Split to use for ensembling.')
parser.add_argument('--sub_file',
type=str,
default='val_submission.csv',
help='Name for submission file.')
parser.add_argument('--out_dir',
type=str,
default='',
help='Name for out directory')
parser.add_argument('--file_to_omit',
type=str,
default='none',
help='Allow specification of file to omit')
parser.add_argument('--metric_name',
type=str,
default='F1',
choices=('EM', 'F1'),
help='Name of metric to determine tie breaking')
parser.add_argument('--threshold',
type=float,
default=65.0,
help='Threshold for models to include in ensemble')
parser.add_argument('--models_to_include',
type=str,
default=None,
help='Optional file specifying exact models to include')
args = parser.parse_args()
source_folder = './save/' + f'{args.split}' + '_submissions/'
stats_file = 'sub_stats.csv'
stats = pd.read_csv(stats_file)
# Either read in models to include in ensemble from provided txt file, or use metric and threshold
mods_to_include = []
if args.models_to_include is not None:
filename = './save/' + f'{args.split}' + '_submissions/' + f'{args.models_to_include}'
with open(filename, 'r') as fh:
lines = fh.read().splitlines()
for line in lines:
mods_to_include.append(line)
stats_sub = stats[stats['TestName'].isin(mods_to_include)]
else:
stats_sub = stats[(stats[args.metric_name] >= args.threshold) & (stats['TestName'] != 'none') &
(stats['TestName'] != args.file_to_omit)]
# Get best models by given metric for tie breaking
by_best_metric = stats_sub.sort_values(by=args.metric_name, ascending=False)
file_best_metric = source_folder + by_best_metric['TestName'].iloc[0] + '.csv'
file_2nd_best_metric = source_folder + by_best_metric['TestName'].iloc[1] + '.csv'
# Get list of filenames for for-loop
filenames = list(stats_sub['TestName'])
filenames = [source_folder + file + '.csv' for file in filenames]
# Combine model outputs into one dataframe
data = []
is_first_file = True
for filename in glob.glob(source_folder + '*.csv'):
if filename in filenames:
df = pd.read_csv(filename, keep_default_na=False)
if is_first_file:
df = df.rename(columns={'Predicted': filename})
is_first_file = False
else:
df = df.rename(columns={'Predicted': filename})
df = df[filename]
data.append(df)
df_all = pd.concat(data, axis=1)
# Get best answer given question by majority vote
# Break ties by favoring model with best F1 score
def get_pred(row):
pred = row.loc[file_best_metric]
pred2 = row.loc[file_2nd_best_metric]
counts = row.value_counts(dropna=False)
top_count = counts[0]
if top_count == 1:
return pred
top_preds = list(counts[counts == top_count].index)
if pred in top_preds:
return pred
if pred2 in top_preds:
return pred2
return top_preds[0]
# Apply function above to each question
preds = df_all.apply(get_pred, axis=1)
d = {'Id': list(df_all['Id']), 'Predicted': preds.values}
output =
|
pd.DataFrame(data=d)
|
pandas.DataFrame
|
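The `get_pred` voting above is the core of the ensemble. A toy sketch of the same idea on a single question (answers invented): the majority answer wins, and ties fall back to the best-scoring model's answer:

```python
import pandas as pd

row = pd.Series({"model_a.csv": "Paris", "model_b.csv": "Paris", "model_c.csv": "Lyon"})
best_model_answer = row["model_a.csv"]          # model_a assumed best by F1

counts = row.value_counts(dropna=False)
top = counts[counts == counts.iloc[0]].index.tolist()
if counts.iloc[0] == 1 or best_model_answer in top:
    pred = best_model_answer
else:
    pred = top[0]
print(pred)   # "Paris"
```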
"""Implement custom daily and weekly trading day calendars and datetime methods
- pandas custom business calendar
Author: <NAME>
License: MIT
"""
import datetime
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import pandas_datareader as pdr
from pandas.tseries.holiday import USFederalHolidayCalendar
from sqlalchemy import Column, Integer
from pandas.api.types import is_list_like
from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd
import config
# .to_pydatetime() - convert pandas format (Timestamp, datetime64) to datetime
# datetime.date.strftime(d, '%Y%m%d') - convert datetime to string
# np.array([self(dates)], dtype='datetime64[D]') - converts to numpy date format
# datetime.datetime(year, month, day) - returns datetime.datetime format
def to_monthend(dt):
"""Return calendar monthend date given an int date or list"""
if is_list_like(dt):
return [to_monthend(d) for d in dt]
if dt <= 9999:
d = datetime.datetime(year=dt, month=12, day=1) +
|
MonthEnd(0)
|
pandas.tseries.offsets.MonthEnd
|
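The completion above is `MonthEnd(0)`, the anchored offset that `to_monthend` relies on. A quick sketch of its behavior: adding it rolls a date forward to the end of its own month and leaves an existing month-end unchanged:

```python
import datetime
from pandas.tseries.offsets import MonthEnd

print(datetime.datetime(2023, 2, 10) + MonthEnd(0))   # 2023-02-28 00:00:00
print(datetime.datetime(2023, 2, 28) + MonthEnd(0))   # 2023-02-28 00:00:00 (unchanged)
```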
import collections
import fnmatch
import os
from typing import Union
import tarfile
import pandas as pd
import numpy as np
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils import hydro_utils
from hydrodataset.utils.hydro_utils import download_one_zip, unzip_nested_zip
CAMELS_NO_DATASET_ERROR_LOG = (
"We cannot read this dataset now. Please check if you choose the correct dataset:\n"
' ["AUS", "BR", "CA", "CL", "GB", "US", "YR"]'
)
def time_intersect_dynamic_data(obs: np.array, date: np.array, t_range: list):
"""
Select the observations in obs that fall within t_range
Parameters
----------
obs
a np array
date
all periods for obs
t_range
the time range we need, such as ["1990-01-01","2000-01-01"]
Returns
-------
np.array
the chosen data
"""
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
[c, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
class Camels(DataSourceBase):
def __init__(self, data_path, download=False, region: str = "US"):
"""
Initialization for CAMELS series dataset
Parameters
----------
data_path
where we put the dataset
download
if true, download
region
the default is CAMELS(-US), since it's the first CAMELS dataset.
Others now include: AUS, BR, CA, CE, CL, GB, YR
"""
super().__init__(data_path)
region_lst = ["AUS", "BR", "CA", "CE", "CL", "GB", "US", "YR"]
assert region in region_lst
self.region = region
self.data_source_description = self.set_data_source_describe()
if download:
self.download_data_source()
self.camels_sites = self.read_site_info()
def get_name(self):
return "CAMELS_" + self.region
def set_data_source_describe(self) -> collections.OrderedDict:
"""
Introduce the files in the dataset and list their location in the file system
Returns
-------
collections.OrderedDict
the description for a CAMELS dataset
"""
camels_db = self.data_source_dir
if self.region == "US":
# shp file of basins
camels_shp_file = os.path.join(
camels_db, "basin_set_full_res", "HCDN_nhru_final_671.shp"
)
# config of flow data
flow_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"usgs_streamflow",
)
# forcing
forcing_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"basin_mean_forcing",
)
forcing_types = ["daymet", "maurer", "nldas"]
# attr
attr_dir = os.path.join(
camels_db, "camels_attributes_v2.0", "camels_attributes_v2.0"
)
gauge_id_file = os.path.join(attr_dir, "camels_name.txt")
attr_key_lst = ["topo", "clim", "hydro", "vege", "soil", "geol"]
download_url_lst = [
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_set_full_res.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_FORCING_TYPE=forcing_types,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
CAMELS_DOWNLOAD_URL_LST=download_url_lst,
)
elif self.region == "AUS":
# id and name
gauge_id_file = os.path.join(
camels_db,
"01_id_name_metadata",
"01_id_name_metadata",
"id_name_metadata.csv",
)
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"02_location_boundary_area",
"02_location_boundary_area",
"shp",
"CAMELS_AUS_BasinOutlets_adopted.shp",
)
# config of flow data
flow_dir = os.path.join(camels_db, "03_streamflow", "03_streamflow")
# attr
attr_dir = os.path.join(camels_db, "04_attributes", "04_attributes")
# forcing
forcing_dir = os.path.join(
camels_db, "05_hydrometeorology", "05_hydrometeorology"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "BR":
# attr
attr_dir = os.path.join(
camels_db, "01_CAMELS_BR_attributes", "01_CAMELS_BR_attributes"
)
# we don't need the location attr file
attr_key_lst = [
"climate",
"geology",
"human_intervention",
"hydrology",
"land_cover",
"quality_check",
"soil",
"topography",
]
# id and name; there are two types of stations in CAMELS_BR, and we only chose the 897-station version
gauge_id_file = os.path.join(attr_dir, "camels_br_topography.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"14_CAMELS_BR_catchment_boundaries",
"14_CAMELS_BR_catchment_boundaries",
"camels_br_catchments.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(
camels_db, "02_CAMELS_BR_streamflow_m3s", "02_CAMELS_BR_streamflow_m3s"
)
flow_dir_mm_selected_catchments = os.path.join(
camels_db,
"03_CAMELS_BR_streamflow_mm_selected_catchments",
"03_CAMELS_BR_streamflow_mm_selected_catchments",
)
flow_dir_simulated = os.path.join(
camels_db,
"04_CAMELS_BR_streamflow_simulated",
"04_CAMELS_BR_streamflow_simulated",
)
# forcing
forcing_dir_precipitation_chirps = os.path.join(
camels_db,
"05_CAMELS_BR_precipitation_chirps",
"05_CAMELS_BR_precipitation_chirps",
)
forcing_dir_precipitation_mswep = os.path.join(
camels_db,
"06_CAMELS_BR_precipitation_mswep",
"06_CAMELS_BR_precipitation_mswep",
)
forcing_dir_precipitation_cpc = os.path.join(
camels_db,
"07_CAMELS_BR_precipitation_cpc",
"07_CAMELS_BR_precipitation_cpc",
)
forcing_dir_evapotransp_gleam = os.path.join(
camels_db,
"08_CAMELS_BR_evapotransp_gleam",
"08_CAMELS_BR_evapotransp_gleam",
)
forcing_dir_evapotransp_mgb = os.path.join(
camels_db,
"09_CAMELS_BR_evapotransp_mgb",
"09_CAMELS_BR_evapotransp_mgb",
)
forcing_dir_potential_evapotransp_gleam = os.path.join(
camels_db,
"10_CAMELS_BR_potential_evapotransp_gleam",
"10_CAMELS_BR_potential_evapotransp_gleam",
)
forcing_dir_temperature_min_cpc = os.path.join(
camels_db,
"11_CAMELS_BR_temperature_min_cpc",
"11_CAMELS_BR_temperature_min_cpc",
)
forcing_dir_temperature_mean_cpc = os.path.join(
camels_db,
"12_CAMELS_BR_temperature_mean_cpc",
"12_CAMELS_BR_temperature_mean_cpc",
)
forcing_dir_temperature_max_cpc = os.path.join(
camels_db,
"13_CAMELS_BR_temperature_max_cpc",
"13_CAMELS_BR_temperature_max_cpc",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[
flow_dir_m3s,
flow_dir_mm_selected_catchments,
flow_dir_simulated,
],
CAMELS_FORCING_DIR=[
forcing_dir_precipitation_chirps,
forcing_dir_precipitation_mswep,
forcing_dir_precipitation_cpc,
forcing_dir_evapotransp_gleam,
forcing_dir_evapotransp_mgb,
forcing_dir_potential_evapotransp_gleam,
forcing_dir_temperature_min_cpc,
forcing_dir_temperature_mean_cpc,
forcing_dir_temperature_max_cpc,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "CL":
# attr
attr_dir = os.path.join(camels_db, "1_CAMELScl_attributes")
attr_file = os.path.join(attr_dir, "1_CAMELScl_attributes.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"CAMELScl_catchment_boundaries",
"catchments_camels_cl_v1.3.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(camels_db, "2_CAMELScl_streamflow_m3s")
flow_dir_mm = os.path.join(camels_db, "3_CAMELScl_streamflow_mm")
# forcing
forcing_dir_precip_cr2met = os.path.join(
camels_db, "4_CAMELScl_precip_cr2met"
)
forcing_dir_precip_chirps = os.path.join(
camels_db, "5_CAMELScl_precip_chirps"
)
forcing_dir_precip_mswep = os.path.join(
camels_db, "6_CAMELScl_precip_mswep"
)
forcing_dir_precip_tmpa = os.path.join(camels_db, "7_CAMELScl_precip_tmpa")
forcing_dir_tmin_cr2met = os.path.join(camels_db, "8_CAMELScl_tmin_cr2met")
forcing_dir_tmax_cr2met = os.path.join(camels_db, "9_CAMELScl_tmax_cr2met")
forcing_dir_tmean_cr2met = os.path.join(
camels_db, "10_CAMELScl_tmean_cr2met"
)
forcing_dir_pet_8d_modis = os.path.join(
camels_db, "11_CAMELScl_pet_8d_modis"
)
forcing_dir_pet_hargreaves = os.path.join(
camels_db,
"12_CAMELScl_pet_hargreaves",
)
forcing_dir_swe = os.path.join(camels_db, "13_CAMELScl_swe")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[flow_dir_m3s, flow_dir_mm],
CAMELS_FORCING_DIR=[
forcing_dir_precip_cr2met,
forcing_dir_precip_chirps,
forcing_dir_precip_mswep,
forcing_dir_precip_tmpa,
forcing_dir_tmin_cr2met,
forcing_dir_tmax_cr2met,
forcing_dir_tmean_cr2met,
forcing_dir_pet_8d_modis,
forcing_dir_pet_hargreaves,
forcing_dir_swe,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=attr_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "GB":
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"CAMELS_GB_catchment_boundaries",
"CAMELS_GB_catchment_boundaries.shp",
)
# flow and forcing data are in the same files
flow_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"timeseries",
)
forcing_dir = flow_dir
# attr
attr_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
)
gauge_id_file = os.path.join(
attr_dir, "CAMELS_GB_hydrometry_attributes.csv"
)
attr_key_lst = [
"climatic",
"humaninfluence",
"hydrogeology",
"hydrologic",
"hydrometry",
"landcover",
"soil",
"topographic",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "YR":
# shp files of basins
camels_shp_files_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "Normal_Camels_YR_basin_boundary"
)
# attr, flow and forcing data are all in the same dir. each basin has one dir.
flow_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "1_Normal_Camels_YR_basin_data"
)
forcing_dir = flow_dir
attr_dir = flow_dir
# no gauge id file for CAMELS_YR; natural_watersheds.txt showed unregulated basins in CAMELS_YR
gauge_id_file = os.path.join(
camels_db, "9_Normal_Camels_YR", "natural_watersheds.txt"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CA":
# shp file of basins
camels_shp_files_dir = os.path.join(camels_db, "CANOPEX_BOUNDARIES")
# config of flow data
flow_dir = os.path.join(
camels_db, "CANOPEX_NRCAN_ASCII", "CANOPEX_NRCAN_ASCII"
)
forcing_dir = flow_dir
# There is no attr data in CANOPEX, hence we use attr from HYSET -- https://osf.io/7fn4c/
attr_dir = camels_db
gauge_id_file = os.path.join(camels_db, "STATION_METADATA.xlsx")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CE":
# We use A_basins_total_upstrm
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"3_shapefiles",
"Basins_A.shp",
)
# config of flow data
flow_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "D_gauges", "2_timeseries", "daily"
)
forcing_dir = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"2_timeseries",
"daily",
)
attr_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "A_basins_total_upstrm", "1_attributes"
)
gauge_id_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"D_gauges",
"1_attributes",
"Gauge_attributes.csv",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def download_data_source(self) -> None:
"""
Download CAMELS dataset.
Currently only the CAMELS-US dataset can be downloaded automatically.
For the others, please download the data manually and put all files of a CAMELS dataset in one directory.
For example, all files of CAMELS_AUS should be put in a "camels_aus" directory.
Returns
-------
None
"""
camels_config = self.data_source_description
if self.region == "US":
if not os.path.isdir(camels_config["CAMELS_DIR"]):
os.makedirs(camels_config["CAMELS_DIR"])
[
download_one_zip(attr_url, camels_config["CAMELS_DIR"])
for attr_url in camels_config["CAMELS_DOWNLOAD_URL_LST"]
if not os.path.isfile(
os.path.join(camels_config["CAMELS_DIR"], attr_url.split("/")[-1])
)
]
print("The CAMELS_US data have been downloaded!")
print(
"Please download it manually and put all files of a CAMELS dataset in the CAMELS_DIR directory."
)
print("We unzip all files now.")
if self.region == "CE":
# We only use CE's daily files for now, and they come as a tar.gz archive
file = tarfile.open(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily.tar.gz")
)
# extracting file
file.extractall(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily")
)
file.close()
for f_name in os.listdir(camels_config["CAMELS_DIR"]):
if fnmatch.fnmatch(f_name, "*.zip"):
unzip_dir = os.path.join(camels_config["CAMELS_DIR"], f_name[0:-4])
file_name = os.path.join(camels_config["CAMELS_DIR"], f_name)
unzip_nested_zip(file_name, unzip_dir)
def read_site_info(self) -> pd.DataFrame:
"""
Read the basic information of gages in a CAMELS dataset
Returns
-------
pd.DataFrame
basic info of gages
"""
camels_file = self.data_source_description["CAMELS_GAUGE_FILE"]
if self.region == "US":
data = pd.read_csv(
camels_file, sep=";", dtype={"gauge_id": str, "huc_02": str}
)
elif self.region == "AUS":
data = pd.read_csv(camels_file, sep=",", dtype={"station_id": str})
elif self.region == "BR":
data = pd.read_csv(camels_file, sep="\s+", dtype={"gauge_id": str})
elif self.region == "CL":
data = pd.read_csv(camels_file, sep="\t", index_col=0)
elif self.region == "GB":
data =
|
pd.read_csv(camels_file, sep=",", dtype={"gauge_id": str})
|
pandas.read_csv
|
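Besides the `pd.read_csv` completion, the prompt above aligns observations to a date range in `time_intersect_dynamic_data` via `np.intersect1d(..., return_indices=True)`. A minimal sketch of that alignment with toy dates:

```python
import numpy as np

t_lst = np.arange("2000-01-01", "2000-01-06", dtype="datetime64[D]")   # target range
date = np.array(["2000-01-02", "2000-01-04"], dtype="datetime64[D]")   # observed dates
obs = np.array([1.5, 2.5])

out = np.full([t_lst.shape[0]], np.nan)
_, ind1, ind2 = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
print(out)   # [nan 1.5 nan 2.5 nan]
```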
from typing import Iterable, Tuple
from ._account import Account
import pandas as pd
import numpy as np
from math import isfinite
from collections import OrderedDict
TRADE_KEYS = ('asset', 'date_entry', 'date_exit', 'side', 'n_transactions', 'wavg_price_entered', 'wavg_price_exited',
'qty_entered', 'qty_exited', 'pnl', 'pnl_perc', 'costs', 'context')
"""Trade records static keys for export"""
class Trade:
def __init__(self, dt, transaction):
# Expected Transaction keys
# 'asset', 'position_action', 'qty', 'price_close', 'price_exec', 'costs_close', 'costs_exec', 'pnl_close', 'pnl_execution'
assert transaction['position_action'] == 1, 'Must be opening transaction'
self._pnl = transaction['pnl_execution']
self._n_transations = 1
self._entry_qty = abs(transaction['qty'])
self._entry_value = transaction['price_exec'] * self._entry_qty
self._exit_qty = 0
self._exit_value = 0
self._costs = transaction['costs_exec']
self._entry_date = dt
self._exit_date = dt
self._side = 1 if transaction['qty'] > 0 else -1
self._is_closed = False
self._asset = transaction['asset']
self._qty = transaction['qty']
self._context = np.nan
if 'context' in transaction:
if transaction['context'] is not None:
self._context = transaction['context']
@property
def is_closed(self):
return self._is_closed
def as_tuple(self):
entry_avg_px = self._entry_value / self._entry_qty if self._entry_qty > 0 else np.nan
exit_avg_px = self._exit_value / self._exit_qty if self._exit_qty > 0 else np.nan
pnl_perc = (exit_avg_px / entry_avg_px - 1) * self._side
return (
self._asset,
self._entry_date,
self._exit_date,
self._side,
self._n_transations,
entry_avg_px,
exit_avg_px,
self._entry_qty,
self._exit_qty,
self._pnl,
pnl_perc, # % trade pnl
self._costs,
self._context,
)
def add_transaction(self, dt, transaction):
qty = transaction['qty']
pnl = transaction['pnl_execution']
costs = transaction['costs_exec']
exec_px = transaction['price_exec']
assert transaction['asset'] == self._asset
assert not ((self._qty > 0 and self._qty + qty < 0) or
(self._qty < 0 and self._qty + qty > 0)), f'Reversal transaction detected! {self._asset} at {dt}: ' \
f'Opened: {self._qty} Trans: {qty}'
assert not self._is_closed, 'Position already closed'
if isfinite(pnl):
self._pnl += pnl
else:
if isfinite(transaction['pnl_close']):
self._pnl += transaction['pnl_close']
exec_px = transaction['price_close']
if isfinite(costs):
self._costs += costs
else:
if isfinite(transaction['costs_close']):
self._costs += transaction['costs_close']
self._qty += qty
self._exit_date = dt
if transaction['position_action'] == 1:
# Add qty to existing position
self._entry_qty += abs(qty)
self._entry_value += exec_px * abs(qty)
self._n_transations += 1
if transaction['position_action'] == -1:
# Add qty to existing position
self._exit_qty += abs(transaction['qty'])
self._exit_value += exec_px * abs(qty)
self._n_transations += 1
if self._qty == 0:
self._is_closed = True
self._exit_date = dt
class Report:
"""
Generic backtester report
"""
def __init__(self, accounts: Iterable[Account], **kwargs):
"""
Build backtester report after initialization
:param accounts: list of accounts
"""
self.accounts = accounts
self.results = {}
for acc in accounts:
if acc in self.results:
raise ValueError(f"Duplicated account name '{acc}'")
self.results[acc] = self._build(acc)
def stats(self) -> pd.DataFrame:
"""
Re
:return:
"""
return pd.DataFrame({acc: r[0] for acc, r in self.results.items()})
def series(self, series_name) -> pd.DataFrame:
"""
Return dataframe of multiple account series
:param series_name: (see. Account.as_dataframe)
Return dataframe of account's arrays of :
- 'equity' (at exec time)
- 'capital_invested'
- 'costs' (at exec time)
- 'margin'
- 'pnl' (at exec time)
:return:
"""
return pd.DataFrame({acc: r[1][series_name] for acc, r in self.results.items()})
def trades(self, acc_name) -> pd.DataFrame:
"""
Returns trades list for specific account name
:param acc_name: account name
:return:
"""
return self.results[acc_name][2]
@staticmethod
def _produce_trades_list(account) -> pd.DataFrame:
"""
Produces trades list using account transactions
:param account:
:return:
"""
all_transactions = account.as_transactions()
closed_trades = []
trades = {}
for dt, trans in all_transactions.iterrows():
a = trans['asset']
if a not in trades:
trades[a] = Trade(dt, trans)
else:
t = trades[a]
t.add_transaction(dt, trans)
if t.is_closed:
closed_trades.append(t)
del trades[a]
# Add all remaining opened trades
for t in trades.values():
closed_trades.append(t)
trade_tuples = [t.as_tuple() for t in closed_trades]
return
|
pd.DataFrame(trade_tuples, columns=TRADE_KEYS)
|
pandas.DataFrame
|
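The completion above turns the list of trade tuples into a frame with the `TRADE_KEYS` column names. A small sketch of that call with one invented trade and a shortened key set:

```python
import pandas as pd

trade_keys = ("asset", "date_entry", "date_exit", "side", "pnl")   # shortened subset
trade_tuples = [("AAPL", "2021-01-04", "2021-01-08", 1, 125.0)]
trades_df = pd.DataFrame(trade_tuples, columns=trade_keys)
print(trades_df)
```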
import pandas as pd
def filter_data(df,center,attr_name,tolerance=5):
lat_name,lon_name,_ = attr_name
return df[attr_name][(df[lat_name]>center[0]-tolerance) & (df[lat_name]<center[0]+tolerance) & (df[lon_name]>center[1]-tolerance) & (df[lon_name]<center[1]+tolerance)]
def convert_timestamp(df,time_name):
df[time_name] =
|
pd.to_datetime(df[time_name])
|
pandas.to_datetime
|
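A toy sketch combining the two helpers above: parse the time column with `pd.to_datetime`, then keep rows whose latitude/longitude fall within the tolerance box around a center point (all values invented):

```python
import pandas as pd

df = pd.DataFrame({
    "lat": [10.0, 30.0],
    "lon": [20.0, 80.0],
    "value": [1.0, 2.0],
    "time": ["2020-01-01", "2020-06-01"],
})
df["time"] = pd.to_datetime(df["time"])

center, tol = (12.0, 22.0), 5
near = df[df["lat"].between(center[0] - tol, center[0] + tol)
          & df["lon"].between(center[1] - tol, center[1] + tol)]
print(near)
```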
import pandas as pd
import numpy as np
# ads_1_sum and ads_2_sum are each shop's total ad spend over the 90-day window
ads_all=pd.read_csv('../JDD_sale/dataset/sort_t_ads.csv')
ads_all['create_dt']=
|
pd.to_datetime(ads_all['create_dt'])
|
pandas.to_datetime
|
import pandas as pd
import ibis
from ibis.backends.base.sql.compiler import Compiler
from .conftest import get_query
def test_simple_scalar_aggregates(con):
# Things like table.column.{sum, mean, ...}()
table = con.table('alltypes')
expr = table[table.c > 0].f.sum()
query = get_query(expr)
sql_query = query.compile()
expected = """\
SELECT sum(`f`) AS `sum`
FROM alltypes
WHERE `c` > 0"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output = pd.DataFrame({'sum': [5]})
assert handler(output) == 5
def test_scalar_aggregates_multiple_tables(con):
# #740
table = ibis.table([('flag', 'string'), ('value', 'double')], 'tbl')
flagged = table[table.flag == '1']
unflagged = table[table.flag == '0']
expr = flagged.value.mean() / unflagged.value.mean() - 1
result = Compiler.to_sql(expr)
expected = """\
SELECT (t0.`mean` / t1.`mean`) - 1 AS `tmp`
FROM (
SELECT avg(`value`) AS `mean`
FROM tbl
WHERE `flag` = '1'
) t0
CROSS JOIN (
SELECT avg(`value`) AS `mean`
FROM tbl
WHERE `flag` = '0'
) t1"""
assert result == expected
fv = flagged.value
uv = unflagged.value
expr = (fv.mean() / fv.sum()) - (uv.mean() / uv.sum())
result = Compiler.to_sql(expr)
expected = """\
SELECT t0.`tmp` - t1.`tmp` AS `tmp`
FROM (
SELECT avg(`value`) / sum(`value`) AS `tmp`
FROM tbl
WHERE `flag` = '1'
) t0
CROSS JOIN (
SELECT avg(`value`) / sum(`value`) AS `tmp`
FROM tbl
WHERE `flag` = '0'
) t1"""
assert result == expected
def test_table_column_unbox(alltypes):
table = alltypes
m = table.f.sum().name('total')
agged = table[table.c > 0].group_by('g').aggregate([m])
expr = agged.g
query = get_query(expr)
sql_query = query.compile()
expected = """\
SELECT `g`
FROM (
SELECT `g`, sum(`f`) AS `total`
FROM alltypes
WHERE `c` > 0
GROUP BY 1
) t0"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output =
|
pd.DataFrame({'g': ['foo', 'bar', 'baz']})
|
pandas.DataFrame
|
import os
import yaml
import json
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
import seaborn as sns
import numpy as np
from sklearn.linear_model import LinearRegression
import glob
import time
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertYaml2PandasDataframeT20
# This function converts yaml files to Pandas dataframe and saves as CSV
#
###########################################################################################
def convertYaml2PandasDataframeT20(infile,source,dest):
'''
Convert and save T20 yaml files to pandas dataframes
Description
This function converts all T20 yaml files from the source directory to pandas data frames.
The data frames are then stored as .csv files. The saved file is of the format
team1-team2-date.csv, e.g. Kolkata Knight Riders-Sunrisers Hyderabad-2016-05-22.csv
Usage
convertYaml2PandasDataframeT20(yamlFile,sourceDir=".",targetDir=".")
Arguments
yamlFile
The yaml file to be converted to dataframe and saved
sourceDir
The source directory of the yaml file
targetDir
The target directory in which the data frame is stored as a CSV file
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertYaml2PandasDataframeT20
Examples
# In the example below the yaml file is read from "." and the csv is written to ../data
convertYaml2PandasDataframeT20("225171.yaml",".","../data")
'''
os.chdir(source)
os.path.join(source,infile)
# Read Yaml file and convert to json
print('Converting file:',infile)
with open(infile) as f:
a=yaml.load(f)
# 1st innings
deliveries=a['innings'][0]['1st innings']['deliveries']
#Create empty dataframe for team1
team1=pd.DataFrame()
# Loop through all the deliveries of 1st innings and append each row to dataframe
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team1=pd.concat([team1,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team1=team1.rename(columns={'batsman':'striker'})
# All extras column names
extras=[0,'wides','byes','legbyes','noballs','penalty']
if 'extras' in team1: #Check if extras are there
# Get the columns in extras for team1
b=team1.extras.apply(pd.Series).columns
# Find the missing extras columns
diff= list(set(extras) - set(b))
print('Team1:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team1=team1.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team1=pd.concat([team1,team1['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team1:",col)
team1[col]=0
team1=team1.drop(columns=0)
else:
print('Team1:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team1: #Check if runs in team1
team1=team1.rename(columns={'runs':'runs_dict'})
team1=pd.concat([team1,team1['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team1:Runs not present')
if 'wicket' in team1: #Check if wicket present
# Rename wicket as wicket_dict dict column as there is another wicket column
team1=team1.rename(columns={'wicket':'wicket_dict'})
team1=pd.concat([team1,team1['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team1: Wicket not present')
team1['team']=a['innings'][0]['1st innings']['team']
team1=team1.reset_index(inplace=False)
#Rename index to delivery
team1=team1.rename(columns={'index':'delivery'})
# 2nd innings - Check if the 2nd inning was played
if len(a['innings']) > 1: # Team2 played
deliveries=a['innings'][1]['2nd innings']['deliveries']
#Create empty dataframe for team1
team2=pd.DataFrame()
# Loop through all the deliveries of 1st innings
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team2=pd.concat([team2,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team2=team2.rename(columns={'batsman':'striker'})
# Get the columns in extras for team1
if 'extras' in team2: #Check if extras in team2
b=team2.extras.apply(pd.Series).columns
diff= list(set(extras) - set(b))
print('Team2:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team2=team2.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team2=pd.concat([team2,team2['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team2:",col)
team2[col]=0
team2=team2.drop(columns=0)
else:
print('Team2:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team2:
team2=team2.rename(columns={'runs':'runs_dict'})
team2=pd.concat([team2,team2['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:Runs not present')
if 'wicket' in team2:
# Rename wicket as wicket_dict column as there is another column wicket
team2=team2.rename(columns={'wicket':'wicket_dict'})
team2=pd.concat([team2,team2['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:wicket not present')
team2['team']=a['innings'][1]['2nd innings']['team']
team2=team2.reset_index(inplace=False)
#Rename index to delivery
team2=team2.rename(columns={'index':'delivery'})
else: # Create empty columns for team2 so that the complete DF has all columns
team2 = pd.DataFrame()
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
team2 = team2.reindex(columns=cols)
#Check for missing columns. It is possible that no wickets were lost in the entire innings
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
# Team1 - missing columns
msngCols=list(set(cols) - set(team1.columns))
print('Team1-missing columns:', msngCols)
for col in msngCols:
print("Adding:team1:",col)
team1[col]=0
# Team2 - missing columns
msngCols=list(set(cols) - set(team2.columns))
print('Team2-missing columns:', msngCols)
for col in msngCols:
print("Adding:team2:",col)
team2[col]=0
# Now both team1 and team2 should have the same columns. Concatenate
team1=team1[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
team2=team2[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
df=pd.concat([team1,team2])
#Fill NA's with 0s
df=df.fillna(0)
# Fill in INFO
print("Length of info field=",len(a['info']))
#City
try:
df['city']=a['info']['city']
except:
df['city'] =0
#Date
df['date']=a['info']['dates'][0]
#Gender
df['gender']=a['info']['gender']
#Match type
df['match_type']=a['info']['match_type']
# Neutral venue
try:
df['neutral_venue'] = a['info']['neutral_venue']
except KeyError as error:
df['neutral_venue'] = 0
#Outcome - Winner
try:
df['winner']=a['info']['outcome']['winner']
# Get the win type - runs, wickets etc
df['winType']=list(a['info']['outcome']['by'].keys())[0]
print("Wintype=",list(a['info']['outcome']['by'].keys())[0])
#Get the value of wintype
winType=list(a['info']['outcome']['by'].keys())[0]
print("Win value=",list(a['info']['outcome']['by'].keys())[0] )
# Get the win margin - runs,wickets etc
df['winMargin']=a['info']['outcome']['by'][winType]
print("win margin=", a['info']['outcome']['by'][winType])
except:
df['winner']=0
df['winType']=0
df['winMargin']=0
# Outcome - Tie
try:
df['result']=a['info']['outcome']['result']
df['resultHow']=list(a['info']['outcome'].keys())[0]
df['resultTeam'] = a['info']['outcome']['eliminator']
print(a['info']['outcome']['result'])
print(list(a['info']['outcome'].keys())[0])
print(a['info']['outcome']['eliminator'])
except:
df['result']=0
df['resultHow']=0
df['resultTeam']=0
try:
df['non_boundary'] = a['info']['non_boundary']
except KeyError as error:
df['non_boundary'] = 0
try:
df['ManOfMatch']=a['info']['player_of_match'][0]
except:
df['ManOfMatch']=0
# Identify the winner
df['overs']=a['info']['overs']
df['team1']=a['info']['teams'][0]
df['team2']=a['info']['teams'][1]
df['tossWinner']=a['info']['toss']['winner']
df['tossDecision']=a['info']['toss']['decision']
df['venue']=a['info']['venue']
# Rename column 'striker' to batsman
# Rename column 'batsman' to runs as it signifies runs scored by batsman
df=df.rename(columns={'batsman':'runs'})
df=df.rename(columns={'striker':'batsman'})
if (type(a['info']['dates'][0]) == str):
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0] + '.csv'
else:
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0].strftime('%Y-%m-%d') + '.csv'
destFile=os.path.join(dest,outfile)
print(destFile)
df.to_csv(destFile,index=False)
print("Dataframe shape=",df.shape)
return df, outfile
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertAllYaml2PandasDataframesT20
# This function converts all yaml files to Pandas dataframes and saves as CSV
#
###########################################################################################
def convertAllYaml2PandasDataframesT20(source,dest):
'''
Convert and save all Yaml files to pandas dataframes and save as CSV
Description
This function converts all yaml files from the source directory to data frames. The data frames are
then stored as .csv files. The saved files are of the format team1-team2-date.csv,
e.g. England-India-2008-04-06.csv
Usage
convertAllYaml2PandasDataframesT20(sourceDir=".",targetDir=".")
Arguments
sourceDir
The source directory of the yaml files
targetDir
The target directory in which the data frames are stored as CSV files
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertYaml2PandasDataframe
Examples
# In the example below ../yamldir is the source dir for the yaml files
convertAllYaml2PandasDataframesT20("../yamldir","../data")
'''
files = os.listdir(source)
for index, file in enumerate(files):
print("\n\nFile no=",index)
if file.endswith(".yaml"):
df, filename = convertYaml2PandasDataframeT20(file, source, dest)
#print(filename)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getRuns
# This function gets the runs scored by batsmen
#
###########################################################################################
def getRuns(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Determine number of deliveries faced and runs scored
runs=df1[['batsman','runs']].groupby(['batsman'],sort=False,as_index=False).agg(['count','sum'])
# Drop level 0
runs.columns = runs.columns.droplevel(0)
runs=runs.reset_index(inplace=False)
runs.columns=['batsman','balls','runs']
return(runs)
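# Illustrative sketch, not part of the original module: a tiny made-up deliveries
# frame run through getRuns() to show the balls/runs aggregation it returns.
def _getRuns_example():
    demo = pd.DataFrame({'batsman': ['A', 'A', 'B'],
                         'runs': [4, 1, 6],
                         'extras': [0, 0, 0],
                         'total': [4, 1, 6],
                         'non_boundary': [0, 0, 0]})
    # Expected shape: one row per batsman with columns batsman, balls, runs
    return getRuns(demo)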
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getFours
# This function gets the fours scored by batsmen
#
###########################################################################################
def getFours(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Get number of 4s. Check if it is boundary (non_boundary=0)
m=df1.loc[(df1.runs >=4) & (df1.runs <6) & (df1.non_boundary==0)]
# Count the number of 4s
noFours= m[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
noFours.columns=['batsman','4s']
return(noFours)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getSixes
# This function gets the sixes scored by batsmen
#
###########################################################################################
def getSixes(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
df2= df1.loc[(df1.runs ==6)]
sixes= df2[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
sixes.columns=['batsman','6s']
return(sixes)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getExtras
# This function gets the extras for the team
#
###########################################################################################
def getExtras(df):
df3= df[['total','wides', 'noballs', 'legbyes', 'byes', 'penalty', 'extras']]
a=df3.sum().astype(int)
#Convert series to dataframe
extras=a.to_frame().T
return(extras)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBattingScorecardMatch
# This function returns the team batting scorecard
#
###########################################################################################
def teamBattingScorecardMatch (match,theTeam):
'''
Team batting scorecard of a team in a match
Description
This function computes and returns the batting scorecard (runs, fours, sixes, balls played) for the team
Usage
teamBattingScorecardMatch(match,theTeam)
Arguments
match
The match for which the score card is required e.g.
theTeam
Team for which scorecard required
Value
scorecard A data frame with the batting scorecard
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBatsmenPartnershipMatch
teamBowlingScorecardMatch
teamBatsmenVsBowlersMatch
Examples
x1,y1=teamBattingScorecardMatch(kkr_sh,"<NAME>")
print(x1)
print(y1)
'''
scorecard=pd.DataFrame()
if(match.size != 0):
team=match.loc[match['team'] == theTeam]
else:
return(scorecard,-1)
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']]
extras=getExtras(match)
return(scorecard,extras)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getRunsConceded
# This function gets the runs conceded by bowler
#
###########################################################################################
def getRunsConceded(df):
# Note the column batsman has the runs scored by batsman
df1=df[['bowler','runs','wides', 'noballs']]
df2=df1.groupby('bowler').sum()
# Only wides and no balls included in runs conceded
df2['runs']=(df2['runs']+df2['wides']+df2['noballs']).astype(int)
df3 = df2['runs']
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getOvers
# This function gets the overs for bowlers
#
###########################################################################################
def getOvers(df):
df1=df[['bowler','delivery']]
df2=(df1.groupby('bowler').count()/6).astype(int)
df2.columns=['overs']
return(df2)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getMaidens
# This function gets the maiden overs for bowlers
#
###########################################################################################
def getMaidens(df):
df1=df[['bowler','delivery','runs','wides', 'noballs']]
# Get the over
df1['over']=df1.delivery.astype(int)
# Runs conceded includes wides and noballs
df1['runsConceded']=df1['runs'] + df1['wides'] + df1['noballs']
df2=df1[['bowler','over','runsConceded']]
# Compute runs in each over by bowler
df3=df2.groupby(['bowler','over']).sum()
df4=df3.reset_index(inplace=False)
# If maiden set as 1 else as 0
df4.loc[df4.runsConceded !=0,'maiden']=0
df4.loc[df4.runsConceded ==0,'maiden']=1
# Sum the maidens
df5=df4[['bowler','maiden']].groupby('bowler').sum()
return(df5)
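# Illustrative sketch, not part of the original module: made-up deliveries run
# through getMaidens() -- an over conceding zero runs (incl. wides/noballs) counts as a maiden.
def _getMaidens_example():
    demo = pd.DataFrame({'bowler': ['X'] * 12,
                         'delivery': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
                                      1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
                         'runs': [0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 2],
                         'wides': [0] * 12,
                         'noballs': [0] * 12})
    # Over 0 concedes nothing (maiden), over 1 concedes 7 -> maidens for X == 1
    return getMaidens(demo)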
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getWickets
# This function gets the wickets for bowlers
#
###########################################################################################
def getWickets(df):
df1=df[['bowler','kind', 'player_out', 'fielders']]
# Check if the team took wickets. Then this column will be a string
if isinstance(df1.player_out.iloc[0],str):
df2= df1[df1.player_out !='0']
df3 = df2[['bowler','player_out']].groupby('bowler').count()
else: # Did not take wickets. Set wickets as 0
df3 = df1[['bowler','player_out']].groupby('bowler').count()
df3['player_out']=0 # Set wickets as 0
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingScorecardMatch
# This function gets the bowling scorecard
#
###########################################################################################
def teamBowlingScorecardMatch (match,theTeam):
'''
Compute and return the bowling scorecard of a team in a match
Description
This function computes and returns the bowling scorecard of a team in a match
Usage
teamBowlingScorecardMatch(match,theTeam)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
Value
l A data frame with the bowling performance of the team in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlersVsBatsmenMatch
teamBattingScorecardMatch
Examples
m=teamBowlingScorecardMatch(kkr_sh,"<NAME>")
print(m)
'''
team=match.loc[match.team== theTeam]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
return(g1)
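##########################################################################################
# Illustrative usage sketch (added for clarity, not part of the original API above). The
# CSV file name below is hypothetical and assumes the match was converted earlier with
# convertYaml2PandasDataframeT20; the team name follows the docstring of
# teamBowlingScorecardMatch.
###########################################################################################
def _exampleTeamBowlingScorecardMatch(matchFile="Kolkata Knight Riders-Sunrisers Hyderabad-2016-04-17.csv"):
    kkr_sh = pd.read_csv(matchFile)
    # Overs, runs, maidens, wickets and economy rate per bowler in this match
    return teamBowlingScorecardMatch(kkr_sh, "Sunrisers Hyderabad")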
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBatsmenPartnershipMatch
# This function gets the batting partnerships
#
###########################################################################################
def teamBatsmenPartnershipMatch(match,theTeam,opposition,plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Team batting partnerships of batsmen in a match
Description
This function plots the partnerships of batsmen in a match against an opposition or it can return the data frame
Usage
teamBatsmenPartnershipMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
    The team for which the batting partnerships are sought
opposition
The opposition team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
    df The data frame of the batsmen partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBattingScorecardMatch
teamBowlingWicketKindMatch
teamBatsmenVsBowlersMatch
matchWormChart
Examples
teamBatsmenPartnershipMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
m=teamBatsmenPartnershipMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=False)
print(m)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','non_striker']]
if plot == True:
df3=df2.groupby(['batsman','non_striker']).sum().unstack().fillna(0)
rcParams['figure.figsize'] = 10, 6
df3.plot(kind='bar',stacked=True)
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -batting partnership- vs ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBatsmenVsBowlersMatch
# This function gives the performances of batsmen vs bowlers
#
###########################################################################################
def teamBatsmenVsBowlersMatch(match,theTeam,opposition, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team batsmen against bowlers in a match
Description
This function plots the performance of batsmen versus bowlers in a match or it can return the data frame
Usage
teamBatsmenVsBowlersMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
    The team for which the batting partnerships are sought
opposition
The opposition team
plot
    If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
b The data frame of the batsmen vs bowlers performance
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketKindMatch
teamBowlingWicketMatch
Examples
teamBatsmenVsBowlersMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','bowler']]
if plot == True:
df3=df2.groupby(['batsman','bowler']).sum().unstack().fillna(0)
df3.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -Batsman vs Bowler- in match against ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingWicketKindMatch
# This function gives the wicket kind for bowlers
#
###########################################################################################
def teamBowlingWicketKindMatch(match,theTeam,opposition, plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute and plot the wicket kinds by bowlers in match
Description
    This function computes and returns the kind of wickets (caught, bowled etc) taken by bowlers in a match between 2 teams
Usage
teamBowlingWicketKindMatch(match,theTeam,opposition,plot=TRUE)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
opposition
The opposition team
plot
If plot= TRUE the dataframe will be plotted else a data frame will be returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
    None or data frame A data frame with the wicket kinds taken by the bowlers in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlingWicketRunsMatch
teamBowlersVsBatsmenMatch
Examples
teamBowlingWicketKindMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
m=teamBowlingWicketKindMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=False)
print(m)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df3=df2[df2.player_out != '0']
if plot == True:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','kind']).count().unstack().fillna(0)
df4.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
        plt.xlabel('Bowler')
        plt.ylabel('Wickets')
        plt.title(theTeam + ' -Wicket kind- against ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile))
else:
plt.show()
plt.gcf().clear()
else:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','kind']).count().reset_index(inplace=False)
return(df4)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingWicketMatch
# This function gives the wickets for bowlers
#
###########################################################################################
def teamBowlingWicketMatch(match,theTeam,opposition, plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute and plot wickets by bowlers in match
Description
    This function computes and returns the wickets taken by bowlers in a match between 2 teams
Usage
teamBowlingWicketMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
opposition
The opposition team
plot
If plot= TRUE the dataframe will be plotted else a data frame will be returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
    None or data frame A data frame with the wickets taken by the bowlers in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlingWicketRunsMatch
teamBowlersVsBatsmenMatch
Examples
teamBowlingWicketMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df3=df2[df2.player_out != '0']
if plot == True:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','player_out']).count().unstack().fillna(0)
df4.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
        plt.xlabel('Bowler')
        plt.ylabel('Wickets')
plt.title(theTeam + ' -No of Wickets vs Runs conceded- against ' + opposition)
plt.text(1, 1,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','player_out']).count().reset_index(inplace=False)
return(df4)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlersVsBatsmenMatch
# This function gives the bowlers vs batsmen and runs conceded
#
###########################################################################################
def teamBowlersVsBatsmenMatch (match,theTeam,opposition, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers vs batsmen in a match
Description
This function computes performance of bowlers of a team against an opposition in a match
Usage
teamBowlersVsBatsmenMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The data frame of the match. This can be obtained with the call for e.g a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
theTeam
The team against which the performance is required
opposition
The opposition team
plot
This parameter specifies if a plot is required, If plot=FALSE then a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
    None or dataframe If plot=TRUE there is no return. If plot=FALSE then the dataframe is returned
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBattingScorecardMatch
teamBowlingWicketKindMatch
matchWormChart
Examples
teamBowlersVsBatsmenMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','bowler']]
if plot == True:
df3=df2.groupby(['batsman','bowler']).sum().unstack().fillna(0)
df3.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -Bowler vs Batsman- against ' + opposition)
plt.text(4, 20,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: matchWormChart
# This function draws the match worm chart
#
###########################################################################################
def matchWormChart(match,team1,team2,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot the match worm graph
Description
This function plots the match worm graph between 2 teams in a match
Usage
    matchWormChart(match,team1,team2)
Arguments
match
The dataframe of the match
team1
The 1st team of the match
team2
the 2nd team in the match
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
none
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBatsmenVsBowlersMatch
teamBowlingWicketKindMatch
Examples
## Not run:
#Get the match details
a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
# Plot tne match worm plot
matchWormChart(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad")
'''
df1=match.loc[match.team==team1]
df2=match.loc[match.team==team2]
    df3=df1[['delivery','total']].copy() # copy to avoid SettingWithCopyWarning when adding columns
    df3['cumsum']=df3.total.cumsum()
    df4 = df2[['delivery','total']].copy()
    df4['cumsum'] = df4.total.cumsum()
df31 = df3[['delivery','cumsum']]
df41 = df4[['delivery','cumsum']]
#plt.plot(df3.delivery.values,df3.cumsum.values)
df51= pd.merge(df31,df41,how='outer', on='delivery').dropna()
df52=df51.set_index('delivery')
df52.columns = [team1,team2]
df52.plot()
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Delivery')
plt.ylabel('Runs')
plt.title('Match worm chart ' + team1 + ' vs ' + team2)
plt.text(10, 10,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if plot == True:
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
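##########################################################################################
# Illustrative usage sketch (added for clarity, not part of the original API above). The
# CSV file name below is hypothetical; it assumes a converted ball-by-ball match file
# exists in the working directory.
###########################################################################################
def _exampleMatchWormChart(matchFile="Kolkata Knight Riders-Sunrisers Hyderabad-2016-04-17.csv"):
    kkr_sh = pd.read_csv(matchFile)
    # Cumulative runs per delivery for both teams, plotted as a worm chart
    matchWormChart(kkr_sh, "Kolkata Knight Riders", "Sunrisers Hyderabad")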
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: getAllMatchesBetweenTeams
# This function gets all the matches between 2 IPL teams
#
###########################################################################################
def getAllMatchesBetweenTeams(team1,team2,dir=".",save=False,odir="."):
'''
Get data on all matches between 2 opposing teams
Description
    This function gets all the data on matches between two opposing IPL teams. The combined
    dataframe can be saved by the user and then used in functions in which analyses are done
    for all matches between these teams.
Usage
getAllMatchesBetweenTeams(team1,team2,dir=".",save=FALSE)
Arguments
team1
One of the team in consideration e.g (KKR, CSK etc)
team2
The other team for which matches are needed e.g( MI, GL)
dir
    The directory which has the CSV files of matches between teams
save
    Default=False. This parameter indicates whether the combined data frame
    needs to be saved or not. It is recommended to save this large dataframe as
    the creation of this data frame takes several seconds depending on the number of matches
Value
matches - The combined data frame
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
plotWinsbyTossDecision
teamBowlersVsBatsmenOppnAllMatches
'''
# Create the 2 combinations
t1 = team1 +'-' + team2 + '*.csv'
t2 = team2 + '-' + team1 + '*.csv'
path1= os.path.join(dir,t1)
path2 = os.path.join(dir,t2)
files = glob.glob(path1) + glob.glob(path2)
print(len(files))
# Save as CSV only if there are matches between the 2 teams
if len(files) !=0:
df = pd.DataFrame()
for file in files:
df1 = pd.read_csv(file)
df=pd.concat([df,df1])
if save==True:
dest= team1 +'-' + team2 + '-allMatches.csv'
output=os.path.join(odir,dest)
df.to_csv(output)
else:
return(df)
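##########################################################################################
# Illustrative usage sketch (added for clarity, not part of the original API above). With
# save=False the combined dataframe is returned directly; with save=True it is written to
# odir as '<team1>-<team2>-allMatches.csv' and nothing is returned.
###########################################################################################
def _exampleGetAllMatchesBetweenTeams(dir1="."):
    # Combine all CSK-KKR match CSVs found in dir1 into a single dataframe
    return getAllMatchesBetweenTeams("Chennai Super Kings", "Kolkata Knight Riders",
                                     dir=dir1, save=False)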
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: saveAllMatchesBetween2IPLTeams
# This function saves all the matches between allIPL teams
#
###########################################################################################
def saveAllMatchesBetween2IPLTeams(dir1,odir="."):
'''
Saves all matches between 2 IPL teams as dataframe
Description
    This function saves all matches between 2 IPL teams as a single dataframe in the
    output directory
Usage
    saveAllMatchesBetween2IPLTeams(dir1,odir=".")
    Arguments
    dir1
    The directory which has the CSV files of the individual matches
    odir
    The output directory in which the combined match files are saved
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["Chennai Super Kings","Deccan Chargers","Delhi Daredevils",
"Kings XI Punjab", 'Kochi Tuskers Kerala',"Kolkata Knight Riders",
"Mumbai Indians", "Pune Warriors","Rajasthan Royals",
"Royal Challengers Bangalore","Sunrisers Hyderabad","Gujarat Lions",
"Rising Pune Supergiants"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenPartnershiOppnAllMatches
# This function gets the partnerships for a team in all matches
#
###########################################################################################
def teamBatsmenPartnershiOppnAllMatches(matches,theTeam,report="summary",top=5):
'''
Team batting partnership against a opposition all IPL matches
Description
This function computes the performance of batsmen against all bowlers of an oppositions in
all matches. This function returns a dataframe
Usage
teamBatsmenPartnershiOppnAllMatches(matches,theTeam,report="summary")
Arguments
matches
All the matches of the team against the oppositions
theTeam
    The team for which the batting partnerships are sought
report
If the report="summary" then the list of top batsmen with the highest partnerships
is displayed. If report="detailed" then the detailed break up of partnership is returned
as a dataframe
top
The number of players to be displayed from the top
Value
partnerships The data frame of the partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
'''
df1 = matches[matches.team == theTeam]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
if report == 'summary':
return(df5)
elif report == 'detailed':
return(df6)
else:
print("Invalid option")
return
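##########################################################################################
# Illustrative usage sketch (added for clarity, not part of the original API above). The
# CSV file name below is hypothetical; it assumes the combined matches file was saved
# earlier with getAllMatchesBetweenTeams(...,save=True).
###########################################################################################
def _exampleTeamBatsmenPartnershiOppnAllMatches(matchesFile="Kolkata Knight Riders-Chennai Super Kings-allMatches.csv"):
    matches = pd.read_csv(matchesFile)
    # 'summary' returns the top batsmen by partnership runs, 'detailed' the full break up
    summary = teamBatsmenPartnershiOppnAllMatches(matches, "Kolkata Knight Riders", report="summary", top=5)
    detailed = teamBatsmenPartnershiOppnAllMatches(matches, "Kolkata Knight Riders", report="detailed", top=5)
    return summary, detailed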
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenPartnershipOppnAllMatchesChart
# This function plots the partnetships for a team in all matches
#
###########################################################################################
def teamBatsmenPartnershipOppnAllMatchesChart(matches,main,opposition,plot=True,top=5,partnershipRuns=20,savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot of team partnership in all IPL matches against an opposition
Description
    This function plots the batting partnerships of a team against an opposition in all
    matches. This function also returns a dataframe with the batting partnerships
Usage
    teamBatsmenPartnershipOppnAllMatchesChart(matches,main,opposition, plot=TRUE,top=5,partnershipRuns=20)
Arguments
matches
All the matches of the team against all oppositions
main
    The main team for which the batting partnerships are sought
opposition
    The opposition team for which the batting partnerships are sought
plot
    Whether the partnerships have to be rendered as a plot. If plot=FALSE the data frame is returned
top
The number of players from the top to be included in chart
partnershipRuns
The minimum number of partnership runs to include for the chart
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
    teamBatsmenPartnershiOppnAllMatches
saveAllMatchesBetween2IPLTeams
teamBatsmenVsBowlersAllOppnAllMatchesPlot
teamBatsmenVsBowlersOppnAllMatches
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','non_striker','partnershipRuns']]
    # Remove rows below the partnershipRuns cutoff as there are too many
df8 = df7[df7['partnershipRuns'] > partnershipRuns]
df9=df8.groupby(['batsman','non_striker'])['partnershipRuns'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='non_striker',index='batsman').fillna(0)
if plot == True:
df9.plot(kind='bar',stacked=True,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Partnership runs between ' + main + '-' + opposition)
plt.xlabel('Batsman')
plt.ylabel('Partnership runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenVsBowlersOppnAllMatches
# This function plots the performance of batsmen against bowlers
#
###########################################################################################
def teamBatsmenVsBowlersOppnAllMatches(matches,main,opposition,plot=True,top=5,runsScored=20,savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes the performance of batsmen against the bowlers of an oppositions in all matches
Usage
teamBatsmenVsBowlersOppnAllMatches(matches,main,opposition,plot=TRUE,top=5,runsScored=20)
Arguments
matches
All the matches of the team against one specific opposition
main
    The team for which the batting performance is sought
opposition
The opposition team
plot
If plot=True then a plot will be displayed else a data frame will be returned
top
The number of players to be plotted or returned as a dataframe. The default is 5
runsScored
    The cutoff limit for runs scored against a bowler
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
teamBatsmenVsBowlersOppnAllMatches
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','bowler','runs']]
# Runs scored by bowler
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
df3.columns = ['batsman','bowler','runsScored']
# Need to pick the 'top' number of bowlers
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('runsScored',ascending=False)
df4.columns = ['batsman','totalRunsScored']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','bowler','runsScored']]
    # Remove rows below the runsScored cutoff as there are too many
df8 = df7[df7['runsScored'] >runsScored]
df9=df8.groupby(['batsman','bowler'])['runsScored'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Runs against bowlers ' + main + '-' + opposition)
plt.xlabel('Batsman')
plt.ylabel('Runs scored')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBattingScorecardOppnAllMatches
# This function computes the batting scorecard for all matches
#
###########################################################################################
def teamBattingScorecardOppnAllMatches(matches,main,opposition):
'''
Team batting scorecard of a team in all matches against an opposition
Description
    This function computes and returns the batting scorecard (runs, fours, sixes, balls played)
    for the team in all matches against an opposition
Usage
teamBattingScorecardOppnAllMatches(matches,main,opposition)
Arguments
matches
    the data frame of all matches between a team and an opposition obtained with the call getAllMatchesBetweenTeams()
main
The main team for which scorecard required
opposition
The opposition team
Value
scorecard The scorecard of all the matches
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershipAllOppnAllMatches
teamBowlingWicketKindOppositionAllMatches
'''
team=matches.loc[matches.team== main]
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']].sort_values('runs',ascending=False)
return(scorecard)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBowlingScorecardOppnAllMatches
# This function computes the bowling scorecard for all matches against an opposition
#
###########################################################################################
def teamBowlingScorecardOppnAllMatches(matches,main,opposition):
'''
Team bowling scorecard opposition all matches
Description
    This function computes and returns the bowling dataframe of the best bowlers:
    deliveries, maidens, overs, wickets against an IPL opposition in all matches
Usage
teamBowlingScorecardOppnAllMatches(matches,main,opposition)
Arguments
matches
    The matches of the team against the opposition in all matches
main
Team for which bowling performance is required
opposition
The opposing IPL team
Value
    l A data frame with the bowling performance in all matches against the opposition
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingWicketKindOppositionAllMatches
teamBatsmenVsBowlersOppnAllMatches
plotWinsbyTossDecision
'''
team=matches.loc[matches.team== main]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
g2 = g1.sort_values('wicket',ascending=False)
return(g2)
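##########################################################################################
# Illustrative usage sketch (added for clarity, not part of the original API above). The
# CSV file name below is hypothetical; it assumes a combined matches file saved by
# getAllMatchesBetweenTeams(...,save=True).
###########################################################################################
def _exampleTeamBowlingScorecardOppnAllMatches(matchesFile="Chennai Super Kings-Kolkata Knight Riders-allMatches.csv"):
    matches = pd.read_csv(matchesFile)
    # Best bowlers (overs, runs, maidens, wickets, economy rate) across all these matches
    return teamBowlingScorecardOppnAllMatches(matches, "Chennai Super Kings", "Kolkata Knight Riders")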
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBowlingWicketKindOppositionAllMatches
# This function plots the performance of bowlers and the kind of wickets
#
###########################################################################################
def teamBowlingWicketKindOppositionAllMatches(matches,main,opposition,plot=True,top=5,wickets=2,savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers wicket kind against an opposition in all matches
Description
    This function computes the wicket kinds taken by the bowlers of a team against
    an opposition in all matches against that opposition
Usage
teamBowlersWicketKindOppnAllMatches(matches,main,opposition,plot=TRUE,top=5,wickets=2)
Arguments
matches
    The data frame of all matches between a team and the opposition
main
The team for which the performance is required
opposition
The opposing team
plot
If plot=True then a plot is displayed else a dataframe is returned
top
The top number of players to be considered
wickets
The minimum number of wickets as cutoff
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe The return depends on the value of the plot
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
plotWinsByRunOrWickets
teamBowlersVsBatsmenOppnAllMatches
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df2=df2[df2.player_out != '0']
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','kind']).count().reset_index(inplace=False)
df3.columns = ['bowler','kind','wickets']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('wickets',ascending=False)
df4.columns = ['bowler','totalWickets']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','kind','wickets']]
    # Remove rows below the wickets cutoff as there are too many
df8 = df7[df7['wickets'] >wickets]
df9=df8.groupby(['bowler','kind'])['wickets'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Wicket kind by bowlers of ' + main + '-' + opposition)
plt.xlabel('Bowler')
plt.ylabel('Total wickets')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBowlersVsBatsmenOppnAllMatches
# This function plots the performance of the bowlers against batsmen
#
###########################################################################################
def teamBowlersVsBatsmenOppnAllMatches(matches,main,opposition,plot=True,top=5,runsConceded=10, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers vs batsmen against an opposition in all matches
Description
This function computes performance of bowlers of a team against an opposition in all
matches against the opposition
Usage
    teamBowlersVsBatsmenOppnAllMatches(matches,main,opposition,plot=True,top=5,runsConceded=10)
Arguments
matches
    The data frame of all matches between a team and the opposition.
main
    The main team for which the performance is required
    opposition
    The opposition team against which the performance is required
plot
If true plot else return dataframe
top
The number of rows to be returned. 5 by default
runsConceded
    The minimum number of runs to use as cutoff
    savePic
    If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
dataframe The dataframe with all performances
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershipOppnAllMatches
teamBowlersVsBatsmenOppnAllMatchesRept
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','batsman','runs']]
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','batsman']).sum().reset_index(inplace=False)
df3.columns = ['bowler','batsman','runsConceded']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('runsConceded',ascending=False)
df4.columns = ['bowler','totalRunsConceded']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','batsman','runsConceded']]
    # Remove rows below the runsConceded cutoff as there are too many
df8 = df7[df7['runsConceded'] >runsConceded]
df9=df8.groupby(['bowler','batsman'])['runsConceded'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Runs conceded by bowlers of ' + main + ' against ' + opposition)
plt.xlabel('Bowler')
plt.ylabel('Total runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinLossBetweenTeams
# This function plots the number of wins and losses in teams
#
###########################################################################################
def plotWinLossBetweenTeams(matches,team1,team2,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot wins for each team
Description
    This function computes and plots the number of wins for each team in all their encounters.
    The plot includes the number of wins by each team and the matches with no result
Usage
    plotWinLossBetweenTeams(matches,team1,team2)
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The 1st team
team2
The 2nd team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
teamBattingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
getAllMatchesBetweenTeams
'''
a=matches[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
b=a.groupby('winner').count().reset_index(inplace=False)
b.columns = ['winner','number']
sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner')
plt.ylabel('Number')
plt.title("Wins vs losses " + team1 + "-"+ team2)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinsByRunOrWickets
# This function plots how the win for the team was whether by runs or wickets
#
###########################################################################################
def plotWinsByRunOrWickets(matches,team1,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot whether the wins for the team was by runs or wickets
Description
    This function computes and plots the number of wins by runs vs the number of wins
    by wickets
Usage
plotWinsByRunOrWickets(matches,team1)
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The team for which the plot has to be done
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
    Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
getAllMatchesBetweenTeams
'''
# Get the number of matches won
df= matches.loc[matches.winner == team1]
a=df[['date','winType']].groupby(['date','winType']).count().reset_index(inplace=False)
b=a.groupby('winType').count().reset_index(inplace=False)
b.columns = ['winType','number']
sns.barplot(x='winType',y='number',data=b)
plt.xlabel('Win Type - Runs or wickets')
plt.ylabel('Number')
plt.title("Win type for team -" + team1 )
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinsbyTossDecision
# This function plots the number of wins/losses for team based on its toss decision
#
###########################################################################################
def plotWinsbyTossDecision(matches,team1,tossDecision='bat', plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
    Plot the wins and losses for the team based on its toss decision
    Description
    This function computes and plots the number of wins and losses for a team when it
    won the toss and made the given toss decision (bat or field)
Usage
plotWinsbyTossDecision(matches,team1,tossDecision='bat')
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
    The team for which the plot has to be done
    tossDecision
    The toss decision ('bat' or 'field')
    plot
    If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
teamBowlingWicketKindOppositionAllMatches
'''
df=matches.loc[(matches.tossDecision==tossDecision) & (matches.tossWinner==team1)]
a=df[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
b=a.groupby('winner').count().reset_index(inplace=False)
b.columns = ['winner','number']
sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner ' + 'when toss decision was to :' + tossDecision)
plt.ylabel('Number')
plt.title('Wins vs losses for ' + team1 + ' when toss decision was to ' + tossDecision )
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
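##########################################################################################
# Illustrative usage sketch (added for clarity, not part of the original API above) tying
# together the three win/loss plots above. The CSV file name below is hypothetical; it
# assumes a combined matches file saved by getAllMatchesBetweenTeams(...,save=True).
###########################################################################################
def _exampleWinLossPlots(matchesFile="Chennai Super Kings-Kolkata Knight Riders-allMatches.csv"):
    matches = pd.read_csv(matchesFile)
    plotWinLossBetweenTeams(matches, "Chennai Super Kings", "Kolkata Knight Riders")
    plotWinsByRunOrWickets(matches, "Chennai Super Kings")
    plotWinsbyTossDecision(matches, "Chennai Super Kings", tossDecision='bat')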
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: getAllMatchesAllOpposition
# This function gets all the matches between an IPL team and all oppositions
#
###########################################################################################
def getAllMatchesAllOpposition(team1,dir=".",save=False,odir="."):
'''
Get data on all matches against all opposition
Description
    This function gets all the matches for a particular IPL team
    against all other oppositions. It constructs a single large dataframe of
    all these matches. This can be saved by the user and then used in
    functions in which analyses are done for all matches and for all oppositions.
Usage
getAllMatchesAllOpposition(team,dir=".",save=FALSE)
Arguments
team
    The team for which all matches against all oppositions have to be obtained e.g. Chennai Super Kings, Mumbai Indians
dir
    The directory in which the saved match CSV files exist
save
    Default=False. This parameter indicates whether the combined data frame needs to be saved or not. It is recommended to save this large dataframe as the creation of this data frame takes several seconds depending on the number of matches
Value
match The combined data frame
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
saveAllMatchesAllOppositionIPLT20
teamBatsmenPartnershiAllOppnAllMatches
'''
# Create the 2 combinations
t1 = '*' + team1 +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
print(len(files))
# Save as CSV only if there are matches between the 2 teams
if len(files) !=0:
df = pd.DataFrame()
for file in files:
df1 = pd.read_csv(file)
df=pd.concat([df,df1])
if save==True:
dest= team1 + '-allMatchesAllOpposition.csv'
output=os.path.join(odir,dest)
df.to_csv(output)
else:
return(df)
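##########################################################################################
# Illustrative usage sketch (added for clarity, not part of the original API above). With
# save=False the combined dataframe is returned; with save=True it is written to odir as
# '<team>-allMatchesAllOpposition.csv' and nothing is returned.
###########################################################################################
def _exampleGetAllMatchesAllOpposition(dir1="."):
    # Combine every match CSV in dir1 that involves Mumbai Indians into one dataframe
    return getAllMatchesAllOpposition("Mumbai Indians", dir=dir1, save=False)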
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: saveAllMatchesAllOppositionIPLT20
# This function saves all the matches between each IPL team and all oppositions
#
###########################################################################################
def saveAllMatchesAllOppositionIPLT20(dir1,odir="."):
'''
Saves matches against all IPL teams as dataframe and CSV for an IPL team
Description
    This function saves all IPL matches against all oppositions as a single
    dataframe per team in the output directory
Usage
    saveAllMatchesAllOppositionIPLT20(dir1,odir=".")
    Arguments
    dir1
    The directory which has the CSV files of the individual matches
    odir
    The output directory in which the combined match files are saved
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["Chennai Super Kings","Deccan Chargers","Delhi Daredevils",
"Kings XI Punjab", 'Kochi Tuskers Kerala',"Kolkata Knight Riders",
"Mumbai Indians", "Pune Warriors","Rajasthan Royals",
"Royal Challengers Bangalore","Sunrisers Hyderabad","Gujarat Lions",
"Rising Pune Supergiants"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenPartnershiAllOppnAllMatches
# This function computes the partnerships of an IPL team against all other IPL teams
#
###########################################################################################
def teamBatsmenPartnershiAllOppnAllMatches(matches,theTeam,report="summary",top=5):
'''
    Team batting partnerships against all oppositions in all IPL matches
    Description
    This function computes the performance of batsmen against the bowlers of all oppositions in
    all matches. This function returns a dataframe
Usage
teamBatsmenPartnershiAllOppnAllMatches(matches,theTeam,report="summary")
Arguments
matches
All the matches of the team against the oppositions
theTeam
    The team for which the batting partnerships are sought
report
If the report="summary" then the list of top batsmen with the highest partnerships
is displayed. If report="detailed" then the detailed break up of partnership is returned
as a dataframe
top
The number of players to be displayed from the top
Value
partnerships The data frame of the partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
'''
df1 = matches[matches.team == theTeam]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
if report == 'summary':
return(df5)
elif report == 'detailed':
return(df6)
else:
print("Invalid option")
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenPartnershipAllOppnAllMatchesChart
# This function computes and plots the partnerships of an IPL team against all other IPL teams
#
###########################################################################################
def teamBatsmenPartnershipAllOppnAllMatchesChart(matches,main,plot=True,top=5,partnershipRuns=20, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plots team batting partnership all matches all oppositions
Description
    This function plots the batting partnerships of a team against all oppositions in all matches. This function also returns a dataframe with the batting partnerships
Usage
    teamBatsmenPartnershipAllOppnAllMatchesChart(matches,main,plot=True,top=5,partnershipRuns=20)
Arguments
matches
All the matches of the team against all oppositions
    main
    The main team for which the batting partnerships are sought
plot
    Whether the partnerships have to be rendered as a plot. If plot=FALSE the data frame is returned
top
The number of players from the top to be included in chart
partnershipRuns
The minimum number of partnership runs to include for the chart
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','non_striker','partnershipRuns']]
    # Remove rows below the partnershipRuns cutoff as there are too many
df8 = df7[df7['partnershipRuns'] > partnershipRuns]
df9=df8.groupby(['batsman','non_striker'])['partnershipRuns'].sum().unstack(fill_value=0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='non_striker',index='batsman').fillna(0)
if plot == True:
df9.plot(kind='bar',stacked=True,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Batting partnerships of ' + main + ' against all teams')
plt.xlabel('Batsman')
plt.ylabel('Partnership runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenVsBowlersAllOppnAllMatches
# This function computes and plots the performance of batsmen
# of an IPL team against all other teams
#
###########################################################################################
def teamBatsmenVsBowlersAllOppnAllMatches(matches,main,plot=True,top=5,runsScored=20, savePic=False, dir1=".",picFile="pic1.png"):
'''
Report of team batsmen vs bowlers in all matches all oppositions
Description
This function computes the performance of batsmen against all bowlers of all oppositions in all matches
Usage
teamBatsmenVsBowlersAllOppnAllMatches(matches,main,plot=True,top=5,runsScored=20)
Arguments
matches
All the matches of the team against all oppositions
main
    The team for which the batting performance is sought
plot
Whether a plot is required or not
top
The number of top batsmen to be included
    runsScored
    The cutoff for total runs scored by batsmen
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
The data frame of the batsman and the runs against bowlers
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','bowler','runs']]
# Runs scored by bowler
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
df3.columns = ['batsman','bowler','runsScored']
print(df3.shape)
# Need to pick the 'top' number of bowlers
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('runsScored',ascending=False)
print(df4.shape)
df4.columns = ['batsman','totalRunsScored']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','bowler','runsScored']]
    # Remove rows below the runsScored cutoff as there are too many
df8 = df7[df7['runsScored'] >runsScored]
df9=df8.groupby(['batsman','bowler'])['runsScored'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
#ax.legend(fontsize=25)
plt.title('Runs by ' + main + ' against all T20 bowlers')
plt.xlabel('Batsman')
plt.ylabel('Runs scored')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBattingScorecardAllOppnAllMatches
# This function computes and batting scorecard of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBattingScorecardAllOppnAllMatches(matches,main):
'''
Team batting scorecard against all oppositions in all matches
Description
    This function computes and returns the batting scorecard of a team in all matches against all oppositions. The data frame has the balls played, 4's, 6's and runs scored by batsman
Usage
    teamBattingScorecardAllOppnAllMatches(matches,main)
Arguments
matches
All matches of the team in all matches with all oppositions
main
    The team for which the batting scorecard is sought
Value
details The data frame of the scorecard of the team in all matches against all oppositions
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
team=matches.loc[matches.team== main]
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']].sort_values('runs',ascending=False)
return(scorecard)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlingScorecardAllOppnAllMatches
# This function computes and bowling scorecard of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlingScorecardAllOppnAllMatches(matches,main):
'''
Team bowling scorecard all opposition all matches
Description
    This function computes and returns the bowling dataframe of bowlers: deliveries,
    maidens, overs, wickets against all oppositions in all matches
Usage
    teamBowlingScorecardAllOppnAllMatches(matches,main)
Arguments
matches
The matches of the team against all oppositions and all matches
    main
    Team for which bowling performance is required
Value
    l A data frame with the bowling performance in all matches against all oppositions
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
team=matches.loc[matches.team== main]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
g2 = g1.sort_values('wicket',ascending=False)
return(g2)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlingWicketKindAllOppnAllMatches
# This function computes and plots the wicket kind of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlingWicketKindAllOppnAllMatches(matches,main,plot=True,top=5,wickets=2,savePic=False, dir1=".",picFile="pic1.png"):
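    '''
    Team bowling wicket kind against all oppositions in all matches
    Description
    This function computes and optionally plots the kind of wickets (caught, bowled etc)
    taken by the bowlers of a team against all oppositions in all matches
    Arguments
    matches
    All the matches of the team against all oppositions
    main
    Team for which the bowling performance is required
    plot
    If plot=True a plot is displayed else a dataframe is returned
    top
    The top number of bowlers to be considered
    wickets
    The minimum number of wickets as cutoff
    savePic
    If savePic = True then the plot is saved
    dir1
    The directory where the plot is saved
    picFile
    The name of the savefile
    Value
    None or dataframe
    '''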
df1=matches.loc[matches.team== main]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df2=df2[df2.player_out != '0']
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','kind']).count().reset_index(inplace=False)
df3.columns = ['bowler','kind','wickets']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('wickets',ascending=False)
df4.columns = ['bowler','totalWickets']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','kind','wickets']]
    # Remove rows below the wickets cutoff as there are too many
df8 = df7[df7['wickets'] >wickets]
df9=df8.groupby(['bowler','kind'])['wickets'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Wicket kind by bowlers of ' + main + ' against all T20 teams')
plt.xlabel('Bowler')
plt.ylabel('Total wickets')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlersVsBatsmenAllOppnAllMatches
# This function computes and plots the performance of bowlers of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlersVsBatsmenAllOppnAllMatches(matches,main,plot=True,top=5,runsConceded=10,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute team bowlers vs batsmen all opposition all matches
Description
    This function computes the performance of the bowlers of a team against all oppositions in all matches
Usage
    teamBowlersVsBatsmenAllOppnAllMatches(matches,main,plot=True,top=5,runsConceded=10)
Arguments
matches
    the data frame of all matches between a team and all oppositions, obtained with the call getAllMatchesAllOpposition()
main
    The team for which the performance is required
plot
Whether a plot should be displayed or a dataframe to be returned
top
The top number of bowlers in result
    runsConceded
    The minimum number of runs conceded by bowlers (cutoff)
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
dataframe The dataframe with all performances
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','batsman','runs']]
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','batsman']).sum().reset_index(inplace=False)
df3.columns = ['bowler','batsman','runsConceded']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('runsConceded',ascending=False)
df4.columns = ['bowler','totalRunsConceded']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','batsman','runsConceded']]
    # Remove rows below the runsConceded cutoff as there are too many
df8 = df7[df7['runsConceded'] >runsConceded]
df9=df8.groupby(['bowler','batsman'])['runsConceded'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Performance of ' + main + ' bowlers vs batsmen')
plt.xlabel('Bowler')
plt.ylabel('Total runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
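# Example usage (a hedged sketch; the team name is illustrative). The 'matches' dataframe is
# assumed to be the combined all-opposition dataframe described in the docstring above,
# obtained via getAllMatchesAllOpposition().
# matches = getAllMatchesAllOpposition('Chennai Super Kings', dir='.', save=False)
# teamBowlersVsBatsmenAllOppnAllMatches(matches, 'Chennai Super Kings', plot=True, top=5, runsConceded=10)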
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinLossByTeamAllOpposition
# This function computes and plots the wins and losses of an IPL team against all other
# IPL teams
#
###########################################################################################
def plotWinLossByTeamAllOpposition(matches, team1, plot='summary',savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot wins for each team
Description
This function computes and plots number of wins for each team in all their encounters.
    The plot includes the number of wins for team1, the wins by each opposing team and the matches with no result
Usage
    plotWinLossByTeamAllOpposition(matches, team1, plot='summary')
    Arguments
    matches
    The dataframe with all matches between an IPL team and all other IPL teams
    team1
    The team for which wins and losses are computed
plot
Summary or detailed
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
a=matches[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
# Plot the overall performance as wins and losses
if plot=="summary":
m= a.loc[a.winner==team1]['winner'].count()
n= a.loc[a.winner!=team1]['winner'].count()
df=pd.DataFrame({'outcome':['win','loss'],'number':[m,n]})
sns.barplot(x='outcome',y='number',data=df)
plt.xlabel('Outcome')
plt.ylabel('Number')
plt.title("Wins vs losses(summary) of " + team1 + ' against all Opposition' )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
elif plot=="detailed" :
#Plot breakup by team
b=a.groupby('winner').count().reset_index(inplace=False)
        # If 'winner' is '0' then the match is a tie. Set it as 'Tie'
b.loc[b.winner=='0','winner']='Tie'
b.columns = ['winner','number']
ax=sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner')
plt.ylabel('Number')
plt.title("Wins vs losses(detailed) of " + team1 + ' against all Opposition' )
ax.set_xticklabels(ax.get_xticklabels(),rotation=60,fontsize=6)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
print("Unknown option")
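# Example usage (a hedged sketch; the team name is illustrative):
# matches = getAllMatchesAllOpposition('India', dir='.', save=False)
# plotWinLossByTeamAllOpposition(matches, 'India', plot='summary')
# plotWinLossByTeamAllOpposition(matches, 'India', plot='detailed')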
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinsByRunOrWicketsAllOpposition
# This function computes and plots the wins by runs or by wickets of an IPL team against all other
# IPL teams
#
###########################################################################################
def plotWinsByRunOrWicketsAllOpposition(matches,team1,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot whether the wins for the team was by runs or wickets
Description
    This function computes and plots the number of wins by runs vs the number of wins
    by wickets against all opposition
Usage
plotWinsByRunOrWicketsAllOpposition(matches,team1)
Arguments
matches
The dataframe with all matches between an IPL team and all IPL teams
team1
The team for which the plot has to be done
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
# Get the number of matches won
df= matches.loc[matches.winner == team1]
a=df[['date','winType']].groupby(['date','winType']).count().reset_index(inplace=False)
b=a.groupby('winType').count().reset_index(inplace=False)
b.columns = ['winType','number']
sns.barplot(x='winType',y='number',data=b)
plt.xlabel('Win Type - Runs or wickets')
plt.ylabel('Number')
plt.title("Win type for team -" + team1 + ' against all opposition' )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinsbyTossDecisionAllOpposition
# This function computes and plots the wins by toss decision of an IPL team against all
# IPL teams
#
###########################################################################################
def plotWinsbyTossDecisionAllOpposition(matches,team1,tossDecision='bat',plot="summary", savePic=False, dir1=".",picFile="pic1.png"):
'''
    Plot the wins and losses of the team by toss decision
    Description
    This function computes and plots the number of wins and losses of the team against all
    opposition when the toss decision was to bat or to field
Usage
plotWinsbyTossDecisionAllOpposition(matches,team1,tossDecision='bat',plot="summary")
Arguments
matches
    The dataframe with all matches between an IPL team and all other IPL teams
team1
The team for which the plot has to be done
plot
'summary' or 'detailed'
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
teamBowlingWicketKindOppositionAllMatches
'''
df=matches.loc[(matches.tossDecision==tossDecision) & (matches.tossWinner==team1)]
a=df[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
if plot=="summary":
m= a.loc[a.winner==team1]['winner'].count()
n= a.loc[a.winner!=team1]['winner'].count()
df=pd.DataFrame({'outcome':['win','loss'],'number':[m,n]})
sns.barplot(x='outcome',y='number',data=df)
plt.xlabel('Outcome')
plt.ylabel('Number')
plt.title("Wins vs losses(summary) against all opposition when toss decision was to " + tossDecision + ' for ' + team1 )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
elif plot=="detailed" :
#Plot breakup by team
b=a.groupby('winner').count().reset_index(inplace=False)
        # If 'winner' is '0' then the match is a tie. Set it as 'Tie'
b.loc[b.winner=='0','winner']='Tie'
b.columns = ['winner','number']
ax=sns.barplot(x='winner',y='number',data=b)
plt.xlabel(team1 + ' chose to ' + tossDecision)
plt.ylabel('Number')
plt.title('Wins vs losses(detailed) against all opposition for ' + team1 + ' when toss decision was to ' + tossDecision )
ax.set_xticklabels(ax.get_xticklabels(),rotation=60, fontsize=6)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: Details
# This function computes the batting details of a team
# IPL teams
#
###########################################################################################
def getTeamBattingDetails(team,dir=".",save=False,odir="."):
'''
Description
    This function gets the batting details of a team in all matches against all oppositions. This gets all the details of the batsmen: balls faced, 4s, 6s, strike rate, runs, venue etc. This function is then used for analyses of batsmen. It uses teamBattingScorecardMatch() for each match
Usage
    getTeamBattingDetails(team,dir=".",save=False)
Arguments
team
The team for which batting details is required
    dir
    The source directory of the converted match CSV files
    save
    Whether the data frame needs to be saved as a CSV file or not. It is recommended to set save=True as the data can be used for a lot of analyses of batsmen
Value
battingDetails The dataframe with the batting details
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
Examples
m=getTeamBattingDetails(team1,dir1,save=True)
'''
# Get all matches played by team
t1 = '*' + team +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
# Create an empty dataframe
details = pd.DataFrame()
# Loop through all matches played by team
for file in files:
match=pd.read_csv(file)
scorecard,extras=teamBattingScorecardMatch(match,team)
if scorecard.empty:
continue
# Filter out only the rows played by team
match1 = match.loc[match.team==team]
        # Check if there were wickets - you will see 'bowled', 'caught' etc
        if len(match1) != 0:
if isinstance(match1.kind.iloc[0],str):
b=match1.loc[match1.kind != '0']
# Get the details of the wicket
wkts= b[['batsman','bowler','fielders','kind','player_out']]
#date','team2','winner','result','venue']]
df=pd.merge(scorecard,wkts,how='outer',on='batsman')
# Fill NA as not outs
df =df.fillna('notOut')
# Set other info
if len(b) != 0:
df['date']= b['date'].iloc[0]
df['team2']= b['team2'].iloc[0]
df['winner']= b['winner'].iloc[0]
df['result']= b['result'].iloc[0]
df['venue']= b['venue'].iloc[0]
details= pd.concat([details,df])
details = details.sort_values(['batsman','date'])
if save==True:
fileName = "./" + team + "-BattingDetails.csv"
output=os.path.join(odir,fileName)
details.to_csv(output)
return(details)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getBatsmanDetails
# This function gets the batsman details
# IPL teams
#
###########################################################################################
def getBatsmanDetails(team, name,dir="."):
'''
Get batting details of batsman from match
Description
This function gets the batting details of a batsman given the match data as a RData file
Usage
getBatsmanDetails(team,name,dir=".")
Arguments
team
The team of the batsman e.g. India
name
Name of batsman
dir
The directory where the source file exists
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
batsmanRunsPredict
batsmanMovingAverage
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
## Not run:
name="<NAME>"
team='Chennai Super Kings'
#df=getBatsmanDetails(team, name,dir=".")
'''
path = dir + '/' + team + "-BattingDetails.csv"
battingDetails= pd.read_csv(path)
batsmanDetails = battingDetails.loc[battingDetails['batsman'].str.contains(name)]
return(batsmanDetails)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getBatsmanDetails
# This function plots runs vs deliveries for the batsman
#
###########################################################################################
def batsmanRunsVsDeliveries(df,name= "A Late Cut",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Runs versus deliveries faced
Description
This function plots the runs scored and the deliveries required. A regression smoothing function is used to fit the points
Usage
batsmanRunsVsDeliveries(df, name= "A Late Cut")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsVsDeliveries(df, name)
'''
rcParams['figure.figsize'] = 8, 5
plt.scatter(df.balls,df.runs)
sns.lmplot(x='balls',y='runs', data=df)
plt.xlabel("Balls faced",fontsize=8)
plt.ylabel('Runs',fontsize=8)
atitle=name + "- Runs vs balls faced"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanFoursSixes
# This function gets the batsman fours and sixes for batsman
#
#
###########################################################################################
def batsmanFoursSixes(df,name= "A Leg Glance", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the total runs, fours and sixes of the batsman
Usage
batsmanFoursSixes(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
    plot
    If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanDismissals batsmanRunsVsDeliveries batsmanRunsVsStrikeRate batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanFoursSixes(df,"SK Raina")
'''
# Compute runs from fours and sixes
rcParams['figure.figsize'] = 8, 5
df['RunsFromFours']=df['4s']*4
df['RunsFromSixes']=df['6s']*6
df1 = df[['balls','runs','RunsFromFours','RunsFromSixes']]
# Total runs
    sns.scatterplot(x='balls', y='runs', data=df1)
    # Fit a linear regression line
    balls=df1.balls.values.reshape(-1,1)
linreg = LinearRegression().fit(balls, df1.runs)
x=np.linspace(0,120,10)
#Plot regression line balls vs runs
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='blue',label="Total runs")
# Runs from fours
    sns.scatterplot(x='balls', y='RunsFromFours', data=df1)
#Plot regression line balls vs Runs from fours
linreg = LinearRegression().fit(balls, df1.RunsFromFours)
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='red',label="Runs from fours")
# Runs from sixes
    sns.scatterplot(x='balls', y='RunsFromSixes', data=df1)
#Plot regression line balls vs Runs from sixes
linreg = LinearRegression().fit(balls, df1.RunsFromSixes)
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='green',label="Runs from sixes")
plt.xlabel("Balls faced",fontsize=8)
plt.ylabel('Runs',fontsize=8)
atitle=name + "- Total runs, fours and sixes"
plt.title(atitle,fontsize=8)
plt.legend(loc="upper left")
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanDismissals
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function computes and plots the type of dismissals of the batsman
Usage
batsmanDismissals(df,name="A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanDismissals(df,"SK Raina")
'''
# Count dismissals
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman','kind']]
df2 = df1.groupby('kind').count().reset_index(inplace=False)
df2.columns = ['dismissals','count']
plt.pie(df2['count'], labels=df2['dismissals'],autopct='%.1f%%')
atitle= name + "-Dismissals"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsVsStrikeRate
# This function plots the runs vs strike rate
#
#
###########################################################################################
def batsmanRunsVsStrikeRate (df,name= "A Late Cut", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function plots the strike rate versus the runs scored by the batsman. A regression line is fitted over the points
Usage
batsmanRunsVsStrikeRate(df, name= "A Late Cut")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanDismissals
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsVsStrikeRate(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
plt.scatter(df.runs,df.SR)
sns.lmplot(x='runs',y='SR', data=df,order=2)
plt.xlabel("Runs",fontsize=8)
plt.ylabel('Strike Rate',fontsize=8)
atitle=name + "- Runs vs Strike rate"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: movingaverage
# This computes the moving average
#
#
###########################################################################################
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
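# A quick illustration of movingaverage(), used by batsmanMovingAverage() and
# bowlerMovingAverage() below. With mode 'same' the output has the same length as the
# input, so the edge values are averaged against implicit zeros:
# movingaverage([1, 2, 3, 4, 5], 3)  # -> array([1., 2., 3., 4., 3.])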
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanMovingAverage
# This function plots the moving average of runs
#
#
###########################################################################################
def batsmanMovingAverage(df, name, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function plots the runs scored by the batsman over the career as a time series. A moving average of the runs scored by the batsman is plotted
Usage
batsmanMovingAverage(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanDismissals
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanMovingAverage(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
y_av = movingaverage(df.runs, 10)
date= pd.to_datetime(df['date'])
plt.plot(date, y_av,"b")
plt.xlabel('Date',fontsize=8)
plt.ylabel('Runs',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Moving average of runs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanCumulativeAverageRuns
# This function plots the cumulative average runs
#
#
###########################################################################################
def batsmanCumulativeAverageRuns(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanCumulativeAverageRuns(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
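    # Running mean of runs: cumulative sum divided by the match number 1..N (a Series aligned on the same index)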
cumAvgRuns = df['runs'].cumsum()/pd.Series(np.arange(1, len( df['runs'])+1), df['runs'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average Runs',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Cumulative Average Runs vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanCumulativeStrikeRate
# This function plots the cumulative average Strike rate
#
#
###########################################################################################
def batsmanCumulativeStrikeRate(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
    #batsmanCumulativeStrikeRate(df,name)
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['SR'].cumsum()/pd.Series(np.arange(1, len( df['SR'])+1), df['SR'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average Strike Rate',fontsize=8)
plt.xticks(rotation=70)
atitle = name + "-Cumulative Average Strike Rate vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsAgainstOpposition
# This function plots the batsman's runs against opposition
#
#
###########################################################################################
def batsmanRunsAgainstOpposition(df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the mean runs scored by the batsman against different oppositions
Usage
batsmanRunsAgainstOpposition(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsAgainstOpposition(df,name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman', 'runs','team2']]
df2=df1.groupby('team2').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='team2', y="runs_mean", data=df3)
plt.xticks(rotation="vertical",fontsize=8)
plt.xlabel('Opposition',fontsize=8)
plt.ylabel('Mean Runs',fontsize=8)
atitle=name + "-Mean Runs against opposition"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsVenue
# This function plots the batsman's runs at venues
#
#
###########################################################################################
def batsmanRunsVenue(df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the mean runs scored by the batsman at different venues of the world
Usage
batsmanRunsVenue(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
    picFile
    The name of the savefile
    Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
batsmanRunsAgainstOpposition
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
#batsmanRunsVenue(df,name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman', 'runs','venue']]
df2=df1.groupby('venue').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='venue', y="runs_mean", data=df3)
plt.xticks(rotation="vertical",fontsize=8)
plt.xlabel('Venue',fontsize=8)
plt.ylabel('Mean Runs',fontsize=8)
atitle=name + "-Mean Runs at venues"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: teamBowlingPerDetails
# This function gets the bowling performances
#
#
###########################################################################################
def teamBowlingPerDetails(team):
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
return(g1)
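# Example of how teamBowlingPerDetails() is used by getTeamBowlingDetails() below;
# the file and team names here are purely illustrative.
# match = pd.read_csv('Chennai Super Kings-Mumbai Indians-2018-04-07.csv')
# oppn_innings = match.loc[match.team != 'Chennai Super Kings']  # deliveries bowled by CSK bowlers
# csk_bowling = teamBowlingPerDetails(oppn_innings)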
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getTeamBowlingDetails
# This function gets the team bowling details
#
#
###########################################################################################
def getTeamBowlingDetails (team,dir=".",save=False,odir="."):
'''
Description
    This function gets the bowling details of a team in all matches against all oppositions. This gets all the details of the bowlers, e.g. deliveries, maidens, runs, wickets, venue, date, winner etc
Usage
    getTeamBowlingDetails(team,dir=".",save=False)
Arguments
team
The team for which detailed bowling info is required
    dir
    The source directory of the converted match CSV files
    save
    Whether the data frame needs to be saved as a CSV file or not. It is recommended to set save=True as the data can be used for a lot of analyses of bowlers
Value
bowlingDetails The dataframe with the bowling details
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
getBatsmanDetails
getBowlerWicketDetails
batsmanDismissals
getTeamBattingDetails
Examples
dir1= "C:\\software\\cricket-package\\yorkpyIPLData\\data"
    team1='Delhi Daredevils'
m=getTeamBowlingDetails(team1,dir1,save=True)
'''
# Get all matches played by team
t1 = '*' + team +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
# Create an empty dataframe
details = pd.DataFrame()
# Loop through all matches played by team
for file in files:
match=pd.read_csv(file)
if(match.size != 0):
team1=match.loc[match.team != team]
else:
continue
if len(team1) !=0:
scorecard=teamBowlingPerDetails(team1)
scorecard['date']= match['date'].iloc[0]
scorecard['team2']= match['team2'].iloc[0]
scorecard['winner']= match['winner'].iloc[0]
scorecard['result']= match['result'].iloc[0]
scorecard['venue']= match['venue'].iloc[0]
details= pd.concat([details,scorecard])
details = details.sort_values(['bowler','date'])
else:
pass # The team did not bowl
if save==True:
fileName = "./" + team + "-BowlingDetails.csv"
output=os.path.join(odir,fileName)
details.to_csv(output,index=False)
return(details)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getBowlerWicketDetails
# This function gets the bowler wicket
#
#
###########################################################################################
def getBowlerWicketDetails (team, name,dir="."):
'''
Description
This function gets the bowling of a bowler (overs,maidens,runs,wickets,venue, opposition)
Usage
getBowlerWicketDetails(team,name,dir=".")
Arguments
team
The team to which the bowler belongs
name
The name of the bowler
dir
The source directory of the data
Value
dataframe The dataframe of bowling performance
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
bowlerMovingAverage
getTeamBowlingDetails
bowlerMeanRunsConceded
teamBowlersWicketRunsOppnAllMatches
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
'''
path = dir + '/' + team + "-BowlingDetails.csv"
bowlingDetails= pd.read_csv(path,index_col=False)
bowlerDetails = bowlingDetails.loc[bowlingDetails['bowler'].str.contains(name)]
return(bowlerDetails)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerMeanEconomyRate
# This function gets the bowler mean economy rate
#
#
###########################################################################################
def bowlerMeanEconomyRate(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots mean economy rate and the number of overs bowled by the bowler
Usage
bowlerMeanEconomyRate(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerMeanEconomyRate(df, name)
'''
# Count dismissals
rcParams['figure.figsize'] = 8, 5
df2=df[['bowler','overs','econrate']].groupby('overs').mean().reset_index(inplace=False)
plt.xlabel('No of overs',fontsize=8)
plt.ylabel('Mean economy rate',fontsize=8)
sns.barplot(x='overs',y='econrate',data=df2)
atitle = name + "-Mean economy rate vs overs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerMeanRunsConceded
# This function gets the mean runs conceded by bowler
#
#
###########################################################################################
def bowlerMeanRunsConceded (df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots mean runs conceded by the bowler for the number of overs bowled by the bowler
Usage
bowlerMeanRunsConceded(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerMeanRunsConceded(df, name)
'''
# Count dismissals
rcParams['figure.figsize'] = 8, 5
df2=df[['bowler','overs','runs']].groupby('overs').mean().reset_index(inplace=False)
plt.xlabel('No of overs',fontsize=8)
plt.ylabel('Mean runs conceded',fontsize=8)
sns.barplot(x='overs',y='runs',data=df2)
atitle = name + "-Mean runs conceded vs overs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerMovingAverage
# This function gets the bowler moving average
#
#
###########################################################################################
def bowlerMovingAverage (df, name,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function computes and plots the wickets taken by the bowler over the career. A moving average of the wickets taken by the bowler is plotted
Usage
bowlerMovingAverage(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
bowlerMeanEconomyRate
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
    bowlerMovingAverage(df, name)
'''
rcParams['figure.figsize'] = 8, 5
y_av = movingaverage(df.wicket, 30)
date= pd.to_datetime(df['date'])
plt.plot(date, y_av,"b")
plt.xlabel('Date',fontsize=8)
plt.ylabel('Wickets',fontsize=8)
plt.xticks(rotation=70)
atitle = name + "-Moving average of wickets"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerCumulativeAvgWickets
# This function gets the bowler cumulative average runs
#
#
###########################################################################################
def bowlerCumulativeAvgWickets(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the cumulative average wickets of a bowler
Usage
bowlerCumulativeAvgWickets(df,name)
Arguments
df
Data frame
name
    Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate batsmanCumulativeStrikeRate batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerCumulativeAvgWickets(df, name)
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['wicket'].cumsum()/pd.Series(np.arange(1, len( df['wicket'])+1), df['wicket'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average wickets',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Cumulative Average wickets vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerCumulativeAvgEconRate
# This function gets the bowler cumulative average economy rate
#
#
###########################################################################################
def bowlerCumulativeAvgEconRate(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the cumulative average economy rate of a bowler
Usage
bowlerCumulativeAvgEconRate(df,name)
Arguments
df
Data frame
name
    Name of bowler
    plot
    If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgWickets batsmanCumulativeStrikeRate batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="R Ashwin"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
    bowlerCumulativeAvgEconRate(df, name)
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['econrate'].cumsum()/pd.Series(np.arange(1, len( df['econrate'])+1), df['econrate'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=7)
plt.ylabel('Cumulative Average economy rate',fontsize=8)
plt.xticks(rotation=70)
atitle = name + "-Cumulative Average economy rate vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerWicketPlot
# This function gets the bowler wicket plot
#
#
###########################################################################################
def bowlerWicketPlot (df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the average wickets taken by the bowler versus the number of overs bowled
Usage
bowlerWicketPlot(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
bowlerMeanEconomyRate
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
    bowlerWicketPlot(df, name)
'''
rcParams['figure.figsize'] = 8, 5
# Count dismissals
df2=df[['bowler','overs','wicket']].groupby('overs').mean().reset_index(inplace=False)
plt.xlabel('No of overs',fontsize=8)
plt.ylabel('Mean wickets',fontsize=8)
sns.barplot(x='overs',y='wicket',data=df2)
atitle = name + "-Mean wickets vs overs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerWicketsAgainstOpposition
# This function gets the bowler's performance against opposition
#
#
###########################################################################################
def bowlerWicketsAgainstOpposition (df,name= "A Leg Glance", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots mean number of wickets taken by the bowler against different opposition
Usage
bowlerWicketsAgainstOpposition(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerWicketsAgainstOpposition(df, name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['bowler', 'wicket','team2']]
df2=df1.groupby('team2').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='team2', y="wicket_mean", data=df3)
plt.xticks(rotation=90,fontsize=7)
plt.xlabel('Opposition',fontsize=7)
plt.ylabel('Mean wickets',fontsize=8)
atitle=name + "-Mean wickets against opposition"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerWicketsVenue
# This function gets the bowler wickets at venues
#
#
###########################################################################################
def bowlerWicketsVenue (df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Bowler performance at different venues
Description
This function computes and plots mean number of wickets taken by the bowler in different venues
Usage
bowlerWicketsVenue(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerWicketsVenue(df, name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['bowler', 'wicket','venue']]
df2=df1.groupby('venue').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='venue', y="wicket_mean", data=df3)
plt.xticks(rotation=90,fontsize=7)
plt.xlabel('Venue',fontsize=7)
plt.ylabel('Mean wickets',fontsize=8)
atitle=name + "-Mean wickets at different venues"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 March 2019
# Function: saveAllMatchesBetween2IntlT20s
# This function saves all the matches between 2 Intl T20 teams
#
###########################################################################################
def saveAllMatchesBetween2IntlT20s(dir1,odir="."):
'''
    Saves all matches between 2 Intl T20 teams as dataframe
Description
This function saves all matches between 2 Intl. T20 countries as a single dataframe in the
current directory
Usage
saveAllMatchesBetween2IntlT20s(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["Afghanistan","Australia","Bangladesh","Bermuda","Canada","England",
"Hong Kong","India","Ireland", "Kenya","Nepal","Netherlands",
"New Zealand", "Oman","Pakistan","Scotland","South Africa",
"Sri Lanka", "United Arab Emirates","West Indies", "Zimbabwe"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
return
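# Example usage (directory names are illustrative):
# saveAllMatchesBetween2IntlT20s('./t20_converted', odir='./t20_pairs')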
###########################################################################################
# Designed and developed by <NAME>
# Date : 2 Mar 2019
# Function: saveAllMatchesAllOppositionIntlT20
# This function saves all the matches between all Intl T20 teams
#
###########################################################################################
def saveAllMatchesAllOppositionIntlT20(dir1,odir="."):
'''
    Saves matches against all Intl T20 teams as dataframe and CSV for an Intl T20 team
    Description
    This function saves all Intl T20 matches against all opposition as a single
dataframe in the current directory
Usage
saveAllMatchesAllOppositionIntlT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["Afghanistan","Australia","Bangladesh","Bermuda","Canada","England",
"Hong Kong","India","Ireland", "Kenya","Nepal","Netherlands",
"New Zealand", "Oman","Pakistan","Scotland","South Africa",
"Sri Lanka", "United Arab Emirates","West Indies", "Zimbabwe"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 2 March 2019
# Function: saveAllMatchesBetween2BBLTeams
# This function saves all the matches between 2 BBL Teams
#
###########################################################################################
def saveAllMatchesBetween2BBLTeams(dir1):
'''
    Saves all matches between 2 BBL teams as dataframe
    Description
    This function saves all matches between 2 BBL T20 teams as a single dataframe in the
current directory
Usage
saveAllMatchesBetween2BBLTeams(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>", "Sydney Sixers",
"Sydney Thunder"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
return
###########################################################################################
# Designed and developed by <NAME>
# Date : 2 Mar 2019
# Function: saveAllMatchesAllOppositionBBLT20
# This function saves all the matches between all BBL T20 teams
#
###########################################################################################
def saveAllMatchesAllOppositionBBLT20(dir1):
'''
    Saves matches against all BBL T20 teams as dataframe and CSV for a BBL team
    Description
    This function saves all BBL T20 matches against all opposition as a single
dataframe in the current directory
Usage
saveAllMatchesAllOppositionBBLT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["<NAME>", "<NAME>", "Hobart Hurricanes",
"Melbourne Renegades", "Perth Scorchers", "Sydney Sixers",
"Sydney Thunder"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 2 March 2019
# Function: saveAllMatchesBetween2NWBTeams
# This function saves all the matches between 2 NWB Teams
#
###########################################################################################
def saveAllMatchesBetween2NWBTeams(dir1):
'''
Saves all matches between 2 NWB teams as dataframe
Description
    This function saves all matches between 2 NWB T20 teams as a single dataframe in the
current directory
Usage
saveAllMatchesBetween2NWBTeams(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["Derbyshire", "Durham", "Essex", "Glamorgan",
"Gloucestershire", "Hampshire", "Kent","Lancashire",
"Leicestershire", "Middlesex","Northamptonshire",
"Nottinghamshire","Somerset","Surrey","Sussex","Warwickshire",
"Worcestershire","Yorkshire"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
return
###########################################################################################
# Designed and developed by <NAME>
# Date : 2 Mar 2019
# Function: saveAllMatchesAllOppositionNWBT20
# This function saves all the matches between all NWB T20 teams
#
###########################################################################################
def saveAllMatchesAllOppositionNWBT20(dir1):
'''
    Saves matches against all NWB T20 teams as dataframe and CSV for an NWB team
    Description
    This function saves all NWB T20 matches against all opposition as a single
dataframe in the current directory
Usage
saveAllMatchesAllOppositionNWBT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["Derbyshire", "Durham", "Essex", "Glamorgan",
"Gloucestershire", "Hampshire", "Kent","Lancashire",
"Leicestershire", "Middlesex","Northamptonshire",
"Nottinghamshire","Somerset","Surrey","Sussex","Warwickshire",
"Worcestershire","Yorkshire"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 28 Feb 2020
# Function: rankIntlT20Batting
# This function ranks Intl T20 batsman
#
###########################################################################################
def rankIntlT20Batting(dir1):
    countries ={"India":"india", "United States of America":"usa", "Canada":"canada", "United Arab Emirates":"uae",
                "Afghanistan":"afghanistan", "West Indies":"westindies","Oman":"oman","Germany":"germany",
                "Namibia":"namibia","Sri Lanka":"sl","Singapore":"singapore",
                "Malaysia":"malaysia","South Africa": "sa","Netherlands":"netherlands",
                "Zimbabwe":"zimbabwe","Pakistan":"pakistan","Scotland":"scotland","Kuwait":"kuwait",
                "New Zealand":"nz","Vanuatu":"vanuatu","Papua New Guinea": "png","Australia":"aus",
                "Ireland":"ireland","England":"england","South Korea":"sk","Japan":"japan","Bangladesh":"bangladesh",
                "Nepal":"nepal","Cayman Island":"cayman","Rwanda":"rwanda","Qatar":"qatar","Botswana":"botswana",
                "Uganda":"uganda","Maldives":"maldives","Fiji":"fiji","Mozambique":"mozam",
                "Hong Kong":"hk","Denmark":"denmark","Norway":"norway"
               }
df=pd.DataFrame()
for key in countries:
val = countries[key] + "_details"
val= getTeamBattingDetails(key,dir=dir1, save=False,odir=".")
df = pd.concat([df,val])
df1=df.groupby('batsman').agg(['count','mean'])
df1.columns = ['_'.join(col).strip() for col in df1.columns.values]
df2 =df1[['runs_count','runs_mean','SR_mean']]
df3=df2[df2['runs_count']>40]
df4=df3.sort_values(['runs_mean','SR_mean'],ascending=False)
df4.columns=['matches','runs_mean','SR_mean']
return(df4)
#########################################################################################
# Designed and developed by <NAME>
# Date : 28 Feb 2020
# Function: rankIntlT20Bowling
# This function ranks Intl T20 bowlers
#
###########################################################################################
def rankIntlT20Bowling(dir1):
    countries ={"India":"india", "United States of America":"usa", "Canada":"canada", "United Arab Emirates":"uae",
                "Afghanistan":"afghanistan", "West Indies":"westindies","Oman":"oman","Germany":"germany",
                "Namibia":"namibia","Sri Lanka":"sl","Singapore":"singapore",
                "Malaysia":"malaysia","South Africa": "sa","Netherlands":"netherlands",
                "Zimbabwe":"zimbabwe","Pakistan":"pakistan","Scotland":"scotland","Kuwait":"kuwait",
                "New Zealand":"nz","Vanuatu":"vanuatu","Papua New Guinea": "png","Australia":"aus",
                "Ireland":"ireland","England":"england","South Korea":"sk","Japan":"japan","Bangladesh":"bangladesh",
                "Nepal":"nepal","Cayman Island":"cayman","Rwanda":"rwanda","Qatar":"qatar","Botswana":"botswana",
                "Uganda":"uganda","Maldives":"maldives","Fiji":"fiji","Mozambique":"mozam",
                "Hong Kong":"hk","Denmark":"denmark","Norway":"norway"
               }
df=pd.DataFrame()
for key in countries:
val = countries[key] + "_details"
val= getTeamBowlingDetails(key,dir=dir1, save=False,odir=".")
df = pd.concat([df,val])
df1=df.groupby('bowler').agg(['count','mean'])
df1.columns = ['_'.join(col).strip() for col in df1.columns.values]
df2 =df1[['wicket_count','wicket_mean','econrate_mean']]
df3=df2[df2['wicket_count']>40]
df4=df3.sort_values(['wicket_mean','econrate_mean'],ascending=False)
df4.columns=['matches','wicket_mean','econrate_mean']
return(df4)
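# Example usage (directory name is illustrative): rank bowlers from the converted
# Intl T20 match CSVs and show the top 10.
# bowler_rankings = rankIntlT20Bowling('./t20_converted')
# print(bowler_rankings.head(10))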
#########################################################################################
# Designed and developed by <NAME>
# Date : 28 Feb 2020
# Function: rankIPLT20Batting
# This function ranks IPL T20 batsmen
#
###########################################################################################
def rankIPLT20Batting(dir1):
iplTeams ={"Chennai Super Kings":"csk","Deccan Chargers":"dc","Delhi Daredevils":"dd",
"Kings XI Punjab":"kxip", 'Kochi Tuskers Kerala':"kct","Kolkata Knight Riders":"kkr",
"Mumbai Indians":"mi", "Pune Warriors":"pw","Rajasthan Royals":"rr",
"Royal Challengers Bangalore":"rps","Sunrisers Hyderabad":"sh","Gujarat Lions":"gl",
"Rising Pune Supergiants":"rps"}
df=
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Functions for saving proset reports to disk.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details
"""
from copy import deepcopy
import numpy as np
import pandas as pd
CELL_FORMAT = { # format definitions for xlsxwriter
"header_blue": {"font_name": "Calibri", "bold": True, "bg_color": "#95D0FC", "border": 1}, # light blue
"header_green": {"font_name": "Calibri", "bold": True, "bg_color": "#90e4c1", "border": 1}, # light teal
"float": {"font_name": "Calibri", "num_format": "#,##0.00;[Red]-#,##0.00", "border": 1},
"integer": {"font_name": "Calibri", "num_format": "#,##0", "border": 1},
"text": {"font_name": "Calibri", "border": 1},
}
COLUMN_FORMAT = { # assign widths and cell formats to report columns
"batch": {"width": 10, "header": "header_blue", "body": "integer"},
"sample": {"width": 10, "header": "header_blue", "body": "integer"},
"sample name": {"width": 50, "header": "header_blue", "body": "text"},
"target": {"width": 10, "header": "header_blue", "body": "integer"},
"prototype weight": {"width": 20, "header": "header_blue", "body": "float"},
"similarity": {"width": 20, "header": "header_blue", "body": "float"},
"impact": {"width": 20, "header": "header_blue", "body": "float"},
"dominant set": {"width": 20, "header": "header_blue", "body": "integer"},
"DEFAULT": {"width": 20, "header": "header_green", "body": "float"}
# use for all columns whose name does not match any key in this dictionary
}
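# Any report column whose name is not an explicit key above falls back to the
# "DEFAULT" entry (see the comment above). A minimal lookup sketch ("feature_17" is a
# made-up column name used only for illustration):
#
#   fmt = COLUMN_FORMAT.get("similarity", COLUMN_FORMAT["DEFAULT"])  # explicit entry, blue header
#   fmt = COLUMN_FORMAT.get("feature_17", COLUMN_FORMAT["DEFAULT"])  # falls back to the green default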
def write_report(file_path, report, column_format=None, cell_format=None): # pragma: no cover
"""Save results of model.Model.export() or model.Model.explain() as formatted xlsx file.
:param file_path: string; file name with full or relative path
:param report: pandas data frame as generated by model.Model.export() or model.Model.explain()
:param column_format: dict or None; if not None, the dict is used to update the default column formats from
module-level constant COLUMN_FORMAT
:param cell_format: dict or None; if not None, the dict is used to update the default cell formats from module-level
constant CELL_FORMAT
:return: no return value, file created on disk
"""
column_format = _update_format(format_=column_format, default=COLUMN_FORMAT)
cell_format = _update_format(format_=cell_format, default=CELL_FORMAT)
writer = pd.ExcelWriter(file_path) # pylint: disable=abstract-class-instantiated
workbook = writer.book
cell_format = {key: workbook.add_format(value) for key, value in cell_format.items()}
worksheet = workbook.add_worksheet("export")
freeze_rows = np.sum(
|
pd.isna(report["batch"])
|
pandas.isna
|
import configparser
import datetime as dt
import logging
import os
import shutil
from pathlib import Path
from urllib.error import URLError
import matplotlib.image as mplimg
import pandas as pd
import pkg_resources as pr
from . import stats
from .exceptions import NoFilesFoundError
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
pkg_name = __name__.split('.')[0]
configpath = Path.home() / ".{}.ini".format(pkg_name)
LOGGER = logging.getLogger(__name__)
def get_config():
"""Read the configfile and return config dict.
Returns
-------
dict
Dictionary with the content of the configpath file.
"""
if not configpath.exists():
raise IOError("Config file {} not found.".format(str(configpath)))
else:
config = configparser.ConfigParser()
config.read(str(configpath))
return config
def set_database_path(dbfolder):
"""Use to write the database path into the config.
Parameters
----------
dbfolder : str or pathlib.Path
Path to where planet4 will store clustering results by default.
"""
try:
d = get_config()
except IOError:
d = configparser.ConfigParser()
d['planet4_db'] = {}
d['planet4_db']['path'] = dbfolder
with configpath.open('w') as f:
d.write(f)
print("Saved database path into {}.".format(configpath))
def get_data_root():
d = get_config()
data_root = Path(d['planet4_db']['path']).expanduser()
data_root.mkdir(exist_ok=True, parents=True)
return data_root
def get_ground_projection_root():
d = get_config()
gp_root = Path(d['ground_projection']['path'])
gp_root.mkdir(exist_ok=True)
return gp_root
if not configpath.exists():
print("No configuration file {} found.\n".format(configpath))
savepath = input(
"Please provide the path where you want to store planet4 results:")
set_database_path(savepath)
else:
data_root = get_data_root()
def dropbox():
return Path.home() / 'Dropbox'
def p4data():
return dropbox() / 'data' / 'planet4'
def analysis_folder():
name = 'p4_analysis'
if p4data().exists():
path = p4data() / name
else:
path = dropbox() / name
return path
def check_and_pad_id(imgid):
"Does NOT work with pd.Series item."
if imgid is None:
return None
imgid_template = "APF0000000"
if len(imgid) < len(imgid_template):
imgid = imgid_template[:-len(imgid)] + imgid
return imgid
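# Example of the padding behaviour (illustrative values only):
#   check_and_pad_id("1234")        -> "APF0001234"
#   check_and_pad_id("APF0000abc")  -> "APF0000abc"  (already full length, returned unchanged)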
def get_subframe(url):
"""Download image if not there yet and return numpy array.
Takes a data record (called 'line'), picks out the image_url.
First checks if the name of that image is already stored in
the image path. If not, it grabs it from the server.
Then uses matplotlib.image to read the image into a numpy-array
and finally returns it.
"""
targetpath = data_root / 'images' / os.path.basename(url)
targetpath.parent.mkdir(exist_ok=True)
if not targetpath.exists():
LOGGER.info("Did not find image in cache. Downloading ...")
try:
path = urlretrieve(url)[0]
except URLError:
msg = "Cannot receive subframe image. No internet?"
LOGGER.error(msg)
return None
LOGGER.debug("Done.")
shutil.move(path, str(targetpath))
else:
LOGGER.debug("Found image in cache.")
im = mplimg.imread(targetpath)
return im
class P4DBName(object):
def __init__(self, fname):
self.p = Path(fname)
date = str(self.name)[:10]
self.date = dt.datetime(*[int(i) for i in date.split('-')])
def __getattr__(self, name):
"looking up things in the Path object if not in `self`."
return getattr(self.p, name)
def get_latest_file(filenames):
fnames = list(filenames)
if len(fnames) == 0:
raise NoFilesFoundError
retval = P4DBName(fnames[0])
dtnow = retval.date
for fname in fnames[1:]:
dt_to_check = P4DBName(fname).date
if dt_to_check > dtnow:
dtnow = dt_to_check
retval = P4DBName(fname)
return retval.p
def get_latest_cleaned_db(datadir=None):
datadir = data_root if datadir is None else Path(datadir)
h5files = list(datadir.glob('201*_queryable_cleaned*.h5'))
if len(h5files) == 0:
LOGGER.error("No files found. Searching in %s", str(datadir))
raise NoFilesFoundError(f"No files found. Searching in {str(datadir)}")
return get_latest_file(h5files)
def get_latest_season23_dbase(datadir=None):
if datadir is None:
datadir = data_root
h5files = list(datadir.glob('201*_queryable_cleaned_seasons2and3.h5'))
return get_latest_file(h5files)
def get_test_database():
fname = pr.resource_filename('planet4', 'data/test_db.csv')
return pd.read_csv(fname)
def get_latest_tutorial_data(datadir=None):
if datadir is None:
datadir = data_root
    tut_files = datadir.glob('*_tutorials.h5')
    tut_files = [i for i in tut_files if i.parent.name[:4].isdigit()]
if not tut_files:
raise NoFilesFoundError
return pd.read_hdf(str(get_latest_file(tut_files)), 'df')
def common_gold_ids():
# read the common gold_ids to check
with open('../data/gold_standard_commons.txt') as f:
gold_ids = f.read()
gold_ids = gold_ids.split('\n')
del gold_ids[-1] # last one is empty
return gold_ids
def get_image_names_from_db(dbfname):
"""Return arrary of HiRISE image_names from database file.
Parameters
----------
dbfname : pathlib.Path or str
Path to database file to be used.
Returns
-------
numpy.ndarray
Array of unique image names.
"""
path = Path(dbfname)
if path.suffix in ['.hdf', '.h5']:
with pd.HDFStore(str(dbfname)) as store:
return store.select_column('df', 'image_name').unique()
elif path.suffix == '.csv':
return pd.read_csv(dbfname).image_id.unique()
def get_latest_marked():
return pd.read_hdf(str(get_latest_cleaned_db()), 'df',
where='marking!=None')
def get_image_id_from_fname(fname):
"Return image_id from beginning of Path(fname).name"
fname = Path(fname)
name = fname.name
return name.split('_')[0]
def get_image_ids_in_folder(folder, extension='.csv'):
fnames = Path(folder).glob('*' + extension)
return [get_image_id_from_fname(i) for i in fnames]
class PathManager(object):
"""Manage file paths and folders related to the analysis pipeline.
Level definitions:
* L0 : Raw output of Planet Four
* L1A : Clustering of Blotches and Fans on their own
* L1B : Clustered blotches and fans combined into final fans, final blotches, and fnotches that
need to have a cut applied for the decision between fans or blotches.
* L1C : Derived database where a cut has been applied for fnotches to become either fan or
blotch.
Parameters
----------
id_ : str, optional
The data item id that is used to determine sub-paths. Can be set after
init.
datapath : str or pathlib.Path, optional
the base path from where to manage all derived paths. No default assumed
to prevent errors.
suffix : {'.hdf', '.h5', '.csv'}
The suffix that controls the reader function to be used.
obsid : str, optional
HiRISE obsid (i.e. P4 image_name), added as a folder inside path.
Can be set after init.
extra_path : str, pathlib.Path, optional
Any extra path element that needs to be added to the standard path.
Attributes
----------
cut_dir : pathlib.Path
Defined in `get_cut_folder`.
"""
def __init__(self, id_='', datapath='clustering', suffix='.csv', obsid='', cut=0.5,
extra_path=''):
self.id = id_
self.cut = cut
self._obsid = obsid
self.extra_path = extra_path
if datapath is None:
# take default path if none given
self._datapath = Path(data_root) / 'clustering'
elif Path(datapath).is_absolute():
# if given datapath is absolute, take only that:
self._datapath = Path(datapath)
else:
# if it is relative, add it to data_root
self._datapath = Path(data_root) / datapath
self.suffix = suffix
# point reader to correct function depending on required suffix
if suffix in ['.hdf', '.h5']:
self.reader = pd.read_hdf
elif suffix == '.csv':
self.reader = pd.read_csv
        # fail fast if the data isn't where it's expected to be
if id_ != '':
if not self.path_so_far.exists():
raise FileNotFoundError(f"{self.path_so_far} does not exist.")
@property
def id(self):
return self._id
@id.setter
def id(self, value):
if value is not None:
self._id = check_and_pad_id(value)
@property
def clustering_logfile(self):
return self.fanfile.parent / 'clustering_settings.yaml'
@property
def obsid(self):
        if self._obsid == '':
            if self.id != '':
LOGGER.debug("Entering obsid search for known image_id.")
db = DBManager()
data = db.get_image_id_markings(self.id)
try:
obsid = data.image_name.iloc[0]
except IndexError:
raise IndexError("obsid access broken. Did you forget to use the `obsid` keyword"
" at initialization?")
LOGGER.debug("obsid found: %s", obsid)
self._obsid = obsid
return self._obsid
@obsid.setter
def obsid(self, value):
self._obsid = value
@property
def obsid_results_savefolder(self):
subfolder = 'p4_catalog' if self.datapath is None else self.datapath
savefolder = analysis_folder() / subfolder
savefolder.mkdir(exist_ok=True, parents=True)
return savefolder
@property
def obsid_final_fans_path(self):
return self.obsid_results_savefolder / f"{self.obsid}_fans.csv"
@property
def obsid_final_blotches_path(self):
return self.obsid_results_savefolder / f"{self.obsid}_blotches.csv"
@property
def datapath(self):
return self._datapath
@property
def path_so_far(self):
p = self.datapath
p /= self.extra_path
p /= self.obsid
return p
@property
def L1A_folder(self):
"Subfolder name for the clustered data before fnotching."
return 'L1A'
@property
def L1B_folder(self):
"Subfolder name for the fnotched data, before cut is applied."
return 'L1B'
@property
def L1C_folder(self):
"subfolder name for the final catalog after applying `cut`."
return 'L1C_cut_{:.1f}'.format(self.cut)
def get_path(self, marking, specific=''):
p = self.path_so_far
# now add the image_id
try:
p /= self.id
except TypeError:
logging.warning("self.id not set. Storing in obsid level.")
id_ = self.id if self.id != '' else self.obsid
# add the specific sub folder
p /= specific
if specific != '':
p /= f"{id_}_{specific}_{marking}{self.suffix}"
else:
# prepend the data level to file name if given.
p /= f"{id_}_{marking}{self.suffix}"
return p
def get_obsid_paths(self, level):
"""get all existing paths for a given data level.
Parameters
----------
level : {'L1A', 'L1B', 'L1C'}
"""
folder = self.path_so_far
# cast to upper case for the lazy... ;)
level = level.upper()
image_id_paths = [item for item in folder.glob('*') if item.is_dir()]
bucket = []
for p in image_id_paths:
try:
bucket.append(next(p.glob(f"{level}*")))
except StopIteration:
continue
return bucket
def get_df(self, fpath):
return self.reader(str(fpath))
@property
def fanfile(self):
return self.get_path('fans', self.L1A_folder)
@property
def fandf(self):
return self.get_df(self.fanfile)
@property
def reduced_fanfile(self):
return self.get_path('fans', self.L1B_folder)
@property
def reduced_fandf(self):
return self.get_df(self.reduced_fanfile)
@property
def final_fanfile(self):
return self.get_path('fans', self.L1C_folder)
@property
def final_fandf(self):
return self.get_df(self.final_fanfile)
@property
def blotchfile(self):
return self.get_path('blotches', self.L1A_folder)
@property
def blotchdf(self):
return self.get_df(self.blotchfile)
@property
def reduced_blotchfile(self):
return self.get_path('blotches', self.L1B_folder)
@property
def reduced_blotchdf(self):
return self.get_df(self.reduced_blotchfile)
@property
def final_blotchfile(self):
return self.get_path('blotches', self.L1C_folder)
@property
def final_blotchdf(self):
return self.get_df(self.final_blotchfile)
@property
def fnotchfile(self):
return self.get_path('fnotches', self.L1B_folder)
@property
def fnotchdf(self):
# the fnotchfile has an index, so i need to read that here:
return pd.read_csv(self.fnotchfile, index_col=0)
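# Rough usage sketch for PathManager (the id_ and obsid values below are made-up
# placeholders, and the corresponding folders must already exist on disk):
#
#   pm = PathManager(id_='APF0000abc', datapath='clustering', obsid='ESP_012345_0950')
#   pm.fanfile                  # .../clustering/ESP_012345_0950/APF0000abc/L1A/APF0000abc_L1A_fans.csv
#   pm.reduced_blotchfile       # same layout, but in the L1B subfolder
#   pm.get_obsid_paths('L1A')   # paths to existing L1A results under this obsid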
class DBManager(object):
"""Access class for database activities.
Provides easy access to often used data items.
Parameters
----------
dbname : str, optional
Path to database file to be used. Default: use get_latest_cleaned_db() to
find it.
Attributes
----------
image_names
image_ids
n_image_ids
n_image_names
obsids : Alias to image_ids
season2and3_image_names
"""
def __init__(self, dbname=None):
"""Initialize DBManager class.
Parameters
----------
dbname : <str>
Filename of database file to use. Default: Latest produced full
database.
"""
if dbname is None:
self.dbname = str(get_latest_cleaned_db())
else:
self.dbname = str(dbname)
def __repr__(self):
s = "Database root: {}\n".format(Path(self.dbname).parent)
s += "Database name: {}\n".format(Path(self.dbname).name)
return s
@property
def orig_csv(self):
p = Path(self.dbname)
return p.parent / (p.name[:38] + '.csv')
def set_latest_with_dupes_db(self, datadir=None):
datadir = data_root if datadir is None else Path(datadir)
h5files = datadir.glob('201*_queryable.h5')
dbname = get_latest_file(h5files)
print("Setting {} as dbname.".format(dbname.name))
self.dbname = str(dbname)
@property
def image_names(self):
"""Return list of unique obsids used in database.
See also
--------
get_image_names_from_db
"""
return get_image_names_from_db(self.dbname)
@property
def image_ids(self):
"Return list of unique image_ids in database."
with pd.HDFStore(self.dbname) as store:
return store.select_column('df', 'image_id').unique()
@property
def n_image_ids(self):
return len(self.image_ids)
@property
def n_image_names(self):
return len(self.image_names)
@property
def obsids(self):
"Alias to self.image_names."
return self.image_names
def get_all(self, datadir=None):
return pd.read_hdf(str(self.dbname), 'df')
def get_obsid_markings(self, obsid):
"Return marking data for given HiRISE obsid."
return pd.read_hdf(self.dbname, 'df', where='image_name=' + obsid)
def get_image_name_markings(self, image_name):
"Alias for get_obsid_markings."
return self.get_obsid_markings(image_name)
def get_image_id_markings(self, image_id):
"Return marking data for one Planet4 image_id"
image_id = check_and_pad_id(image_id)
return pd.read_hdf(self.dbname, 'df', where='image_id=' + image_id)
def get_data_for_obsids(self, obsids):
bucket = []
for obsid in obsids:
bucket.append(self.get_obsid_markings(obsid))
return pd.concat(bucket, ignore_index=True)
def get_classification_id_data(self, class_id):
"Return data for one classification_id"
return pd.read_hdf(self.dbname, 'df',
where="classification_id=='{}'".format(class_id))
@property
def season2and3_image_names(self):
"numpy.array : List of image_names for season 2 and 3."
image_names = self.image_names
metadf = pd.DataFrame(
|
pd.Series(image_names)
|
pandas.Series
|
import pickle
from abc import ABC, abstractmethod # abstract base class
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import torch
from .modelbuilder import (build_pytorch_nnet, default_skorch_nnet,
default_scaled_nnet)
from .preprocessing import LogNormaliser, FeatureSelect
def rmse(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
def mean_chisq(ydiff_sq, y_err):
return np.mean(ydiff_sq / np.square(y_err))
METRICS = {'r2': r2_score, 'mse': mean_squared_error,
'rmse': rmse, 'mean_chisq': mean_chisq}
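# Quick sanity check of the two custom metrics above (toy numbers, purely illustrative):
#   rmse(np.array([1.0, 2.0]), np.array([1.0, 4.0]))        -> sqrt(mean([0, 4]))  = sqrt(2) ~ 1.414
#   mean_chisq(np.array([1.0, 4.0]), np.array([1.0, 2.0]))  -> mean([1/1, 4/4])    = 1.0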
class SinglePredictor(ABC):
"""Single model, trained on one train/test split.
Either a regressor or uncertainty estimator. """
def __init__(self, d_data):
if d_data is None:
with open('./data/d_data.pkl', 'rb') as ddf_file:
d_data = pickle.load(ddf_file)
self.d_data = d_data
self.X, self.Y = None, None
self.X_train, self.X_test = None, None
self.Y_train, self.Y_test = None, None
self.log_normaliser = None
self.model = None
# Extra factor for predictions (uncertainty estimator)
self.correction_factor = 1
def preprocess(self, idx_train=0.75, idx_test=None, **kwargs):
"""The default preprocessing for the predictor.
Parameters
----------
idx_train : array or float, default 0.75
Array of galaxy ids that are used for training.
If float, the fraction of samples used for training.
idx_test : array or None, default None
Array of galaxy ids used for testing.
If None, use the remaining samples.
Y_pred : DataFrame or None, default None
Only used (but mandatory) for uncertainty estimator.
The uncertainty estimator does not use Y directly, but
(Y_true - Y_pred)^2 as a target.
"""
# Select features and target
self.X = self._feature_select()
self.Y = FeatureSelect.select_y(self.d_data)
# Log normalise the fluxes
xcols = self.X.columns
ignore_bands = list(xcols[~xcols.isin(self.d_data['fullbay'].columns)])
kwargs.setdefault('ignore_bands', ignore_bands)
self.log_normaliser = LogNormaliser(**kwargs)
self.X, self.Y = self.log_normaliser.transform(self.X, self.Y)
self.train_test_split(idx_train, idx_test)
def train(self, model=None, apply_correction=True, **predictor_kwargs):
"""Train the model."""
if model is None:
model = self._get_default_model(**predictor_kwargs)
self.model = model
# Skorch only supports numpy arrays, no DataFrames
self.model.fit(self.to_array(self.X_train),
self.to_array(self.Y_train))
self.Y_pred = self.predict(self.X)
self.Y_pred_train = self.Y_pred.loc[self.X_train.index, :]
self.Y_pred_test = self.Y_pred.loc[self.X_test.index, :]
# Uncertainty estimator: correct to unit validation mean chisq
self._apply_correction(apply_correction)
def predict_idx(self, idx):
"""Predict on a set of indices (which are in X)"""
idx = pd.Index(idx)
        if not np.all(idx.isin(self.X.index)):
raise ValueError("Not all indices in X!")
return self.predict(self.X.loc[idx, :])
def predict(self, X):
"""Predict on a given set of inputs"""
Y_pred = self.model.predict(self.to_array(X))
Y_pred = pd.DataFrame(Y_pred, index=X.index,
columns=self.Y_test.columns)
Y_pred = Y_pred * self.correction_factor
return Y_pred
def test(self, metric=None, tset='test', multi_band=True, **kwargs):
"""
Evaluate the model with a given metric
Parameters
----------
metric : string, callable, or None, default None
if string : a metric available in METRICS
if callable, a metric taking (y_t, y_p) as arguments
if None, use 'rmse' for reg and 'mean_chisq' for uncertainty estimator.
tset : 'test' or 'train', default 'test'
multi_band : bool
Return a pd.Series, with each target column having a metric
kwargs : keyword arguments passed to the metric function
"""
y_t, y_p = self.get_target_set(tset)
if metric is None:
metric = self._get_default_metric()
if metric in METRICS:
metric_name = metric
metric = METRICS[metric]
elif not callable(metric):
raise ValueError("Metric must be in METRICS or callable.")
else:
metric_name = 'score'
if multi_band:
li_score = [metric(y_t[band], y_p[band], **kwargs)
for band in self.Y.columns]
return
|
pd.Series(li_score, name=metric_name, index=self.Y.columns)
|
pandas.Series
|
####
#### July 2. This is a copy of the earlier version, which plotted a single year.
#### Here we extend it to two years: from August of a given year through the end
#### of the next year.
####
import matplotlib.backends.backend_pdf
import csv
import numpy as np
import pandas as pd
# import geopandas as gpd
from IPython.display import Image
# from shapely.geometry import Point, Polygon
from math import factorial
import datetime
from datetime import date
import time
import scipy
import scipy.signal
import os, os.path
import matplotlib
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LinearRegression
from patsy import cr
# from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sb
from pandas.plotting import register_matplotlib_converters
|
register_matplotlib_converters()
|
pandas.plotting.register_matplotlib_converters
|
'''
Simple vanilla LSTM multiclass classifier for raw EEG data
'''
import scipy.io as spio
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import gc
import h5py
def loadmat(filename):
def _check_keys(d):
'''
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
'''
for key in d:
if isinstance(d[key], spio.matlab.mio5_params.mat_struct):
d[key] = _todict(d[key])
return d
def _has_struct(elem):
"""Determine if elem is an array and if any array item is a struct"""
return isinstance(elem, np.ndarray) and any(isinstance(
e, spio.matlab.mio5_params.mat_struct) for e in elem)
def _todict(matobj):
'''
A recursive function which constructs from matobjects nested dictionaries
'''
d = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, spio.matlab.mio5_params.mat_struct):
d[strg] = _todict(elem)
elif _has_struct(elem):
d[strg] = _tolist(elem)
else:
d[strg] = elem
return d
def _tolist(ndarray):
'''
A recursive function which constructs lists from cellarrays
(which are loaded as numpy ndarrays), recursing into the elements
if they contain matobjects.
'''
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, spio.matlab.mio5_params.mat_struct):
elem_list.append(_todict(sub_elem))
elif _has_struct(sub_elem):
elem_list.append(_tolist(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
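# Typical use (the file name is a placeholder): nested MATLAB structs come back as
# plain Python dicts, e.g.
#   data = loadmat('EEG_data.mat')
#   data['EEG_Data']['prompts']   # nested dicts / numpy arrays instead of mat_struct objects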
"""Helper function to truncate dataframes to a specified shape - usefull to reduce all EEG trials to the same number
of time stamps.
"""
def truncate(arr, shape):
desired_size_factor = np.prod([n for n in shape if n != -1])
if -1 in shape: # implicit array size
desired_size = arr.size // desired_size_factor * desired_size_factor
else:
desired_size = desired_size_factor
return arr.flat[:desired_size].reshape(shape)
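# Example: truncate(np.arange(10), (2, 4)) keeps the first 8 values and returns
#   [[0, 1, 2, 3],
#    [4, 5, 6, 7]]
# With -1 in the shape, e.g. (-1, 4), the number of rows is inferred from the array size.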
def main():
PATH = "G:\\UWA_MDS\\2021SEM1\\Research_Project\\KARA_ONE_Data\\ImaginedSpeechData\\"
subjects = ['MM05', 'MM08', 'MM09', 'MM10', 'MM11', 'MM12', 'MM14', 'MM15', 'MM16', 'MM18', 'MM19', 'MM20', 'MM21', 'P02']
for subject in subjects:
print("Working on Subject: " + subject)
print("Loading .set data")
""" Load EEG data with loadmat() function"""
SubjectData = loadmat(PATH + subject + '\\EEG_data.mat')
print("Setting up dataframes")
""" Setup target and EEG dataframes"""
targets = pd.DataFrame(SubjectData['EEG_Data']['prompts'])
targets.columns = ['prompt']
sequences = pd.DataFrame(SubjectData['EEG_Data']['activeEEG'])
sequences.columns = ['trials']
EEG = pd.concat([sequences.reset_index(drop=True),targets.reset_index(drop=True)], axis=1)
words = ['gnaw', 'pat', 'knew', 'pot']
EEG = EEG.loc[EEG['prompt'].isin(words)]
EEG = EEG.reset_index(drop=True)
sequences = pd.DataFrame(EEG['trials'])
targets = pd.DataFrame(EEG['prompt'])
seq = np.asarray(sequences['trials'])
        for i in range(0, len(seq)):
            seq[i] = seq[i].transpose()
sequences['trials'] = seq
print("Train / Test splitting data")
#Stratified train test splits
train_x, test_x, train_y, test_y = train_test_split(sequences, targets, stratify=targets, test_size=0.2, random_state=9)
#Encode target prompts to 0/1
train_y=
|
pd.get_dummies(train_y['prompt'])
|
pandas.get_dummies
|
import numpy as np
import pandas as pd
import anndata
import matplotlib.pyplot as plt
import seaborn as sns
from natsort import natsorted
def plot_adt_hist(adt, attr, out_file, alpha=0.5, dpi=500, figsize=None):
idx_signal = np.isin(adt.obs[attr], "signal")
signal = adt.obs.loc[idx_signal, "counts"]
background = adt.obs.loc[~idx_signal, "counts"]
bins = np.logspace(0, np.log10(max(signal.max(), background.max())), 501)
plt.hist(background, bins, alpha=alpha, label="background", log=True)
plt.hist(signal, bins, alpha=alpha, label="signal", log=True)
plt.legend(loc="upper right")
ax = plt.gca()
ax.set_xscale("log")
ax.set_xlabel("Number of hashtag UMIs (log10 scale)")
ax.set_ylabel("Number of cellular barcodes (log10 scale)")
if figsize is not None:
plt.gcf().set_size_inches(*figsize)
plt.savefig(out_file, dpi=dpi)
plt.close()
def plot_rna_hist(
data, out_file, plot_attr="n_counts", cat_attr="demux_type", dpi=500, figsize=None
):
bins = np.logspace(
np.log10(min(data.obs[plot_attr])), np.log10(max(data.obs[plot_attr])), 101
)
cat_vec = data.obs[cat_attr]
ax = plt.gca()
if cat_attr == "demux_type":
ax.hist(
data.obs.loc[np.isin(cat_vec, "singlet"), plot_attr],
bins,
alpha=0.5,
label="singlet",
)
ax.hist(
data.obs.loc[np.isin(cat_vec, "doublet"), plot_attr],
bins,
alpha=0.5,
label="doublet",
)
ax.hist(
data.obs.loc[np.isin(cat_vec, "unknown"), plot_attr],
bins,
alpha=0.5,
label="unknown",
)
ax.legend(loc="upper right")
ax.set_xscale("log")
ax.set_xlabel("Number of RNA UMIs (log10 scale)")
ax.set_ylabel("Number of cellular barcodes")
if figsize is not None:
plt.gcf().set_size_inches(*figsize)
plt.savefig(out_file, dpi=dpi)
plt.close()
def plot_bar(heights, tick_labels, xlabel, ylabel, out_file, dpi=500, figsize=None):
plt.bar(
x=np.linspace(0.5, heights.size - 0.5, heights.size),
height=heights,
tick_label=tick_labels,
)
ax = plt.gca()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if figsize is not None:
plt.gcf().set_size_inches(*figsize)
rotation = 90 if max([len(x) for x in tick_labels]) > 6 else 0
plt.tick_params(axis="x", labelsize=7, labelrotation=rotation)
plt.tight_layout()
plt.savefig(out_file, dpi=dpi)
plt.close()
def plot_dataframe_bar(df, ylabel, out_file, dpi=500, figsize=None):
if df.shape[1] == 1:
df.plot.bar(legend=False)
else:
df.plot.bar()
ax = plt.gca()
ax.set_ylabel(ylabel)
if figsize is not None:
plt.gcf().set_size_inches(*figsize)
plt.savefig(out_file, dpi=dpi)
plt.close()
# attrs is a dict with name: attr format; if this is a gene violin, attrs == {gene: gene_name}
def plot_violin(
data,
attrs,
out_file,
xlabel=None,
ylabel=None,
title=None,
dpi=500,
figsize=None,
linewidth=None,
log=False,
inner="box",
):
df = None
if "gene" in attrs:
df = pd.DataFrame(
data[:, attrs["gene"]].X.toarray(),
index=data.obs_names,
columns=[attrs["gene"]],
)
df["assignment"] = data.obs["demux_type"].astype(str)
idx_singlet = np.isin(data.obs["demux_type"], "singlet")
singlets = data.obs.loc[idx_singlet, "assignment"].astype(str)
df.loc[idx_singlet, "assignment"] = singlets
categories = natsorted(singlets.unique())
categories.extend(["doublet", "unknown"])
df["assignment"] = pd.Categorical(df["assignment"], categories=categories)
xlabel = "assignment"
ylabel = attrs["gene"]
else:
dfs = []
if isinstance(data, anndata.base.AnnData):
for name, attr in attrs.items():
dfs.append(pd.DataFrame({xlabel: name, ylabel: data.obs[attr].values}))
else:
for arr, name in zip(data, attrs):
dfs.append(pd.DataFrame({xlabel: name, ylabel: arr}))
df =
|
pd.concat(dfs)
|
pandas.concat
|
#################################################################
#################################################################
############### Clustergrammer
#################################################################
#################################################################
#############################################
########## 1. Load libraries
#############################################
##### 1. General support #####
import requests
import os
import numpy as np
from IPython.display import display, Markdown, IFrame
import tempfile
import scipy.stats as ss
import pandas as pd
##### 2. Other libraries #####
#######################################################
#######################################################
########## S1. Function
#######################################################
#######################################################
#############################################
########## 1. Run
#############################################
def run(dataset, normalization='logCPM', z_score=True, nr_genes=1500, metadata_cols=None, filter_samples=True):
# Get data
data = dataset[normalization].copy()
# Filter columns
if filter_samples and dataset.get('signature_metadata'):
selected_samples = [sample for samples in list(dataset['signature_metadata'].values())[0].values() for sample in samples]
data = data[selected_samples]
# Get tempfile
(fd, filename) = tempfile.mkstemp()
filename = filename+'.txt'
try:
# Get variable subset
data = data.loc[data.var(axis=1).sort_values(ascending=False).index[:nr_genes]]
# Z-score
if z_score == True or z_score == 'True':
data = data.T.apply(ss.zscore, axis=0).T
# Sample metadata
sample_metadata = dataset['sample_metadata'].copy()
# For uploaded files
if sample_metadata.index.name == 'Sample' or dataset['dataset_metadata']['source'] == 'gtex':
sample_metadata =
|
pd.Series(index=sample_metadata.index, data=sample_metadata.index, name='Sample')
|
pandas.Series
|
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.feature_generation.elementary_arithmethics import ElementaryArithmetics
@pytest.fixture
def data_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), coef=-2.0, operator="+"
).fit(X)
return obj, X, X_expected
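# The expected frames above suggest the transformer computes A + coef * B, with
# coef = -2.0 here; e.g. for the row [3, 4, 5]:
#   A__-__B = 3 + (-2.0) * 4 = -5.0
#   A__-__C = 3 + (-2.0) * 5 = -7.0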
@pytest.fixture
def data_float32_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
).astype(np.float32)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
dtype=np.float32,
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_name_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A+B", "A+C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
column_names=["A+B", "A+C"],
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_mult():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0.0],
[3.0, 4.0, 5.0, 12.0, 15.0],
[6.0, 7.0, 8.0, 42.0, 48.0],
]
),
columns=["A", "B", "C", "A__*__B", "A__*__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="*"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_div():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0],
[3.0, 4.0, 5.0, 0.75, 0.59999988],
[6.0, 7.0, 8.0, 0.85714286, 0.7499999],
]
),
columns=["A", "B", "C", "A__/__B", "A__/__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="/"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), coef=-2.0, operator="+"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_float32_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
).astype(np.float32)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
dtype=np.float32,
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_name_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A+B", "A+C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
column_names=["A+B", "A+C"],
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_mult_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0.0],
[3.0, 4.0, 5.0, 12.0, 15.0],
[6.0, 7.0, 8.0, 42.0, 48.0],
]
),
columns=["A", "B", "C", "A__*__B", "A__*__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="*"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_div_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0],
[3.0, 4.0, 5.0, 0.75, 0.59999988],
[6.0, 7.0, 8.0, 0.85714286, 0.7499999],
]
),
columns=["A", "B", "C", "A__/__B", "A__/__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="/"
).fit(X)
return obj, X, X_expected
def test_add_pd(data_add):
obj, X, X_expected = data_add
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_add_ks(data_add_ks):
obj, X, X_expected = data_add_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_add_pd_np(data_add):
obj, X, X_expected = data_add
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_add_ks_np(data_add_ks):
obj, X, X_expected = data_add_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_float32_add_pd(data_float32_add):
obj, X, X_expected = data_float32_add
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_add_ks_ks(data_float32_add_ks):
obj, X, X_expected = data_float32_add_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_float32_add_pd_np(data_float32_add):
obj, X, X_expected = data_float32_add
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_add_ks_np_ks(data_float32_add_ks):
obj, X, X_expected = data_float32_add_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_mult_pd(data_mult):
obj, X, X_expected = data_mult
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_mult_ks(data_mult_ks):
obj, X, X_expected = data_mult_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_mult_pd_np(data_mult):
obj, X, X_expected = data_mult
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_mult_ks_np(data_mult_ks):
obj, X, X_expected = data_mult_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_div_pd(data_div):
obj, X, X_expected = data_div
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_div_ks(data_div_ks):
obj, X, X_expected = data_div_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_div_pd_np(data_div):
obj, X, X_expected = data_div
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_div_ks_np(data_div_ks):
obj, X, X_expected = data_div_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_name_add_pd(data_name_add):
obj, X, X_expected = data_name_add
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_name_add_ks_ks(data_name_add_ks):
obj, X, X_expected = data_name_add_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_name_add_pd_np(data_name_add):
obj, X, X_expected = data_name_add
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_name_add_ks_np_ks(data_name_add_ks):
obj, X, X_expected = data_name_add_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new =
|
pd.DataFrame(X_numpy_new)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/4/10 17:42
Desc: Eastmoney Data Center - Featured Data - Equity Pledge
Eastmoney - equity pledge market overview: http://data.eastmoney.com/gpzy/marketProfile.aspx
Eastmoney - pledge ratios of listed companies: http://data.eastmoney.com/gpzy/pledgeRatio.aspx
Eastmoney - pledge details of major shareholders: http://data.eastmoney.com/gpzy/pledgeDetail.aspx
Eastmoney - pledgee distribution statistics - securities firms: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
Eastmoney - pledgee distribution statistics - banks: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
Eastmoney - pledge ratio industry data: http://data.eastmoney.com/gpzy/industryData.aspx
"""
import math
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils import demjson
def stock_gpzy_profile_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Equity pledge market overview
    http://data.eastmoney.com/gpzy/marketProfile.aspx
    :return: equity pledge market overview
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "TRADE_DATE",
"sortTypes": "-1",
"pageSize": "5000",
"pageNumber": "1",
"reportName": "RPT_CSDC_STATISTICS",
"columns": "ALL",
"quoteColumns": "",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.columns = [
"交易日期",
"质押总股数",
"质押总市值",
"沪深300指数",
"涨跌幅",
"A股质押总比例",
"质押公司数量",
"质押笔数",
]
temp_df = temp_df[
[
"交易日期",
"A股质押总比例",
"质押公司数量",
"质押笔数",
"质押总股数",
"质押总市值",
"沪深300指数",
"涨跌幅",
]
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"]).dt.date
temp_df["A股质押总比例"] = pd.to_numeric(temp_df["A股质押总比例"])
temp_df["质押公司数量"] = pd.to_numeric(temp_df["质押公司数量"])
temp_df["质押笔数"] = pd.to_numeric(temp_df["质押笔数"])
temp_df["质押总股数"] = pd.to_numeric(temp_df["质押总股数"])
temp_df["质押总市值"] = pd.to_numeric(temp_df["质押总市值"])
temp_df["沪深300指数"] = pd.to_numeric(temp_df["沪深300指数"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["A股质押总比例"] = temp_df["A股质押总比例"] / 100
temp_df.sort_values(["交易日期"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
return temp_df
def stock_gpzy_pledge_ratio_em(date: str = "20220408") -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledge ratios of listed companies
    http://data.eastmoney.com/gpzy/pledgeRatio.aspx
    :param date: trade date to query; see http://data.eastmoney.com/gpzy/pledgeRatio.aspx for available dates
    :type date: str
    :return: pledge ratios of listed companies
:rtype: pandas.DataFrame
"""
trade_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "PLEDGE_RATIO",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_CSDC_LIST",
"columns": "ALL",
"quoteColumns": "",
"source": "WEB",
"client": "WEB",
"filter": f"(TRADE_DATE='{trade_date}')",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"股票代码",
"股票简称",
"交易日期",
"质押比例",
"质押股数",
"质押笔数",
"无限售股质押数",
"限售股质押数",
"质押市值",
"所属行业",
"近一年涨跌幅",
"-",
]
big_df = big_df[
[
"序号",
"股票代码",
"股票简称",
"交易日期",
"所属行业",
"质押比例",
"质押股数",
"质押市值",
"质押笔数",
"无限售股质押数",
"限售股质押数",
"近一年涨跌幅",
]
]
big_df["质押比例"] = pd.to_numeric(big_df["质押比例"])
big_df["质押股数"] = pd.to_numeric(big_df["质押股数"])
big_df["质押市值"] = pd.to_numeric(big_df["质押市值"])
big_df["质押笔数"] = pd.to_numeric(big_df["质押笔数"])
big_df["无限售股质押数"] = pd.to_numeric(big_df["无限售股质押数"])
big_df["限售股质押数"] = pd.to_numeric(big_df["限售股质押数"])
big_df["近一年涨跌幅"] = pd.to_numeric(big_df["近一年涨跌幅"])
big_df["交易日期"] = pd.to_datetime(big_df["交易日期"]).dt.date
return big_df
def _get_page_num_gpzy_market_pledge_ratio_detail() -> int:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledge details of major shareholders
    http://data.eastmoney.com/gpzy/pledgeDetail.aspx
    :return: int, total number of pages of major-shareholder pledge details
"""
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "NOTICE_DATE",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPTA_APP_ACCUMDETAILS",
"columns": "ALL",
"quoteColumns": "",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = math.ceil(int(data_json["result"]["count"]) / 500)
return total_page
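# The page count simply divides the record count by the 500-row page size, e.g.
# a count of 1234 records gives math.ceil(1234 / 500) = 3 pages.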
def stock_gpzy_pledge_ratio_detail_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledge details of major shareholders
http://data.eastmoney.com/gpzy/pledgeDetail.aspx
:return: pandas.DataFrame
"""
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
total_page = _get_page_num_gpzy_market_pledge_ratio_detail()
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params = {
"sortColumns": "NOTICE_DATE",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": page,
"reportName": "RPTA_APP_ACCUMDETAILS",
"columns": "ALL",
"quoteColumns": "",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"股票简称",
"_",
"股票代码",
"股东名称",
"_",
"_",
"_",
"公告日期",
"质押机构",
"质押股份数量",
"占所持股份比例",
"占总股本比例",
"质押日收盘价",
"质押开始日期",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"预估平仓线",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"最新价",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
big_df = big_df[
[
"序号",
"股票代码",
"股票简称",
"股东名称",
"质押股份数量",
"占所持股份比例",
"占总股本比例",
"质押机构",
"最新价",
"质押日收盘价",
"预估平仓线",
"质押开始日期",
"公告日期",
]
]
big_df["质押股份数量"] = pd.to_numeric(big_df["质押股份数量"])
big_df["占所持股份比例"] = pd.to_numeric(big_df["占所持股份比例"])
big_df["占总股本比例"] = pd.to_numeric(big_df["占总股本比例"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["质押日收盘价"] = pd.to_numeric(big_df["质押日收盘价"])
big_df["预估平仓线"] = pd.to_numeric(big_df["预估平仓线"])
big_df["公告日期"] = pd.to_datetime(big_df["公告日期"]).dt.date
big_df["质押开始日期"] = pd.to_datetime(big_df["质押开始日期"]).dt.date
return big_df
def _get_page_num_gpzy_distribute_statistics_company() -> int:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledgee distribution statistics - securities firms
    http://data.eastmoney.com/gpzy/distributeStatistics.aspx
    :return: int, total number of pages of pledgee distribution statistics (securities firms)
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
params = {
"type": "GDZY_ZYJG_SUM",
"token": "7<PASSWORD>",
"cmd": "",
"st": "scode_count",
"sr": "-1",
"p": "1",
"ps": "5000",
"js": "var bLnpEFtJ={pages:(tp),data:(x),font:(font)}",
"filter": "(hy_name='券商信托')",
"rt": "52584592",
}
res = requests.get(url, params=params)
data_json = demjson.decode(res.text[res.text.find("={") + 1 :])
return data_json["pages"]
def stock_em_gpzy_distribute_statistics_company() -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledgee distribution statistics - securities firms
http://data.eastmoney.com/gpzy/distributeStatistics.aspx
:return: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
page_num = _get_page_num_gpzy_distribute_statistics_company()
temp_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=True):
params = {
"type": "GDZY_ZYJG_SUM",
"token": "7<PASSWORD>",
"cmd": "",
"st": "scode_count",
"sr": "-1",
"p": str(page),
"ps": "5000",
"js": "var bLnpEFtJ={pages:(tp),data:(x),font:(font)}",
"filter": "(hy_name='券商信托')",
"rt": "52584592",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
map_dict = dict(
zip(
pd.DataFrame(data_json["font"]["FontMapping"])["code"],
pd.DataFrame(data_json["font"]["FontMapping"])["value"],
)
)
for key, value in map_dict.items():
data_text = data_text.replace(key, str(value))
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
temp_df.columns = [
"质押公司股票代码",
"_",
"jg_yjx_type_1",
"jg_yjx_type_2",
"质押机构",
"行业名称",
"质押公司数量",
"质押笔数",
"质押数量(股)",
"未达预警线比例(%)",
"达到预警线未达平仓线比例(%)",
"达到平仓线比例(%)",
]
temp_df = temp_df[
[
"质押公司股票代码",
"质押机构",
"行业名称",
"质押公司数量",
"质押笔数",
"质押数量(股)",
"未达预警线比例(%)",
"达到预警线未达平仓线比例(%)",
"达到平仓线比例(%)",
]
]
return temp_df
def _get_page_num_gpzy_distribute_statistics_bank() -> int:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledgee distribution statistics - banks
    http://data.eastmoney.com/gpzy/distributeStatistics.aspx
    :return: int, total number of pages of pledgee distribution statistics (banks)
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
params = {
"type": "GDZY_ZYJG_SUM",
"token": "7<PASSWORD>",
"cmd": "",
"st": "scode_count",
"sr": "-1",
"p": "1",
"ps": "5000",
"js": "var AQxIdDuK={pages:(tp),data:(x),font:(font)}",
"filter": "(hy_name='银行')",
"rt": "52584617",
}
res = requests.get(url, params=params)
data_json = demjson.decode(res.text[res.text.find("={") + 1 :])
return data_json["pages"]
def stock_em_gpzy_distribute_statistics_bank() -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledgee distribution statistics - banks
http://data.eastmoney.com/gpzy/distributeStatistics.aspx
:return: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
    page_num = _get_page_num_gpzy_distribute_statistics_bank()
temp_df = pd.DataFrame()
for page in range(1, page_num + 1):
print(f"一共{page_num}页, 正在下载第{page}页")
params = {
"type": "GDZY_ZYJG_SUM",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "scode_count",
"sr": "-1",
"p": str(page),
"ps": "5000",
"js": "var AQxIdDuK={pages:(tp),data:(x),font:(font)}",
"filter": "(hy_name='银行')",
"rt": "52584617",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
map_dict = dict(
zip(
pd.DataFrame(data_json["font"]["FontMapping"])["code"],
pd.DataFrame(data_json["font"]["FontMapping"])["value"],
)
)
for key, value in map_dict.items():
data_text = data_text.replace(key, str(value))
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
temp_df.columns = [
"质押公司股票代码",
"_",
"jg_yjx_type_1",
"jg_yjx_type_2",
"质押机构",
"行业名称",
"质押公司数量",
"质押笔数",
"质押数量(股)",
"未达预警线比例(%)",
"达到预警线未达平仓线比例(%)",
"达到平仓线比例(%)",
]
temp_df = temp_df[
[
"质押公司股票代码",
"质押机构",
"行业名称",
"质押公司数量",
"质押笔数",
"质押数量(股)",
"未达预警线比例(%)",
"达到预警线未达平仓线比例(%)",
"达到平仓线比例(%)",
]
]
return temp_df
def stock_gpzy_industry_data_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Equity Pledge - Pledge ratio industry data
http://data.eastmoney.com/gpzy/industryData.aspx
:return: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "AVERAGE_PLEDGE_RATIO",
"sortTypes": "-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_CSDC_INDUSTRY_STATISTICS",
"columns": "INDUSTRY_CODE,INDUSTRY,TRADE_DATE,AVERAGE_PLEDGE_RATIO,ORG_NUM,PLEDGE_TOTAL_NUM,TOTAL_PLEDGE_SHARES,PLEDGE_TOTAL_MARKETCAP",
"quoteColumns": "",
"source": "WEB",
"client": "WEB",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.columns = [
"序号",
"-",
"行业",
"统计时间",
"平均质押比例",
"公司家数",
"质押总笔数",
"质押总股本",
"最新质押市值",
]
temp_df = temp_df[["序号", "行业", "平均质押比例", "公司家数", "质押总笔数", "质押总股本", "最新质押市值", "统计时间"]]
temp_df["统计时间"] = pd.to_datetime(temp_df["统计时间"]).dt.date
temp_df['平均质押比例'] = pd.to_numeric(temp_df['平均质押比例'])
temp_df['公司家数'] = pd.to_numeric(temp_df['公司家数'])
temp_df['质押总笔数'] = pd.to_numeric(temp_df['质押总笔数'])
temp_df['质押总股本'] = pd.to_numeric(temp_df['质押总股本'])
temp_df['最新质押市值'] = pd.to_numeri
|
c(temp_df['最新质押市值'])
|
pandas.to_numeric
|
import pandas as pd
import numpy as np
import dateutil
import networkx as nx
ADULT_AGE = 18
def get_hmis_cp():
"""
Pull in relevant CSVs from `../data/`, merge them, clean them, and return a tuple containing the cleaned HMIS data
and the cleaned Connecting Point data.
"""
# get raw dataframes
hmis = get_raw_hmis()
cp = get_raw_cp()
# convert dates
hmis = hmis_convert_dates(hmis)
cp = cp_convert_dates(cp)
# compute client and family ids across the dataframes
(hmis, cp) = get_client_family_ids(hmis, cp)
# get child status
hmis = hmis_child_status(hmis)
cp = cp_child_status(cp)
# generate family characteristics
hmis_generate_family_characteristics(hmis)
cp_generate_family_characteristics(cp)
return (hmis, cp)
###################
# get_raw methods #
###################
def get_raw_hmis():
"""
Pull in relevant CSVs from `../data/`, merge them, and return the raw HMIS dataframe.
"""
program = pd.read_csv('../data/hmis/program with family.csv')
client = pd.read_csv('../data/hmis/client de-identified.csv')
# NOTE we're taking an inner join here because the program csv got pulled after
# the client csv, because we added the family site identifier column to program
program = program.merge(client, on='Subject Unique Identifier', how='inner')
return program
def get_raw_cp():
"""
Pull in relevant CSVs from `../data/`, merge them, and return the raw Connecting Point dataframe.
"""
case = pd.read_csv("../data/connecting_point/case.csv")
case = case.rename(columns={'caseid': 'Caseid'})
client = pd.read_csv("../data/connecting_point/client.csv")
case = case.merge(client, on='Caseid', how='left')
return case
#############################################
# get_client_family_ids and related methods #
#############################################
def get_client_family_ids(hmis, cp):
"""
Given raw HMIS and Connecting Point dataframes, de-duplicate individuals and determine families across time.
See the README for more information about rationale and methodology.
The graph contains IDs from both HMIS and Connecting Point, so each vertex is represented as a tuple `(c, id)`,
where `c` is either `'h'` or `'c'`, to indicate whether the `id` corresponds to a row in HMIS or Connecting Point.
For example, `('h', 1234)` represents the row(s) in HMIS with individual ID `1234`, and `('c',5678)` represents the
row(s) in Connecting Point with individual ID `5678`.
:param hmis: HMIS dataframe.
:type hmis: Pandas.Dataframe.
:param cp: Connecting Point dataframe.
:type cp: Pandas.Dataframe.
"""
hmis = hmis.rename(columns={'Subject Unique Identifier': 'Raw Subject Unique Identifier'})
cp = cp.rename(columns={'Clientid': 'Raw Clientid'})
# create graph of individuals
G_individuals = nx.Graph()
G_individuals.add_nodes_from([('h', v) for v in hmis['Raw Subject Unique Identifier'].values])
G_individuals.add_nodes_from([('c', v) for v in cp['Raw Clientid'].values])
# add edges between same individuals
G_individuals.add_edges_from(group_edges('h', pd.read_csv('../data/hmis/hmis_client_duplicates_link_plus.csv'), ['Set ID'], 'Subject Unique Identifier'))
G_individuals.add_edges_from(group_edges('c',
|
pd.read_csv('../data/connecting_point/cp_client_duplicates_link_plus.csv')
|
pandas.read_csv
|
import os
import glob
import collections
import cv2
import numpy as np
import pandas as pd
import pickle
import time
import settings
IMG_DIR = settings.IMG_DIR
VAL_FILE = settings.VAL_FILE
CLASS_FILE = settings.CLASS_FILE
BBOX_FILE = settings.BBOX_FILE
BBOX_BIN_FILE = os.path.join(settings.DATA_DIR, 'bbox.pk')
BBOX_BIN_FILE_SMALL = os.path.join(settings.DATA_DIR, 'bbox_small.pk')
BAD_IMG_IDS = set([])
MC_CSV = 'mc.csv'
MBB_CSV = 'mbb.csv'
def get_classes():
classes = []
with open(CLASS_FILE, 'r') as f:
for line in f:
classes.append(line.strip().split(',')[0])
return classes
def get_class_dict():
class_dict = {}
with open(CLASS_FILE, 'r') as f:
for line in f:
k, v = line.strip().split(',')
class_dict[k] = v
return class_dict
def get_class_id_converters():
itos = get_classes()
stoi = {itos[i]: i for i in range(len(itos))}
return itos, stoi
def get_class_names(ids):
c_dict = get_class_dict()
itos, stoi = get_class_id_converters()
return [c_dict[itos[i]] for i in ids]
def get_val_ids():
val_ids = []
with open(VAL_FILE, 'r') as f:
for i, line in enumerate(f):
if i == 0:
continue
val_ids.append(line.strip())
return val_ids
def get_train_ids(img_dir = IMG_DIR):
filenames = glob.glob(os.path.join(img_dir, '*.jpg'))
#print(len(filenames))
img_ids = [os.path.basename(fn).split('.')[0] for fn in filenames]
valset = set(get_val_ids())
img_ids = [img_id for img_id in img_ids if not (img_id in valset or img_id in BAD_IMG_IDS)]
#print(len(img_ids))
return img_ids
def get_test_ids():
df = pd.read_csv(settings.SAMPLE_SUB_FILE)
return df.values[:, 0].tolist()
def get_boxed_train_ids(bbox_dict, img_dir=IMG_DIR, max_num = None):
img_ids = get_train_ids(img_dir)
img_ids = [img_id for img_id in img_ids if img_id in bbox_dict]
if not (max_num is None):
return img_ids[:max_num]
return img_ids
def build_bbox_dict(cls_stoi):
bbox_dict = {} #collections.defaultdict(lambda: [])
with open(BBOX_FILE, 'r') as f:
for i, line in enumerate(f):
if i == 0:
continue
row = line.strip().split(',')
value = (cls_stoi[row[2]], [float(row[4]), float(row[6]), float(row[5]), float(row[7])])
if row[0] in bbox_dict:
# return (class, [x1, y1, x2, y2])
bbox_dict[row[0]].append(value)
else:
bbox_dict[row[0]] = [value]
with open(BBOX_BIN_FILE, 'wb') as f:
pickle.dump(bbox_dict, f)
return bbox_dict
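# Each bbox_dict entry maps an image id to a list of (class_index, [x1, y1, x2, y2])
# tuples built from CSV columns 4-7 (re-ordered); the coordinates appear to be
# normalised to [0, 1], since draw_screen_rect below scales them by the image size.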
def build_small_bbox_dict(img_dir=IMG_DIR, num=1000):
bbox_dict = load_bbox_dict()
img_ids = get_boxed_train_ids(bbox_dict)[:num]
val_img_ids = get_val_ids()[:num]
img_ids.extend(val_img_ids)
small_dict = {k: bbox_dict[k] for k in img_ids if k in bbox_dict}
with open(BBOX_BIN_FILE_SMALL, 'wb') as f:
pickle.dump(small_dict, f)
print(len(small_dict))
def load_small_train_ids():
with open(BBOX_BIN_FILE_SMALL, 'rb') as f:
small_dict = pickle.load(f)
img_ids = list(small_dict.keys())
return small_dict, img_ids
def load_bbox_dict():
with open(BBOX_BIN_FILE, 'rb') as f:
return pickle.load(f)
def draw_img(image, name = '', resize=1):
H,W = image.shape[0:2]
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.imshow(name, image.astype(np.uint8))
cv2.resizeWindow(name, round(resize*W), round(resize*H))
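# draw_screen_rect below assumes bbox holds normalized [x1, y1, x2, y2] fractions of the
# image width/height; they are scaled to pixel coordinates before the rectangle is drawn.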
def draw_screen_rect(image, bbox, color=[0,0,255], alpha=0.5):
H, W = image.shape[:2]
x1, y1 = round(bbox[0]*W), round(bbox[1]*H)
x2, y2 = round(bbox[2]*W), round(bbox[3]*H)
#image[y1:y2,x1:x2,:] = (1-alpha)*image[y1:y2,x1:x2,:] + (alpha)*np.array(color, np.uint8)
cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
def draw_shadow_text(img, text, pt, color=(255, 0, 0), fontScale=0.5, thickness=1):
#if color1 is None: color1=(0,0,0)
#if thickness1 is None: thickness1 = thickness+2
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, pt, font, fontScale, color, thickness, cv2.LINE_AA)
#cv2.putText(img, text, pt, font, fontScale, color, thickness, cv2.LINE_AA)
def build_csvs_from_subset_dir(subset_path):
# build_bbox_dict requires the class-name -> id mapping (stoi)
_, stoi = get_class_id_converters()
bbox_dict = build_bbox_dict(stoi)
filenames = glob.glob(os.path.join(IMG_DIR, '*.jpg'))
print(len(filenames))
fns = [os.path.basename(o) for o in filenames]
mcs = [' '.join([str(o[0]) for o in bbox_dict[fn.split('.')[0]]]) for fn in fns]
df1 = pd.DataFrame({'fn': fns, 'clas': mcs}, columns=['fn', 'clas'])
df1.to_csv(MC_CSV, index=False)
mbb = [' '.join([' '.join([str(i) for i in o[1]]) for o in bbox_dict[fn.split('.')[0]]]) for fn in fns]
df2 =
|
pd.DataFrame({'fn': fns, 'bbox': mbb}, columns=['fn','bbox'])
|
pandas.DataFrame
|
from backlight.strategies import filter as module
import pytest
import pandas as pd
import numpy as np
import backlight
import backlight.trades
from backlight.strategies.amount_based import simple_entry_and_exit
from backlight.asset.currency import Currency
@pytest.fixture
def symbol():
return "USDJPY"
@pytest.fixture
def currency_unit():
return Currency.JPY
@pytest.fixture
def signal(symbol, currency_unit):
periods = 22
df = pd.DataFrame(
index=pd.date_range(start="2018-06-06", freq="1min", periods=periods),
data=[
[1, 0, 0],
[0, 0, 1],
[0, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
],
columns=["up", "neutral", "down"],
)
signal = backlight.signal.from_dataframe(df, symbol, currency_unit)
return signal
@pytest.fixture
def market(symbol, currency_unit):
periods = 22
df = pd.DataFrame(
index=pd.date_range(start="2018-06-06", freq="1min", periods=periods),
data=np.arange(periods)[:, None],
columns=["mid"],
)
market = backlight.datasource.from_dataframe(df, symbol, currency_unit)
return market
@pytest.fixture
def askbid(symbol, currency_unit):
periods = 22
df = pd.DataFrame(
index=pd.date_range(start="2018-06-06", freq="1min", periods=periods),
data=[[i + i % 3, i - i % 3] for i in range(periods)],
columns=["ask", "bid"],
)
market = backlight.datasource.from_dataframe(df, symbol, currency_unit)
return market
@pytest.fixture
def trades(market, signal):
max_holding_time = pd.Timedelta("3min")
trades = simple_entry_and_exit(market, signal, max_holding_time)
return trades
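# The running totals in the comments of each expected frame below track the cumulative net
# amount. limit_max_amount appears to drop entries that would push that cumulative amount
# beyond +/- max_amount, which is what the expected 'exist' flags encode.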
def test_limit_max_amount(market, trades):
max_amount = 2.0
limited = module.limit_max_amount(trades, max_amount)
expected = pd.DataFrame(
index=market.index,
data=[
[True, 1.0], # 1.0
[True, -1.0], # 0.0
[False, 0.0], # 0.0
[True, 0.0], # 0.0
[True, 2.0], # 2.0
[True, -1.0], # 1.0
[True, -2.0], # -1.0
[True, -1.0], # -2.0
[True, 1.0], # -1.0
[True, 2.0], # 1.0
[True, 1.0], # 2.0
[False, 0.0], # 2.0
[True, -2.0], # 0.0
[True, -2.0], # -2.0
[False, 0.0], # -2.0
[True, 1.0], # -1.0
[True, 1.0], # 0.0
[False, 0.0], # 0.0
[True, 1.0], # 1.0
[True, 1.0], # 2.0
[False, 0.0], # 2.0
[True, -2.0], # 0.0
],
columns=["exist", "amount"],
)
assert (limited.amount == expected.amount[expected.exist]).all()
def test_skip_entry_by_spread(trades, askbid):
spread = 2.0
limited = module.skip_entry_by_spread(trades, askbid, spread)
expected = pd.DataFrame(
index=askbid.index,
data=[
[True, 1.0], # 1.0
[True, -1.0], # 0.0
[False, 0.0], # 0.0
[True, 0.0], # 0.0
[True, 2.0], # 2.0
[False, 0.0], # 2.0
[True, -2.0], # 0.0
[True, -1.0], # -1.0
[False, 0.0], # -1.0
[True, 2.0], # 1.0
[True, 1.0], # 2.0
[False, 0.0], # 2.0
[True, -2.0], # 0.0
[True, -2.0], # -2.0
[False, 0.0], # 0.0
[True, 1.0], # -1.0
[True, 1.0], # -2.0
[False, 0.0], # 0.0
[True, 1.0], # 1.0
[True, 1.0], # 2.0
[False, 0.0], # 2.0
[True, -2.0], # 0.0
],
columns=["exist", "amount"],
)
assert (limited.amount == expected.amount[expected.exist]).all()
def test_filter_entry_by_time(trades, symbol, currency_unit):
result = module.filter_entry_by_time(trades, "minute", [1, 3, 8, 12])
df = pd.DataFrame(
data=[
[1.0, 0.0],
[-1.0, 1.0],
[-1.0, 0.0],
[1.0, 2.0],
[1.0, 1.0],
[-1.0, 4.0],
[-1.0, 2.0],
[1.0, 4.0],
[1.0, 6.0],
[-1.0, 6.0],
[-1.0, 9.0],
[1.0, 9.0],
],
index=pd.DatetimeIndex(
[
pd.Timestamp("2018-06-06 00:00:00"),
pd.Timestamp("2018-06-06 00:01:00"),
pd.Timestamp("2018-06-06 00:03:00"),
pd.Timestamp("2018-06-06 00:03:00"),
|
pd.Timestamp("2018-06-06 00:04:00")
|
pandas.Timestamp
|
import pandas as pd
import numpy as np
import math
Ratings=
|
pd.read_csv("/home/4/16B09737/Documents/src/user-collaborative-filtering/tour_score.csv")
|
pandas.read_csv
|
class Pywedge_Charts():
'''
Makes 8 different types of interactive charts, each with interactive axis-selection widgets, in a single line of code for the given dataset.
Chart types:
1. Scatter Plot
2. Pie Chart
3. Bar Plot
4. Violin Plot
5. Box Plot
6. Distribution Plot
7. Histogram
8. Correlation Plot
Inputs:
1. train = input dataframe
2. c = any redundant column to be removed (like an ID column; at present supports removal of a single column, a later version will support removing multiple columns)
3. y = target column name as a string
Returns:
Charts widget
'''
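# Minimal usage sketch (names are illustrative; assumes `df` is a pandas DataFrame with an
# 'id' column to drop and a 'target' column):
#   charts = Pywedge_Charts(df, c='id', y='target')
#   charts.make_charts()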
def __init__(self, train, c, y, manual=True):
self.train = train
self.c = c
self.y = y
self.X = self.train.drop(self.y, axis=1)
self.manual = manual
def make_charts(self):
import pandas as pd
import ipywidgets as widgets
import plotly.express as px
import plotly.figure_factory as ff
import plotly.offline as pyo
from ipywidgets import HBox, VBox, Button
from ipywidgets import interact, interact_manual, interactive
import plotly.graph_objects as go
from plotly.offline import iplot
header = widgets.HTML(value="<h2>Pywedge Make_Charts </h2>")
display(header)
if len(self.train) > 500:
from sklearn.model_selection import train_test_split
test_size = 500/len(self.train)
if self.c!=None:
data = self.X.drop(self.c, axis=1)
else:
data = self.X
target = self.train[self.y]
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=test_size, random_state=1)
train_mc = pd.concat([X_test, y_test], axis=1)
else:
train_mc = self.train
train_numeric = train_mc.select_dtypes('number')
train_cat = train_mc.select_dtypes(exclude='number')
out1 = widgets.Output()
out2 = widgets.Output()
out3 = widgets.Output()
out4 = widgets.Output()
out5 = widgets.Output()
out6 = widgets.Output()
out7 = widgets.Output()
out8 = widgets.Output()
tab = widgets.Tab(children = [out1, out2, out3, out4, out5, out6, out7, out8])
tab.set_title(0, 'Scatter Plot')
tab.set_title(1, 'Pie Chart')
tab.set_title(2, 'Bar Plot')
tab.set_title(3, 'Violin Plot')
tab.set_title(4, 'Box Plot')
tab.set_title(5, 'Distribution Plot')
tab.set_title(6, 'Histogram')
tab.set_title(7, 'Correlation plot')
display(tab)
with out1:
header = widgets.HTML(value="<h1>Scatter Plots </h1>")
display(header)
x = widgets.Dropdown(options=list(train_mc.select_dtypes('number').columns))
def scatter_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes('number').columns)):
fig = go.FigureWidget(data=go.Scatter(x=train_mc[X_Axis],
y=train_mc[Y_Axis],
mode='markers',
text=list(train_cat),
marker_color=train_mc[Color]))
fig.update_layout(title=f'{Y_Axis.title()} vs {X_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig.show()
widgets.interact_manual.opts['manual_name'] = 'Make_Chart'
one = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out2:
header = widgets.HTML(value="<h1>Pie Charts </h1>")
display(header)
def pie_chart(Labels=list(train_mc.select_dtypes(exclude='number').columns),
Values=list(train_mc.select_dtypes('number').columns)[0:]):
fig = go.FigureWidget(data=[go.Pie(labels=train_mc[Labels], values=train_mc[Values])])
fig.update_layout(title=f'{Values.title()} vs {Labels.title()}',
autosize=False,width=500,height=500)
fig.show()
one = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out3:
header = widgets.HTML(value="<h1>Bar Plots </h1>")
display(header)
def bar_plot(X_Axis=list(train_mc.select_dtypes(exclude='number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig1 = px.bar(train_mc, x=train_mc[X_Axis], y=train_mc[Y_Axis], color=train_mc[Color])
fig1.update_layout(barmode='group',
title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig1.show()
one = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out4:
header = widgets.HTML(value="<h1>Violin Plots </h1>")
display(header)
def viol_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig2 = px.violin(train_mc, X_Axis, Y_Axis, Color, box=True, hover_data=train_mc.columns)
fig2.update_layout(title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out5:
header = widgets.HTML(value="<h1>Box Plots </h1>")
display(header)
def box_plot(X_Axis=list(train_mc.select_dtypes(exclude='number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[0:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig4 = px.box(train_mc, x=X_Axis, y=Y_Axis, color=Color, points="all")
fig4.update_layout(barmode='group',
title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig4.show()
one = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out6:
header = widgets.HTML(value="<h1>Distribution Plots </h1>")
display(header)
def dist_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig2 = px.histogram(train_mc, X_Axis, Y_Axis, Color, marginal='violin', hover_data=train_mc.columns)
fig2.update_layout(title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out7:
header = widgets.HTML(value="<h1>Histogram </h1>")
display(header)
def hist_plot(X_Axis=list(train_mc.columns)):
fig2 = px.histogram(train_mc, X_Axis)
fig2.update_layout(title=f'{X_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out8:
header = widgets.HTML(value="<h1>Correlation Plots </h1>")
display(header)
import plotly.figure_factory as ff
corrs = train_mc.corr()
colorscale = ['Greys', 'Greens', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']
@interact_manual
def plot_corrs(colorscale=colorscale):
figure = ff.create_annotated_heatmap(z = corrs.round(2).values,
x =list(corrs.columns),
y=list(corrs.index),
colorscale=colorscale,
annotation_text=corrs.round(2).values)
iplot(figure)
class baseline_model():
'''
Cleans the raw dataframe so it can be fed into ML models and runs various baseline models. The following data pre-processing steps are carried out:
1) segregating numeric & categorical columns
2) missing values imputation for numeric & categorical columns
3) standardization
4) feature importance
5) SMOTE
6) baseline model
Inputs:
1) train = train dataframe
2) test = stand-out test dataframe (without target column)
3) c = any redundant column to be removed (like an ID column; at present supports removal of a single column, a later version will support removing multiple columns)
4) y = target column name as a string
5) type = Classification / Regression
Returns:
1) Various classification/regression models & model performances
2) new_X (cleaned feature columns in dataframe)
3) new_y (cleaned target column in dataframe)
4) new_test (cleaned stand-out test dataframe)
'''
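# Minimal usage sketch (names are illustrative; assumes pandas DataFrames `train` and `test`
# and a 'target' column in train):
#   blm = baseline_model(train, test, c=None, y='target', type='Classification')
#   blm.classification_summary()   # interactive widgets; predictions land in blm.predictions_baseline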
def __init__(self, train, test, c, y, type="Classification"):
self.train = train
self.test = test
self.c = c
self.y = y
self.type = type
self.X = train.drop(self.y, axis=1)
def classification_summary(self):
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pywedge Baseline Models </h2>")
display(header)
out1 = widgets.Output()
out2 = widgets.Output()
tab = widgets.Tab(children = [out1, out2])
tab.set_title(0,'Baseline Models')
tab.set_title(1, 'Predict Baseline Model')
display(tab)
with out1:
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pre_processing </h2>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScaler', '1'), ('RobustScaler', '2'), ('MinMaxScaler', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.VBox([cat_info, std_scr, apply_smote])
pp_reg = widgets.VBox([cat_info, std_scr])
if self.type == 'Classification':
display(pp_class)
else:
display(pp_reg)
test_size = widgets.BoundedFloatText(
value=0.20,
min=0.05,
max=0.5,
step=0.05,
description='Test Size %',
disabled=False)
display(test_size)
button_1 = widgets.Button(description = 'Run Baseline models')
out = widgets.Output()
def on_button_clicked(_):
with out:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if self.type=="Classification":
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
print('\nStarting classification_summary...')
print('TOP 10 FEATURE IMPORTANCE - USING ADABOOST CLASSIFIER')
from sklearn.ensemble import AdaBoostClassifier
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
ab = AdaBoostClassifier().fit(self.new_X, self.new_y)
print(pd.Series(ab.feature_importances_, index=self.new_X.columns).sort_values(ascending=False).head(10))
from sklearn.model_selection import train_test_split
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=test_size.value, random_state=1)
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, HistGradientBoostingClassifier
from catboost import CatBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore')
from tqdm.notebook import trange, tqdm
classifiers = {
"Logistic" : LogisticRegression(n_jobs=-1),
"KNN(3)" : KNeighborsClassifier(3, n_jobs=-1),
"Decision Tree": DecisionTreeClassifier(max_depth=7),
"Random Forest": RandomForestClassifier(max_depth=7, n_estimators=10, max_features=4, n_jobs=-1),
"AdaBoost" : AdaBoostClassifier(),
"GB Classifier": GradientBoostingClassifier(),
"ExtraTree Cls": ExtraTreesClassifier(n_jobs=-1),
"Hist GB Cls" : HistGradientBoostingClassifier(),
"MLP Cls." : MLPClassifier(alpha=1),
"XGBoost" : xgb.XGBClassifier(max_depth=4, n_estimators=10, learning_rate=0.1, n_jobs=-1),
"CatBoost" : CatBoostClassifier(silent=True),
"Naive Bayes" : GaussianNB(),
"QDA" : QuadraticDiscriminantAnalysis(),
"Linear SVC" : LinearSVC(),
}
from time import time
k = 14
head = list(classifiers.items())[:k]
for name, classifier in tqdm(head):
start = time()
classifier.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = classifier.predict(self.X_test)
predict_time = time()-start
acc_score= (accuracy_score(self.y_test,predictions))
roc_score= (roc_auc_score(self.y_test,predictions))
f1_macro= (f1_score(self.y_test, predictions, average='macro'))
print("{:<15}| acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, acc_score, roc_score, f1_macro, train_time, predict_time))
button_1.on_click(on_button_clicked)
a = widgets.VBox([button_1, out])
display(a)
with out2:
base_model = widgets.Dropdown(
options=['Logistic Regression', 'KNN', 'Decision Tree', 'Random Forest', 'MLP Classifier', 'AdaBoost', 'CatBoost', 'GB Classifier', 'ExtraTree Cls', 'Hist GB Cls' ],
value='Logistic Regression',
description='Choose Base Model: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_model)
button_2 = widgets.Button(description = 'Predict Baseline models')
out2 = widgets.Output()
def on_pred_button_clicked(_):
with out2:
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, HistGradientBoostingClassifier
from catboost import CatBoostClassifier
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
clear_output()
print(base_model.value)
if base_model.value == 'Logistic Regression':
classifier = LogisticRegression(max_iter=1000, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('> Prediction completed. \n> Use dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'KNN':
classifier = KNeighborsClassifier(3, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Decision Tree':
classifier = DecisionTreeClassifier(max_depth=7)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Random Forest':
classifier = RandomForestClassifier(max_depth=7, n_estimators=10, max_features=4, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'MLP Classifier':
classifier = MLPClassifier(alpha=1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'AdaBoost':
classifier = AdaBoostClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'CatBoost':
classifier = CatBoostClassifier(silent=True)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'GB Classifier':
classifier = GradientBoostingClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
if base_model.value == 'ExtraTree Cls':
classifier = ExtraTreesClassifier(n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
if base_model.value == 'Hist GB Cls':
classifier = HistGradientBoostingClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
button_2.on_click(on_pred_button_clicked)
b = widgets.VBox([button_2, out2])
display(b)
def Regression_summary(self):
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pywedge Baseline Models </h2>")
display(header)
out1 = widgets.Output()
out2 = widgets.Output()
tab = widgets.Tab(children = [out1, out2])
tab.set_title(0,'Baseline Models')
tab.set_title(1, 'Predict Baseline Model')
display(tab)
with out1:
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pre_processing </h2>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScaler', '1'), ('RobustScaler', '2'), ('MinMaxScaler', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.VBox([cat_info, std_scr, apply_smote])
pp_reg = widgets.VBox([cat_info, std_scr])
if self.type == 'Classification':
display(pp_class)
else:
display(pp_reg)
test_size = widgets.BoundedFloatText(
value=0.20,
min=0.05,
max=0.5,
step=0.05,
description='Test Size %',
disabled=False)
display(test_size)
button_1 = widgets.Button(description = 'Run Baseline models')
out = widgets.Output()
def on_button_clicked(_):
with out:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
print('Starting regression summary...')
print('TOP 10 FEATURE IMPORTANCE TABLE')
from sklearn.ensemble import AdaBoostRegressor
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
ab = AdaBoostRegressor().fit(self.new_X, self.new_y)
print(pd.Series(ab.feature_importances_, index=self.new_X.columns).sort_values(ascending=False).head(10))
from sklearn.model_selection import train_test_split
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=test_size.value, random_state=1)
from time import time
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.svm import LinearSVR
from sklearn.linear_model import Lasso, Ridge
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.metrics import r2_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, ExtraTreesRegressor, HistGradientBoostingRegressor
from catboost import CatBoostRegressor
from sklearn.neural_network import MLPRegressor
import xgboost as xgb
from math import sqrt
from tqdm.notebook import trange, tqdm
import warnings
warnings.filterwarnings('ignore')
print('--------------------------LINEAR MODELS---------------------------------')
lin_regressors = {
'Linear Reg' : LinearRegression(n_jobs=-1),
'KNN' : KNeighborsRegressor(n_jobs=-1),
'LinearSVR' : LinearSVR(),
'Lasso' : Lasso(),
'Ridge' : Ridge(),
}
from time import time
k = 10
head = list(lin_regressors.items())[:k]
for name, lin_regressors in tqdm(head):
start = time()
lin_regressors.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = lin_regressors.predict(self.X_test)
predict_time = time()-start
exp_var = explained_variance_score(self.y_test, predictions)
mae = mean_absolute_error(self.y_test, predictions)
rmse = sqrt(mean_squared_error(self.y_test, predictions))
r2 = r2_score(self.y_test, predictions)
print("{:<15}| exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, exp_var, mae, rmse, r2, train_time, predict_time))
print('------------------------NON LINEAR MODELS----------------------------------')
print('---------------------THIS MIGHT TAKE A WHILE-------------------------------')
non_lin_regressors = {
#'SVR' : SVR(),
'Decision Tree' : DecisionTreeRegressor(max_depth=5),
'Random Forest' : RandomForestRegressor(max_depth=10, n_jobs=-1),
'GB Regressor' : GradientBoostingRegressor(n_estimators=200),
'CB Regressor' : CatBoostRegressor(silent=True),
'ADAB Regressor': AdaBoostRegressor(),
'MLP Regressor' : MLPRegressor(),
'XGB Regressor' : xgb.XGBRegressor(n_jobs=-1),
'Extra tree Reg': ExtraTreesRegressor(n_jobs=-1),
'Hist GB Reg' : HistGradientBoostingRegressor()
}
from time import time
k = 10
head = list(non_lin_regressors.items())[:k]
for name, non_lin_regressors in tqdm(head):
start = time()
non_lin_regressors.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = non_lin_regressors.predict(self.X_test)
predict_time = time()-start
exp_var = explained_variance_score(self.y_test, predictions)
mae = mean_absolute_error(self.y_test, predictions)
rmse = sqrt(mean_squared_error(self.y_test, predictions))
r2 = r2_score(self.y_test, predictions)
print("{:<15}| exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, exp_var, mae, rmse, r2, train_time, predict_time))
button_1.on_click(on_button_clicked)
a = widgets.VBox([button_1, out])
display(a)
with out2:
base_model = widgets.Dropdown(
options=['Linear Regression', 'KNN', 'Decision Tree', 'Random Forest', 'MLP Regressor', 'AdaBoost', 'Grad-Boost', 'CatBoost'],
value='Linear Regression',
description='Choose Base Model: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_model)
button_2 = widgets.Button(description = 'Predict Baseline models')
out2 = widgets.Output()
def on_pred_button_clicked(_):
with out2:
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from catboost import CatBoostRegressor
from sklearn.neural_network import MLPRegressor
import xgboost as xgb
clear_output()
print(base_model.value)
if base_model.value == 'Linear Regression':
regressor = LinearRegression()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'KNN':
regressor = KNeighborsRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Decision Tree':
regressor = DecisionTreeRegressor(max_depth=5)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Random Forest':
regressor = RandomForestRegressor(max_depth=10)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'MLP Regressor':
regressor = MLPRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'AdaBoost':
regressor = AdaBoostRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Grad-Boost':
regressor = GradientBoostingRegressor(n_estimators=200)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'CatBoost':
regressor = CatBoostRegressor(silent=True)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
button_2.on_click(on_pred_button_clicked)
b = widgets.VBox([button_2, out2])
display(b)
class Pywedge_HP():
'''
Creates an interactive, widget-based hyperparameter selection tool for both Classification & Regression.
For Classification, the following baseline estimators are covered in the GridSearch & RandomizedSearch options:
1) Logistic Regression
2) Decision Tree
3) Random Forest
4) KNN Classifier
For Regression, the following baseline estimators are covered in the GridSearch & RandomizedSearch options:
1) Linear Regression
2) Decision Tree Regressor
3) Random Forest Regressor
4) KNN Regressor
Inputs:
1) train = train dataframe
2) test = stand-out test dataframe (without target column)
3) c = any redundant column to be removed (like an ID column; at present supports removal of a single column, a later version will support removing multiple columns)
4) y = target column name as a string
Outputs:
1) Hyperparameter tuning results
2) Prediction on stand-out test dataset
'''
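# Minimal usage sketch (names are illustrative; assumes pandas DataFrames `train` and `test`
# and a 'target' column in train):
#   pph = Pywedge_HP(train, test, c=None, y='target', tracking=False)
#   pph.HP_Tune_Classification()   # tuned predictions on the test set land in pph.predict_HP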
def __init__(self, train, test, c, y, tracking=False):
self.train = train
self.test = test
self.c = c
self.y = y
self.X = train.drop(self.y, axis=1)
self.tracking = tracking
def HP_Tune_Classification(self):
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button, Label
from ipywidgets import interact_manual, interactive, interact
import logging
from IPython.display import display, Markdown, clear_output
import warnings
warnings.filterwarnings('ignore')
header_1 = widgets.HTML(value="<h2>Pywedge HP_Tune</h2>")
display(header_1)
out1 = widgets.Output()
out2 = widgets.Output()
out3 = widgets.Output()
tab = widgets.Tab(children = [out1, out2, out3])
tab.set_title(0, 'Input')
tab.set_title(1, 'Output')
tab.set_title(2, 'Helper Page')
display(tab)
with out1:
header = widgets.HTML(value="<h3>Base Estimator</h3>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScaler', '1'), ('RobustScaler', '2'), ('MinMaxScaler', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.HBox([cat_info, std_scr, apply_smote])
header_2 = widgets.HTML(value="<h3>Pre_processing </h3>")
base_estimator = widgets.Dropdown(
options=['Logistic Regression', 'Decision Tree', 'Random Forest','AdaBoost', 'ExtraTree Classifier', 'KNN Classifier'],
value='Logistic Regression',
description='Choose Base Estimator: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_estimator)
button = widgets.Button(description='Select Base Estimator')
out = widgets.Output()
# Logistic Regression Hyperparameters _Start
penalty_L = widgets.SelectMultiple(
options = ['l1', 'l2', 'elasticnet', 'none'],
value = ['none'],
rows = 4,
description = 'Penalty',
disabled = False)
dual_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Dual',
disabled = False)
tol_L = widgets.Text(
value='0.0001',
placeholder='enter any float value',
description='Tolerence (tol)',
style = {'description_width': 'initial'},
disabled=False)
g = widgets.HBox([penalty_L, dual_L, tol_L])
C_L = widgets.Text(
value='1.0',
placeholder='enter any float value',
description='C',
disabled=False)
fit_intercept_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Fit_intercept',
disabled = False)
intercept_scaling_L = widgets.Text(
value='1.0',
placeholder='enter any float value',
description='Intercept_scaling',
style = {'description_width': 'initial'},
disabled=False)
h = widgets.HBox([C_L, fit_intercept_L, intercept_scaling_L])
class_weight_L = widgets.SelectMultiple(
options = ['balanced', 'None'],
value = ['None'],
rows = 2,
description = 'Class_weight',
disabled = False)
random_state_L = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
solver_L = widgets.SelectMultiple(
options = ['newton-cg', 'lbfgs', 'sag', 'saga'],
value = ['lbfgs'],
rows = 4,
description = 'Solver',
disabled = False)
i= widgets.HBox([class_weight_L, random_state_L, solver_L])
max_iter_L = widgets.Text(
value='100',
placeholder='enter any integer value',
description='Max_Iterations',
style = {'description_width': 'initial'},
disabled=False)
verbose_L = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Warm_State',
disabled = False)
j= widgets.HBox([max_iter_L, verbose_L, warm_state_L])
L1_Ratio_L = widgets.Text(
value='None',
placeholder='enter any integer value',
description='L1_Ratio',
style = {'description_width': 'initial'},
disabled=False)
k = widgets.HBox([L1_Ratio_L])
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_L = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_L = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_L = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_L, cv_L, scoring_L])
n_iter_L = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_L = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_L, n_iter_L, n_iter_text])
null = widgets.HTML('<br></br>')
button_2 = widgets.Button(description='Submit HP_Tune')
out_res = widgets.Output()
def on_out_res_clicked(_):
with out_res:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
param_grid = {'penalty': list(penalty_L.value),
'dual': list(dual_L.value),
'tol': [float(item) for item in tol_L.value.split(',')],
'C' : [float(item) for item in C_L.value.split(',')],
'fit_intercept' : list(fit_intercept_L.value),
'intercept_scaling' : [float(item) for item in intercept_scaling_L.value.split(',')],
'class_weight' : [None if item == 'None' else item for item in class_weight_L.value],
'random_state' : [int(item) for item in random_state_L.value.split(',')],
'solver' : list(solver_L.value),
'max_iter' : [int(item) for item in max_iter_L.value.split(',')],
# 'multi_class' : list(multiclass.value),
'verbose' : [int(item) for item in verbose_L.value.split(',')],
# 'n_jobs' : [float(item) for item in n_jobs.value.split(',')]
}
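# Each comma-separated text box above is split into a list of candidate values, so this dict
# is consumed directly as param_grid / param_distributions by the GridSearchCV and
# RandomizedSearchCV calls below.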
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
warnings.filterwarnings("ignore")
estimator = LogisticRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value),
n_jobs = int(n_jobs_L.value),
scoring = scoring_L.value)
if search_param_L.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value),
n_jobs = int(n_jobs_L.value),
scoring = scoring_L.value)
with mlflow.start_run() as run:
warnings.filterwarnings("ignore")
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = LogisticRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value),
scoring = scoring_L.value)
if search_param_L.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value),
scoring = scoring_L.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from ipywidgets import interact, interactive
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
Pred = widgets.HTML(value='<h3><em>Predictions on stand_out test data</em></h3>')
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., pph.predict_HP, where pph is pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlfow ui in command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_2.on_click(on_out_res_clicked)
b = widgets.VBox([button_2, out_res])
h1 = widgets.HTML('<h3>Select Logistic Regression Hyperparameters</h3>')
aa = widgets.VBox([header_2, pp_class, h1, g,h,i,j,k, h5, l, m, null, b])
# Logistic Regression Hyperpameter - Ends
# Decision Tree Hyperparameter - Starts
criterion_D = widgets.SelectMultiple(
options = ['gini', 'entropy'],
value = ['gini'],
description = 'Criterion',
rows = 2,
disabled = False)
splitter_D = widgets.SelectMultiple(
options = ['best', 'random'],
value = ['best'],
rows = 2,
description = 'Splitter',
disabled = False)
max_depth_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_D = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_D = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
disabled=False)
max_leaf_nodes_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
class_weight_D = widgets.SelectMultiple(
options = ['balanced', 'None'],
value = ['balanced'],
rows = 2,
description = 'Class_weight',
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_D = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
first_row = widgets.HBox([criterion_D, splitter_D, max_features_D])
second_row = widgets.HBox([min_samples_split_D, min_weight_fraction_D, max_depth_D])
third_row = widgets.HBox([random_state_D, max_leaf_nodes_D, min_impurity_decrease_D])
fourth_row = widgets.HBox([ccp_alpha_D, class_weight_D, min_samples_leaf_D])
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_D = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_D = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_D, cv_D, scoring_D])
n_iter_D = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_D, n_iter_D, n_iter_text])
button_3 = widgets.Button(description='Submit HP_Tune')
out_res_DT = widgets.Output()
def on_out_res_clicked_DT(_):
with out_res_DT:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
# print(criterion_D.value)
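# Each Text widget accepts comma-separated candidates; the values are split and cast to
# int/float so that every hyperparameter maps to a list of options in the search grid.
# SelectMultiple widgets already yield tuples of selections, so list() is enough for those.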
param_grid = {'criterion': list(criterion_D.value),
'splitter': list(splitter_D.value),
'max_depth': [int(item) for item in max_depth_D.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_D.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_D.value.split(',')],
# 'min_weight_fraction' : [float(item) for item in min_weight_fraction.value.split(',')],
'max_features' : list(max_features_D.value),
'random_state' : [int(item) for item in random_state_D.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_D.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_D.value.split(',')],
'ccp_alpha' : [float(item) for item in ccp_alpha_D.value.split(',')],
'class_weight' : list(class_weight_D.value)
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
warnings.filterwarnings("ignore")
estimator = DecisionTreeClassifier()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
scoring = scoring_D.value)
if search_param_D.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
n_iter = int(n_iter_D.value),
scoring = scoring_D.value)
with mlflow.start_run() as run:
warnings.filterwarnings("ignore", category=Warning)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = DecisionTreeClassifier()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_D.value),
cv = int(cv_D.value),
scoring = scoring_D.value)
if search_param_D.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
n_iter = int(n_iter_D.value),
scoring = scoring_D.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in a command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_2.on_click(on_out_res_clicked)
button_3.on_click(on_out_res_clicked_DT)
b = widgets.VBox([button_3, out_res_DT])
h1 = widgets.HTML('<h3>Select Decision Tree Hyperparameters</h3>')
frame = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, h5, l, m, b])
# Decision Tree Hyperparameter Ends
# Random Forest Hyperparameter Starts
n_estimators_R = widgets.Text(
value='100',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
criterion_R = widgets.SelectMultiple(
options = ['gini', 'entropy'],
value = ['gini'],
rows = 2,
description = 'Criterion',
disabled = False)
max_depth_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_leaf_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_R = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
max_leaf_nodes_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
bootstrap_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Bootstrap',
rows = 2,
disabled = False)
oob_score_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'oob_score',
rows = 2,
disabled = False)
verbose_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Warm_State',
style = {'description_width': 'initial'},
rows = 2,
disabled = False)
class_weight_R = widgets.SelectMultiple(
options = ['balanced', 'balanced_subsample', 'None'],
value = ['balanced'],
description = 'Class_weight',
rows = 3,
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_R = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
max_samples_R = widgets.Text(
value='2',
placeholder='enter any float value',
description='max_samples',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_R = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_R = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_R, cv_R, scoring_R])
n_jobs_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_R = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_R, n_iter_R, n_iter_text])
first_row = widgets.HBox([n_estimators_R, criterion_R, max_depth_R])
second_row = widgets.HBox([min_samples_split_R, min_samples_leaf_R, min_weight_fraction_leaf_R])
third_row = widgets.HBox([max_features_R, max_leaf_nodes_R, min_impurity_decrease_R])
fourth_row = widgets.HBox([max_samples_R, bootstrap_R, oob_score_R])
fifth_row = widgets.HBox([warm_state_R, random_state_R, verbose_R])
sixth_row = widgets.HBox([class_weight_R, ccp_alpha_R])
button_4 = widgets.Button(description='Submit RF GridSearchCV')
out_res_RF = widgets.Output()
def on_out_res_clicked_RF(_):
with out_res_RF:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
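# The Random Forest grid follows the same convention: Text widgets are parsed into int/float
# lists and SelectMultiple selections are passed through as lists. Note that the 'None' entry
# of class_weight_R arrives as the literal string 'None', not the Python None object, so
# selecting it may need mapping before sklearn will accept it.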
param_grid = {'n_estimators' : [int(item) for item in n_estimators_R.value.split(',')],
'criterion': list(criterion_R.value),
'max_depth': [int(item) for item in max_depth_R.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_R.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_R.value.split(',')],
'min_weight_fraction_leaf' : [float(item) for item in min_weight_fraction_leaf_R.value.split(',')],
'max_features' : list(max_features_R.value),
'random_state' : [int(item) for item in random_state_R.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_R.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_R.value.split(',')],
'bootstrap' : list(bootstrap_R.value),
'oob_score' : list(oob_score_R.value),
'verbose' : [int(item) for item in verbose_R.value.split(',')],
'class_weight' : list(class_weight_R.value),
'ccp_alpha' : [float(item) for item in ccp_alpha_R.value.split(',')],
'max_samples' : [int(item) for item in max_samples_R.value.split(',')]
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = RandomForestClassifier()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
scoring = scoring_R.value)
if search_param_R.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
n_iter = int(n_iter_R.value),
scoring = scoring_R.value)
with mlflow.start_run() as run:
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = RandomForestClassifier()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_R.value),
cv = int(cv_R.value),
scoring = scoring_R.value)
if search_param_R.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
n_iter = int(n_iter_R.value),
scoring = scoring_R.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in a command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_4.on_click(on_out_res_clicked_RF)
b = widgets.VBox([button_4, out_res_RF])
h1 = widgets.HTML('<h3>Select Random Forest Hyperparameters</h3>')
frame_RF = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, fifth_row, sixth_row, h5, l, m, b])
# Random Forest Hyperparameter ends
# KNN Classifier Hyperparameter Starts
n_neighbors_k = widgets.Text(
value='5',
placeholder='enter any integer value',
description='n_neighbors',
disabled=False)
weights_k = widgets.SelectMultiple(
options = ['uniform', 'distance'],
value = ['uniform'],
rows = 2,
description = 'Weights',
disabled = False)
algorithm_k = widgets.SelectMultiple(
options = ['auto', 'ball_tree', 'kd_tree', 'brute'],
value = ['auto'],
rows = 4,
description = 'Algorithm',
disabled = False)
leaf_size_k = widgets.Text(
value='30',
placeholder='enter any integer value',
description='Leaf_Size',
disabled=False)
p_k = widgets.Text(
value='2',
placeholder='enter any integer value',
description='p (Power param)',
disabled=False)
metric_k = widgets.SelectMultiple(
options = ['euclidean', 'manhattan', 'chebyshev', 'minkowski'],
value = ['minkowski'],
rows = 4,
description = 'Metric',
disabled = False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_K = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_K = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_K = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_K, cv_K, scoring_K])
n_iter_K = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_K = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_K, n_iter_K, n_iter_text])
first_row = widgets.HBox([n_neighbors_k, weights_k, algorithm_k])
second_row = widgets.HBox([leaf_size_k, p_k, metric_k])
button_5 = widgets.Button(description='Submit KNN HP_Tune')
out_res_K = widgets.Output()
def on_out_res_clicked_K(_):
with out_res_K:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
# print(n_neighbors_k.value)
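# KNN search space: numeric widgets (n_neighbors, leaf_size, p) are parsed from
# comma-separated text, while weights/algorithm/metric come straight from the multi-select widgets.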
param_grid = {'n_neighbors' : [int(item) for item in n_neighbors_k.value.split(',')],
'weights': list(weights_k.value),
'algorithm': list(algorithm_k.value),
'leaf_size' : [int(item) for item in leaf_size_k.value.split(',')],
'p' : [int(item) for item in p_k.value.split(',')],
'metric' : list(metric_k.value),
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = KNeighborsClassifier()
if search_param_K.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_K.value),
n_jobs = int(n_jobs_K.value),
scoring = scoring_K.value)
if search_param_K.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_K.value),
n_iter = int(n_iter_K.value),
n_jobs = int(n_jobs_K.value),
scoring = scoring_K.value)
with mlflow.start_run() as run:
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = KNeighborsClassifier()
if search_param_K.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_K.value),
cv = int(cv_K.value),
scoring = scoring_K.value)
if search_param_K.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_K.value),
n_jobs = int(n_jobs_K.value),
n_iter = int(n_iter_K.value),
scoring = scoring_K.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in a command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_5.on_click(on_out_res_clicked_K)
b = widgets.VBox([button_5, out_res_K])
h1 = widgets.HTML('<h3>Select KNN Classifier Hyperparameters</h3>')
frame_K = widgets.VBox([header_2, pp_class, h1, first_row, second_row, h5, l, m, b])
# KNN Classifier Hyperparameter ends
# Adaboost Classifier Hyperparameter Starts
n_estimators_A = widgets.Text(
value='50',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
learning_rate_A = widgets.Text(
value='1',
placeholder='enter any float value',
description='learning_rate',
disabled=False)
algorithm_A = widgets.SelectMultiple(
options = ['SAMME', 'SAMME.R'],
value = ['SAMME.R'],
rows = 2,
description = 'Algorithm',
disabled = False)
random_state_A = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_A = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_A = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_A = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_A, cv_A, scoring_A])
n_jobs_A = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_A = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_A, n_iter_A, n_iter_text])
first_row = widgets.HBox([n_estimators_A, learning_rate_A, algorithm_A])
second_row = widgets.HBox([random_state_A])
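# AdaBoost exposes only four tunables here (n_estimators, learning_rate, algorithm,
# random_state), so its panel needs just two widget rows.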
button_6 = widgets.Button(description='Submit Adaboost HPTune')
out_res_ADA = widgets.Output()
def on_out_res_clicked_ADA(_):
with out_res_ADA:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet as fp
except ImportError: # pragma: no cover
fp = None
from .... import dataframe as md
from .... import tensor as mt
from ...datasource.read_csv import DataFrameReadCSV
from ...datasource.read_sql import DataFrameReadSQL
from ...datasource.read_parquet import DataFrameReadParquet
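# The tests below run Mars DataFrame/Series/Index indexing operations (set_index, iloc, loc,
# getitem, at/iat, setitem, head/tail, reset_index) and compare each result fetched via
# execute().fetch() against the equivalent pandas operation on the raw data.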
@pytest.mark.parametrize('chunk_size', [2, (2, 3)])
def test_set_index(setup, chunk_size):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=chunk_size)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, df3.execute().fetch())
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, df4.execute().fetch())
expected = df1.set_index('y')
df2.set_index('y', inplace=True)
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
def test_iloc_getitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
result = df3.execute(extra_config={'check_series_name': False}).fetch()
pd.testing.assert_series_equal(
expected, result)
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, df4.execute().fetch())
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, df5.execute().fetch())
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df6.execute().fetch())
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df7.execute().fetch())
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df8.execute().fetch())
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df9.execute().fetch())
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, df10.execute().fetch())
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
assert expected == df11.execute().fetch()
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, df12.execute().fetch())
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, df14.execute().fetch())
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, df13.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
assert series.execute().fetch() == data.iloc[4]
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# test index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)[:3]
pd.testing.assert_index_equal(
index.execute().fetch(), data[:3])
index = md.Index(data, chunk_size=3)[4]
assert index.execute().fetch() == data[4]
index = md.Index(data, chunk_size=3)[[2, 3, 4, 9]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[2, 3, 4, 9]])
index = md.Index(data, chunk_size=3)[[4, 3, 9, 2]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[4, 3, 9, 2]])
index = md.Index(data)[5:]
pd.testing.assert_index_equal(
index.execute().fetch(), data[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
index = md.Index(data)[selection]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
index = md.Index(data)[mt.tensor(selection, chunk_size=4)]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
def test_iloc_setitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
series.execute().fetch(), data)
# test Index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)
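# Index objects are immutable (as in pandas), so positional assignment is expected to raise.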
with pytest.raises(TypeError):
index[5:] = 4
def test_loc_getitem(setup):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non-unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non-unique and non-monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
raw6 = raw1[:0]
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df6 = md.DataFrame(raw6)
df = df2.loc[3, 'b']
result = df.execute().fetch()
expected = raw2.loc[3, 'b']
assert result == expected
df = df1.loc['a3', 'b']
result = df.execute(extra_config={'check_shape': False}).fetch()
expected = raw1.loc['a3', 'b']
assert result == expected
# test empty list
df = df1.loc[[]]
result = df.execute().fetch()
expected = raw1.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[[]]
result = df.execute().fetch()
expected = raw2.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[1:4, 'b':'d']
result = df.execute().fetch()
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = df.execute().fetch()
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = df.execute().fetch()
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = df.execute().fetch()
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = df.execute().fetch()
expected = raw3.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# 'b' is non-unique, and non-monotonic
df = df4.loc[:, 'b']
result = df.execute().fetch()
expected = raw4.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# label on axis 0
df = df1.loc['a2', :]
result = df.execute().fetch()
expected = raw1.loc['a2', :]
pd.testing.assert_series_equal(result, expected)
# label-based fancy index
df = df2.loc[[3, 0, 1], ['c', 'a', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[3, 0, 1], ['c', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index, asc sorted
df = df2.loc[[0, 1, 3], ['a', 'c', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[0, 1, 3], ['a', 'c', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index in which non-unique exists
selection = rs.randint(2, size=(5,), dtype=bool)
df = df3.loc[selection, ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
df = df3.loc[md.Series(selection), ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index on index
# whose index_value does not have value
df = df1.loc[['a3', 'a1'], ['b', 'a', 'd']]
result = df.execute(extra_config={'check_nsplits': False}).fetch()
expected = raw1.loc[['a3', 'a1'], ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# get timestamp by str
df = df5.loc['20200101']
result = df.execute(extra_config={'check_series_name': False}).fetch(
extra_config={'check_series_name': False})
expected = raw5.loc['20200101']
pd.testing.assert_series_equal(result, expected)
# get timestamp by str, return scalar
df = df5.loc['2020-1-1', 'c']
result = df.execute().fetch()
expected = raw5.loc['2020-1-1', 'c']
assert result == expected
# test empty df
df = df6.loc[[]]
result = df.execute().fetch()
expected = raw6.loc[[]]
pd.testing.assert_frame_equal(result, expected)
def test_dataframe_getitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
data2 = data.copy()
data2.index = pd.date_range('2020-1-1', periods=10)
mdf = md.DataFrame(data2, chunk_size=3)
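# data2 reuses the same values but with a DatetimeIndex, so the slice tests at the end of
# this function can exercise both positional and date-string based row slicing.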
series1 = df['c2']
pd.testing.assert_series_equal(
series1.execute().fetch(), data['c2'])
series2 = df['c5']
pd.testing.assert_series_equal(
series2.execute().fetch(), data['c5'])
df1 = df[['c1', 'c2', 'c3']]
pd.testing.assert_frame_equal(
df1.execute().fetch(), data[['c1', 'c2', 'c3']])
df2 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df2.execute().fetch(), data[['c3', 'c2', 'c1']])
df3 = df[['c1']]
pd.testing.assert_frame_equal(
df3.execute().fetch(), data[['c1']])
df4 = df[['c3', 'c1', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df4.execute().fetch(), data[['c3', 'c1', 'c2', 'c1']])
df5 = df[np.array(['c1', 'c2', 'c3'])]
pd.testing.assert_frame_equal(
df5.execute().fetch(), data[['c1', 'c2', 'c3']])
df6 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df6.execute().fetch(), data[['c3', 'c2', 'c1']])
df7 = df[1:7:2]
pd.testing.assert_frame_equal(
df7.execute().fetch(), data[1:7:2])
series3 = df['c1'][0]
assert series3.execute().fetch() == data['c1'][0]
df8 = mdf[3:7]
pd.testing.assert_frame_equal(
df8.execute().fetch(), data2[3:7])
df9 = mdf['2020-1-2': '2020-1-5']
pd.testing.assert_frame_equal(
df9.execute().fetch(), data2['2020-1-2': '2020-1-5'])
def test_dataframe_getitem_bool(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data = data.c1 > 0.5
mask = md.Series(mask_data, chunk_size=2)
# getitem by mars series
assert df[mask].execute().fetch().shape == data[mask_data].shape
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by pandas series
pd.testing.assert_frame_equal(
df[mask_data].execute().fetch(), data[mask_data])
# getitem by mars series with alignment but no shuffle
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=range(9, -1, -1))
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by mars series with shuffle alignment
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by mars series with shuffle alignment and extra element
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True, False],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4, 10])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by DataFrame with all bool columns
r = df[df > 0.5]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data > 0.5])
# getitem by tensor mask
r = df[(df['c1'] > 0.5).to_tensor()]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data['c1'] > 0.5])
def test_dataframe_getitem_using_attr(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'key', 'dtypes', 'size'])
df = md.DataFrame(data, chunk_size=2)
series1 = df.c2
pd.testing.assert_series_equal(
series1.execute().fetch(), data.c2)
# accessing column using attribute shouldn't overwrite existing attributes
assert df.key == getattr(getattr(df, '_data'), '_key')
assert df.size == data.size
pd.testing.assert_series_equal(df.dtypes, data.dtypes)
# accessing non-existing attributes should trigger exception
with pytest.raises(AttributeError):
_ = df.zzz # noqa: F841
def test_series_getitem(setup):
data = pd.Series(np.random.rand(10))
series = md.Series(data)
assert series[1].execute().fetch() == data[1]
data = pd.Series(np.random.rand(10), name='a')
series = md.Series(data, chunk_size=4)
for i in range(10):
series1 = series[i]
assert series1.execute().fetch() == data[i]
series2 = series[[0, 1, 2, 3, 4]]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[[0, 1, 2, 3, 4]])
series3 = series[[4, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[[4, 3, 2, 1, 0]])
series4 = series[[1, 2, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[[1, 2, 3, 2, 1, 0]])
#
index = ['i' + str(i) for i in range(20)]
data = pd.Series(np.random.rand(20), index=index, name='a')
series = md.Series(data, chunk_size=3)
for idx in index:
series1 = series[idx]
assert series1.execute().fetch() == data[idx]
selected = ['i1', 'i2', 'i3', 'i4', 'i5']
series2 = series[selected]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[selected])
selected = ['i4', 'i7', 'i0', 'i1', 'i5']
series3 = series[selected]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[selected])
selected = ['i0', 'i1', 'i5', 'i4', 'i0', 'i1']
series4 = series[selected]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[selected])
selected = ['i0']
series5 = series[selected]
pd.testing.assert_series_equal(
series5.execute().fetch(), data[selected])
data = pd.Series(np.random.rand(10,))
series = md.Series(data, chunk_size=3)
selected = series[:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:2])
selected = series[2:8:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[2:8:2])
data = pd.Series(np.random.rand(9), index=['c' + str(i) for i in range(9)])
series = md.Series(data, chunk_size=3)
selected = series[:'c2']
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:'c2'])
selected = series['c2':'c9']
pd.testing.assert_series_equal(
selected.execute().fetch(), data['c2':'c9'])
def test_head(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.head().execute().fetch(), data.head())
pd.testing.assert_frame_equal(
df.head(3).execute().fetch(), data.head(3))
pd.testing.assert_frame_equal(
df.head(-3).execute().fetch(), data.head(-3))
pd.testing.assert_frame_equal(
df.head(8).execute().fetch(), data.head(8))
pd.testing.assert_frame_equal(
df.head(-8).execute().fetch(), data.head(-8))
pd.testing.assert_frame_equal(
df.head(13).execute().fetch(), data.head(13))
pd.testing.assert_frame_equal(
df.head(-13).execute().fetch(), data.head(-13))
def test_tail(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.tail().execute().fetch(), data.tail())
pd.testing.assert_frame_equal(
df.tail(3).execute().fetch(), data.tail(3))
pd.testing.assert_frame_equal(
df.tail(-3).execute().fetch(), data.tail(-3))
pd.testing.assert_frame_equal(
df.tail(8).execute().fetch(), data.tail(8))
pd.testing.assert_frame_equal(
df.tail(-8).execute().fetch(), data.tail(-8))
pd.testing.assert_frame_equal(
df.tail(13).execute().fetch(), data.tail(13))
pd.testing.assert_frame_equal(
df.tail(-13).execute().fetch(), data.tail(-13))
def test_at(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
data2 = data.copy()
data2.index = np.arange(10)
df2 = md.DataFrame(data2, chunk_size=3)
with pytest.raises(ValueError):
_ = df.at[['i3, i4'], 'c1']
result = df.at['i3', 'c1'].execute().fetch()
assert result == data.at['i3', 'c1']
result = df['c1'].at['i2'].execute().fetch()
assert result == data['c1'].at['i2']
result = df2.at[3, 'c2'].execute().fetch()
assert result == data2.at[3, 'c2']
result = df2.loc[3].at['c2'].execute().fetch()
assert result == data2.loc[3].at['c2']
def test_iat(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
with pytest.raises(ValueError):
_ = df.iat[[1, 2], 3]
result = df.iat[3, 4].execute().fetch()
assert result == data.iat[3, 4]
result = df.iloc[:, 2].iat[3].execute().fetch()
assert result == data.iloc[:, 2].iat[3]
def test_setitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
data2 = np.random.rand(10)
data3 = np.random.rand(10, 2)
df = md.DataFrame(data, chunk_size=3)
df['c3'] = df['c3'] + 1
df['c10'] = 10
df[4] = mt.tensor(data2, chunk_size=4)
df['d1'] = df['c4'].mean()
df['e1'] = data2 * 2
result = df.execute().fetch()
expected = data.copy()
expected['c3'] = expected['c3'] + 1
expected['c10'] = 10
expected[4] = data2
expected['d1'] = data['c4'].mean()
expected['e1'] = data2 * 2
pd.testing.assert_frame_equal(result, expected)
# test set multiple cols with scalar
df = md.DataFrame(data, chunk_size=3)
df[['c0', 'c2']] = 1
df[['c1', 'c10']] = df['c4'].mean()
df[['c11', 'c12']] = mt.tensor(data3, chunk_size=4)
result = df.execute().fetch()
expected = data.copy()
expected[['c0', 'c2']] = 1
expected[['c1', 'c10']] = expected['c4'].mean()
expected[['c11', 'c12']] = data3
pd.testing.assert_frame_equal(result, expected)
# test set multiple rows
df = md.DataFrame(data, chunk_size=3)
df[['c1', 'c4', 'c10']] = df[['c2', 'c3', 'c4']] * 2
result = df.execute().fetch()
expected = data.copy()
expected[['c1', 'c4', 'c10']] = expected[['c2', 'c3', 'c4']] * 2
pd.testing.assert_frame_equal(result, expected)
# test setitem into empty DataFrame
df = md.DataFrame()
df['a'] = md.Series(np.arange(1, 11), chunk_size=3)
pd.testing.assert_index_equal(df.index_value.to_pandas(),
pd.RangeIndex(10))
result = df.execute().fetch()
expected = pd.DataFrame()
expected['a'] = pd.Series(np.arange(1, 11))
pd.testing.assert_frame_equal(result, expected)
df['b'] = md.Series(np.arange(2, 12), index=pd.RangeIndex(1, 11),
chunk_size=3)
result = df.execute().fetch()
expected['b'] = pd.Series(np.arange(2, 12), index=pd.RangeIndex(1, 11))
pd.testing.assert_frame_equal(result, expected)
def test_reset_index_execution(setup):
data = pd.DataFrame([('bird', 389.0),
('bird', 24.0),
('mammal', 80.5),
('mammal', np.nan)],
index=['falcon', 'parrot', 'lion', 'monkey'],
columns=('class', 'max_speed'))
df = md.DataFrame(data)
df2 = df.reset_index()
result = df2.execute().fetch()
expected = data.reset_index()
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=2)
df2 = df.reset_index()
result = df2.execute().fetch()
expected = data.reset_index()
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=1)
df2 = df.reset_index(drop=True)
result = df2.execute().fetch()
expected = data.reset_index(drop=True)
pd.testing.assert_frame_equal(result, expected)
index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
('bird', 'parrot'),
('mammal', 'lion'),
('mammal', 'monkey')],
names=['class', 'name'])
data = pd.DataFrame([('bird', 389.0),
('bird', 24.0),
('mammal', 80.5),
('mammal', np.nan)],
index=index,
columns=('type', 'max_speed'))
df = md.DataFrame(data, chunk_size=1)
df2 = df.reset_index(level='class')
result = df2.execute().fetch()
expected = data.reset_index(level='class')
pd.testing.assert_frame_equal(result, expected)
columns = pd.MultiIndex.from_tuples([('speed', 'max'), ('species', 'type')])
data.columns = columns
df = md.DataFrame(data, chunk_size=2)
df2 = df.reset_index(level='class', col_level=1, col_fill='species')
result = df2.execute().fetch()
expected = data.reset_index(level='class', col_level=1, col_fill='species')
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=3)
df.reset_index(level='class', col_level=1, col_fill='species', inplace=True)
result = df.execute().fetch()
expected = data.reset_index(level='class', col_level=1, col_fill='species')
pd.testing.assert_frame_equal(result, expected)
# Test Series
s = pd.Series([1, 2, 3, 4], name='foo',
index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
series = md.Series(s)
s2 = series.reset_index(name='bar')
result = s2.execute().fetch()
expected = s.reset_index(name='bar')
pd.testing.assert_frame_equal(result, expected)
series = md.Series(s, chunk_size=2)
s2 = series.reset_index(drop=True)
result = s2.execute().fetch()
expected = s.reset_index(drop=True)
pd.testing.assert_series_equal(result, expected)
# Test Unknown shape
data1 = pd.DataFrame(np.random.rand(10, 3), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9])
df1 = md.DataFrame(data1, chunk_size=5)
data2 = pd.DataFrame(np.random.rand(10, 3), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3])
df2 = md.DataFrame(data2, chunk_size=6)
df = (df1 + df2).reset_index(incremental_index=True)
result = df.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(12))
# Inconsistent with Pandas when input dataframe's shape is unknown.
result = result.sort_values(by=result.columns[0])
expected = (data1 + data2).reset_index()
np.testing.assert_array_equal(result.to_numpy(), expected.to_numpy())
data1 = pd.Series(np.random.rand(10,), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9])
series1 = md.Series(data1, chunk_size=3)
data2 = pd.Series(np.random.rand(10,), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3])
series2 = md.Series(data2, chunk_size=3)
df = (series1 + series2).reset_index(incremental_index=True)
result = df.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(12))
# Inconsistent with Pandas when input dataframe's shape is unknown.
result = result.sort_values(by=result.columns[0])
expected = (data1 + data2).reset_index()
np.testing.assert_array_equal(result.to_numpy(), expected.to_numpy())
series1 = md.Series(data1, chunk_size=3)
series1.reset_index(inplace=True, drop=True)
result = series1.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(10))
# case from https://github.com/mars-project/mars/issues/1286
data = pd.DataFrame(np.random.rand(10, 3), columns=list('abc'))
df = md.DataFrame(data, chunk_size=3)
r = df.sort_values('a').reset_index(drop=True, incremental_index=True)
result = r.execute().fetch()
expected = data.sort_values('a').reset_index(drop=True)
pd.testing.assert_frame_equal(result, expected)
def test_rename(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.rand(10, 4), columns=['A', 'B', 'C', 'D'])
df = md.DataFrame(raw, chunk_size=3)
with pytest.warns(Warning):
df.rename(str, errors='raise')
with pytest.raises(NotImplementedError):
df.rename({"A": "a", "B": "b"}, axis=1, copy=False)
r = df.rename(str)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename(str))
r = df.rename({"A": "a", "B": "b"}, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename({"A": "a", "B": "b"}, axis=1))
df.rename({"A": "a", "B": "b"}, axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename({"A": "a", "B": "b"}, axis=1))
raw = pd.DataFrame(rs.rand(10, 4),
columns=pd.MultiIndex.from_tuples((('A', 'C'), ('A', 'D'), ('B', 'E'), ('B', 'F'))))
df = md.DataFrame(raw, chunk_size=3)
r = df.rename({"C": "a", "D": "b"}, level=1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename({"C": "a", "D": "b"}, level=1, axis=1))
raw = pd.Series(rs.rand(10), name='series')
series = md.Series(raw, chunk_size=3)
r = series.rename('new_series')
pd.testing.assert_series_equal(r.execute().fetch(),
raw.rename('new_series'))
r = series.rename(lambda x: 2 ** x)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.rename(lambda x: 2 ** x))
with pytest.raises(TypeError):
series.name = {1: 10, 2: 20}
series.name = 'new_series'
pd.testing.assert_series_equal(series.execute().fetch(),
raw.rename('new_series'))
raw = pd.MultiIndex.from_frame(pd.DataFrame(rs.rand(10, 2), columns=['A', 'B']))
idx = md.Index(raw)
r = idx.rename(['C', 'D'])
pd.testing.assert_index_equal(r.execute().fetch(),
raw.rename(['C', 'D']))
r = idx.set_names('C', level=0)
pd.testing.assert_index_equal(r.execute().fetch(),
raw.set_names('C', level=0))
def test_rename_axis(setup):
rs = np.random.RandomState(0)
# test dataframe cases
raw = pd.DataFrame(rs.rand(10, 4), columns=['A', 'B', 'C', 'D'])
df = md.DataFrame(raw, chunk_size=3)
r = df.rename_axis('idx')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename_axis('idx'))
r = df.rename_axis('cols', axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename_axis('cols', axis=1))
df.rename_axis('c', axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename_axis('c', axis=1))
df.columns.name = 'df_cols'
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename_axis('df_cols', axis=1))
# test dataframe cases with MultiIndex
raw = pd.DataFrame(
rs.rand(10, 4), columns=pd.MultiIndex.from_tuples([('A', 1), ('B', 2), ('C', 3), ('D', 4)]))
df = md.DataFrame(raw, chunk_size=3)
df.columns.names = ['c1', 'c2']
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename_axis(['c1', 'c2'], axis=1))
df.columns.set_names('c2_1', level=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename_axis(['c1', 'c2_1'], axis=1))
# test series cases
raw = pd.Series(rs.rand(10))
s = md.Series(raw, chunk_size=3)
r = s.rename_axis('idx')
pd.testing.assert_series_equal(r.execute().fetch(),
raw.rename_axis('idx'))
s.index.name = 'series_idx'
pd.testing.assert_series_equal(s.execute().fetch(),
raw.rename_axis('series_idx'))
def test_insert(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.rand(10, 4), columns=['A', 'B', 'C', 'D'])
with pytest.raises(ValueError):
tensor = mt.tensor(rs.rand(10, 10), chunk_size=4)
df = md.DataFrame(raw.copy(deep=True), chunk_size=3)
df.insert(4, 'E', tensor)
df = md.DataFrame(raw.copy(deep=True), chunk_size=3)
df.insert(4, 'E', 0)
raw_dup = raw.copy(deep=True)
raw_dup.insert(4, 'E', 0)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw_dup)
raw_tensor = rs.rand(10)
tensor = mt.tensor(raw_tensor, chunk_size=4)
df = md.DataFrame(raw.copy(deep=True), chunk_size=3)
df.insert(4, 'E', tensor)
raw_dup = raw.copy(deep=True)
raw_dup.insert(4, 'E', raw_tensor)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw_dup)
def _wrap_execute_data_source(limit, op_cls):
def _execute_data_source(ctx, op):
op_cls.execute(ctx, op)
result = ctx[op.outputs[0].key]
if len(result) > limit:
raise RuntimeError('have data more than expected') # pragma: no cover
return _execute_data_source
def _wrap_execute_data_source_usecols(usecols, op_cls):
def _execute_data_source(ctx, op): # pragma: no cover
op_cls.execute(ctx, op)
result = ctx[op.outputs[0].key]
if not isinstance(usecols, list):
if not isinstance(result, pd.Series):
raise RuntimeError('Out data should be a Series, '
f'got {type(result)}')
elif len(result.columns) > len(usecols):
params = dict((k, getattr(op, k, None)) for k in op._keys_
if k not in op._no_copy_attrs_)
raise RuntimeError(f'have data more than expected, got {result.columns}, '
f'result {result}, op params {params}')
return _execute_data_source
def _wrap_execute_data_source_mixed(limit, usecols, op_cls):
def _execute_data_source(ctx, op): # pragma: no cover
op_cls.execute(ctx, op)
result = ctx[op.outputs[0].key]
if not isinstance(usecols, list):
if not isinstance(result, pd.Series):
raise RuntimeError('Out data should be a Series')
elif len(result.columns) > len(usecols):
raise RuntimeError('have data more than expected')
if len(result) > limit:
raise RuntimeError('have data more than expected')
return _execute_data_source
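# Note: the three _wrap_execute_data_source* helpers above wrap a data-source
# operand's execute() purely for testing. If the operand still produces more
# rows (or more columns than the requested usecols) than the pushed-down
# head()/column-pruning optimization should allow, the wrapper raises
# RuntimeError, which is how test_optimization below detects a missing
# optimization.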
def test_optimization(setup):
import sqlalchemy as sa
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'test_head.csv')
rs = np.random.RandomState(0)
pd_df = pd.DataFrame({'a': rs.randint(1000, size=(2000,)).astype(np.int64),
'b': rs.randint(1000, size=(2000,)).astype(np.int64),
'c': ['sss' for _ in range(2000)],
'd': ['eeee' for _ in range(2000)]})
pd_df.to_csv(filename, index=False)
size = os.path.getsize(filename)
chunk_bytes = size / 3 - 2
df = md.read_csv(filename, chunk_bytes=chunk_bytes)
cols = ['b', 'a', 'c']
r = df[cols]
operand_executors = {
DataFrameReadCSV: _wrap_execute_data_source_usecols(cols, DataFrameReadCSV)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
expected = pd_df[cols]
result.reset_index(drop=True, inplace=True)
pd.testing.assert_frame_equal(result, expected)
cols = ['b', 'a', 'b']
r = df[cols].head(20)
operand_executors = {
DataFrameReadCSV: _wrap_execute_data_source_usecols(cols, DataFrameReadCSV)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
expected = pd_df[cols].head(20)
result.reset_index(drop=True, inplace=True)
pd.testing.assert_frame_equal(result, expected)
r = df['c']
operand_executors = {
DataFrameReadCSV: _wrap_execute_data_source_usecols('c', DataFrameReadCSV)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
expected = pd_df['c']
result.reset_index(drop=True, inplace=True)
pd.testing.assert_series_equal(result, expected)
r = df['d'].head(3)
operand_executors = {
DataFrameReadCSV: _wrap_execute_data_source_mixed(3, 'd', DataFrameReadCSV)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
expected = pd_df['d'].head(3)
pd.testing.assert_series_equal(result, expected)
# test DataFrame.head
r = df.head(3)
operand_executors = {
DataFrameReadCSV: _wrap_execute_data_source(3, DataFrameReadCSV)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
expected = pd_df.head(3)
pd.testing.assert_frame_equal(result, expected)
# test DataFrame.tail
r = df.tail(3)
result = r.execute().fetch()
expected = pd_df.tail(3)
pd.testing.assert_frame_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
# test head more than 1 chunk
r = df.head(99)
result = r.execute().fetch()
result.reset_index(drop=True, inplace=True)
expected = pd_df.head(99)
pd.testing.assert_frame_equal(result, expected)
# test Series.tail more than 1 chunk
r = df.tail(99)
result = r.execute().fetch()
expected = pd_df.tail(99)
pd.testing.assert_frame_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
# test head number greater than limit
df = md.read_csv(filename, chunk_bytes=chunk_bytes)
r = df.head(1100)
with pytest.raises(RuntimeError):
operand_executors = {
DataFrameReadCSV: _wrap_execute_data_source(3, DataFrameReadCSV)}
r.execute(extra_config={'operand_executors': operand_executors})
result = r.execute().fetch()
expected = pd_df.head(1100)
pd.testing.assert_frame_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
filename = os.path.join(tempdir, 'test_sql.db')
conn = sa.create_engine('sqlite:///' + filename)
pd_df.to_sql('test_sql', conn)
df = md.read_sql('test_sql', conn, index_col='index', chunk_size=20)
# test DataFrame.head
r = df.head(3)
operand_executors = {
DataFrameReadSQL: _wrap_execute_data_source(3, DataFrameReadSQL)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
result.index.name = None
expected = pd_df.head(3)
pd.testing.assert_frame_equal(result, expected)
# test head on read_parquet
filename = os.path.join(tempdir, 'test_parquet.db')
pd_df.to_parquet(filename, index=False, compression='gzip')
engines = []
if pa is not None:
engines.append('pyarrow')
if fp is not None:
engines.append('fastparquet')
for engine in engines:
df = md.read_parquet(filename, engine=engine)
r = df.head(3)
operand_executors = {
DataFrameReadParquet: _wrap_execute_data_source(3, DataFrameReadParquet)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
expected = pd_df.head(3)
pd.testing.assert_frame_equal(result, expected)
dirname = os.path.join(tempdir, 'test_parquet2')
os.makedirs(dirname)
pd_df[:1000].to_parquet(os.path.join(dirname, 'q1.parquet'))
pd_df[1000:].to_parquet(os.path.join(dirname, 'q2.parquet'))
df = md.read_parquet(dirname)
r = df.head(3)
operand_executors = {
DataFrameReadParquet: _wrap_execute_data_source(3, DataFrameReadParquet)}
result = r.execute(extra_config={'operand_executors': operand_executors}).fetch()
expected = pd_df.head(3)
pd.testing.assert_frame_equal(result, expected)
def test_reindex_execution(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=4)
for enable_sparse in [True, False, None]:
r = df.reindex(index=mt.arange(10, 1, -1, chunk_size=3),
enable_sparse=enable_sparse)
result = r.execute().fetch()
expected = data.reindex(index=np.arange(10, 1, -1))
pd.testing.assert_frame_equal(result, expected)
r = df.reindex(columns=['c5', 'c6', 'c2'],
enable_sparse=enable_sparse)
result = r.execute().fetch()
expected = data.reindex(columns=['c5', 'c6', 'c2'])
pd.testing.assert_frame_equal(result, expected)
for enable_sparse in [True, False]:
r = df.reindex(index=[5, 11, 1], columns=['c5', 'c6', 'c2'],
enable_sparse=enable_sparse)
result = r.execute().fetch()
expected = data.reindex(index=[5, 11, 1], columns=['c5', 'c6', 'c2'])
pd.testing.assert_frame_equal(result, expected)
r = df.reindex(index=mt.tensor([2, 4, 10]),
columns=['c2', 'c3', 'c5', 'c7'],
method='bfill',
enable_sparse=enable_sparse)
result = r.execute().fetch()
expected = data.reindex(index=[2, 4, 10],
columns=['c2', 'c3', 'c5', 'c7'],
method='bfill')
pd.testing.assert_frame_equal(result, expected)
for fill_value, test_fill_value in \
[(3, 3), (df.iloc[:, 0].max(), data.iloc[:, 0].max())]:
r = df.reindex(index=mt.tensor([2, 4, 10]),
columns=['c2', 'c3', 'c5', 'c7'],
fill_value=fill_value,
enable_sparse=enable_sparse)
result = r.execute().fetch()
expected = data.reindex(index=[2, 4, 10],
columns=['c2', 'c3', 'c5', 'c7'],
fill_value=test_fill_value)
|
pd.testing.assert_frame_equal(result, expected)
|
pandas.testing.assert_frame_equal
|
"""
data_ops
This file contains access to data and methods for assembling it.
- <NAME>, 2018
"""
import argparse
import os
import random
from collections import Counter, OrderedDict, defaultdict
import networkx as nx
import numpy as np
import pandas as pd
import scipy.io as sio
import tensorflow as tf
from log_control import *
from utils import Utils
kl = tf.keras.layers
DATA_DIR_DICT = {
'seal': Utils.data_file('public_seal'),
'snap': Utils.data_file('public_snap'),
'snap_csv': Utils.data_file('public_snap'),
'wsn': Utils.data_file('public_wsn')
}
DATA_PARSE = {
'seal': 'mat',
'snap': 'txt',
'snap_csv': 'csv',
'wsn': 'csv_wsn'
}
class DataHandling(object):
def __init__(self,
dataset_name,
dataset_type='seal',
ordered_args=None):
self.dataset_name = dataset_name
self.dataset_type = dataset_type
self.ordered_args = ordered_args
self.g = None
self.g_features = None
if DATA_PARSE[self.dataset_type] == 'mat':
self.adj_mat, self.g_features = self._load_adj_from_mat_file(dataset_name)
self.g = nx.from_scipy_sparse_matrix(self.adj_mat)
elif DATA_PARSE[self.dataset_type] == 'txt':
self.g = self._load_nxg_from_txt_file(dataset_name)
elif DATA_PARSE[self.dataset_type] == 'csv':
self.g = self._load_nxg_from_csv_edgelist(dataset_name)
elif DATA_PARSE[self.dataset_type] == 'csv_wsn':
self.g = self._load_nxg_from_csv_wsn(dataset_name)
self.g = nx.convert_node_labels_to_integers(self.g, first_label=1)
self.node_ref = {i: i for i in self.g.nodes}
self.node_len = len(self.g.nodes)
self.learning_dataset = None
self.is_data_generated = False
self.additional_embeddings = []
if self.ordered_args.get('use_node_features', True):
if self.g_features is not None:
node_feature_layer = self._get_node_features_from_scipy_sparse_matrix(self.g_features)
self.additional_embeddings.append(node_feature_layer)
if len(self.additional_embeddings):
logi("Additional embeddings will be used")
self.baselines_only = False
if self.ordered_args['for_baselines']:
self.baselines_only = True
self.metadata_file = None
if self.ordered_args['visualize']:
# Generate metadata file to visualize the data
FIELDS = {'degree': nx.degree,
'centrality': nx.degree_centrality,
'triangles': nx.triangles}
logi("Generating metadata for visualization using fields {}".format(FIELDS))
field_dicts = {k: dict(v(self.g)) for k, v in FIELDS.items()}
metadata_dict = defaultdict(dict)
for field, fd in field_dicts.items():
for k, v in fd.items():
metadata_dict[field][k] = v
df = pd.DataFrame(metadata_dict)
logi("Data frame shape: {}".format(df.shape))
meta_filename = os.path.join(DATA_DIR_DICT[self.dataset_type], "meta_{}.tsv".format(self.dataset_name))
df.to_csv(meta_filename, sep='\t', index_label='Node')
logi("Metadata saved to {}".format(meta_filename))
self.metadata_file = meta_filename
def _load_adj_from_mat_file(self, dataset_name):
data_file = os.path.join(DATA_DIR_DICT[self.dataset_type], '%s.mat' % dataset_name)
file_data = sio.loadmat(data_file)
if 'group' in file_data.keys():
return file_data['net'], file_data['group']
return file_data['net'], None
def _load_nxg_from_csv_edgelist(self, dataset_name):
data_file = os.path.join(DATA_DIR_DICT[self.dataset_type], '%s.csv' % dataset_name)
graph = nx.Graph()
data = pd.read_csv(data_file, header=None, index_col=None)
for idx, row in data.iterrows():
from_node, to_node = row[0], row[1]
graph.add_edge(int(from_node), int(to_node))
return graph
def _load_nxg_from_csv_wsn(self, dataset_name):
data_file = os.path.join(DATA_DIR_DICT[self.dataset_type], '%s.csv' % dataset_name)
graph = nx.DiGraph()
data =
|
pd.read_csv(data_file, header=None, index_col=None, names=['from', 'to', 'rating'])
|
pandas.read_csv
|
import os
import pandas as pd
import sp_util
from sp_util import OptionalStr
class DSException (Exception):
pass
class DataStore:
def __init__(self, root: OptionalStr = None, name: OptionalStr = None):
self.root: str = sp_util.root_or_default(root)
self.name: str = sp_util.name_or_default(name)
self.path: str = os.path.join(self.root, self.name)
self.validate()
def __str__(self) -> str:
return f"DataStore[{self.path}]"
def __repr__(self) -> str:
return str(self)
def validate(self):
if len(self.root) == 0:
raise DSException("Missing datastore root")
if not os.path.exists(self.path):
raise DSException(f"Datastore {self.path} does not exist")
def read_data(self, tag: str, symbol: str) -> pd.DataFrame:
if tag == sp_util.history_tag():
names = ["date", "open", "high", "low", "close", "adj_close", "volume"]
else:
names = ["date", "dividend"]
symbol_path = self.make_symbol_path(tag, symbol)
if os.path.exists(symbol_path):
return pd.read_csv(symbol_path,
names=names,
header=None,
converters={"date": pd.Timestamp},
index_col="date")
else:
return
|
pd.DataFrame({f: [] for f in names})
|
pandas.DataFrame
|
import pandas as pd
import matplotlib.pyplot as plt
from pyshop import ShopSession
license_path = r''
shop = ShopSession(license_path='', silent=False)
# Set time resolution
starttime = pd.Timestamp('2018-02-27')
endtime = pd.Timestamp('2018-02-28')
shop.set_time_resolution(starttime=starttime, endtime=endtime, timeunit='hour')
# Add scenarios
n_scen = 12
for i in range(1, n_scen + 1):
scen_name = 'S' + str(i)
if i > 1:
scen = shop.model.scenario.add_object(scen_name)
else:
scen = shop.model.scenario[scen_name]
scen.scenario_id.set(i)
scen.probability.set(1.0/n_scen)
scen.common_scenario.set(pd.Series([1, i], index=[starttime, starttime + pd.Timedelta(hours=1)]))
# Add topology
rsv1 = shop.model.reservoir.add_object('Reservoir1')
rsv1.max_vol.set(12)
rsv1.lrl.set(90)
rsv1.hrl.set(100)
rsv1.vol_head.set(dict(xy=[[0, 90], [12, 100], [14, 101]], ref=0))
rsv1.flow_descr.set(dict(xy=[[100, 0], [101, 1000]], ref=0))
plant1 = shop.model.plant.add_object('Plant1')
plant1.outlet_line.set(40)
plant1.main_loss.set([0.0002])
plant1.penstock_loss.set([0.0001])
p1g1 = shop.model.generator.add_object('Plant1_G1')
plant1.connect_to(p1g1)
p1g1.penstock.set(1)
p1g1.p_min.set(25)
p1g1.p_max.set(100)
p1g1.p_nom.set(100)
p1g1.startcost.set(500)
p1g1.gen_eff_curve.set(pd.Series([95, 98], index=[0, 100]))
# p1g1.gen_eff_curve.set(dict(xy=[[0, 95], [100, 98]], ref=0)) # Alternative way to set eff curve
p1g1.turb_eff_curves.set([pd.Series([80, 95, 90], index=[25, 90, 100], name=90),
pd.Series([82, 98, 92], index=[25, 90, 100], name=100)])
# p1g1.turb_eff_curves.set([dict(ref=90, xy=[[25, 80], [90, 95], [100, 90]]),
# dict(ref=100, xy=[[25, 82], [90, 98], [100, 92]])]) # Alternative way to set curve
rsv2 = shop.model.reservoir.add_object('Reservoir2')
rsv2.max_vol.set(5)
rsv2.lrl.set(40)
rsv2.hrl.set(50)
rsv2.vol_head.set(pd.Series([40, 50, 51], index=[0, 5, 6]))
# rsv2.vol_head.set(dict(xy=[[0, 40], [5, 50], [6, 51]], ref=0))
rsv2.flow_descr.set(pd.Series([0, 1000], index=[50, 51]))
# rsv2.flow_descr.set(dict(xy=[[50, 0], [51, 1000]], ref=0))
plant2 = shop.model.plant.add_object('Plant2')
plant2.outlet_line.set(0)
plant2.main_loss.set([0.0002])
plant2.penstock_loss.set([0.0001])
p2g1 = shop.model.generator.add_object('Plant2_G1')
plant2.connect_to(p2g1)
p2g1.penstock.set(1)
p2g1.p_min.set(25)
p2g1.p_max.set(100)
p2g1.p_nom.set(100)
p2g1.startcost.set(500)
p2g1.gen_eff_curve.set(pd.Series([95, 98], index=[0, 100]))
p2g1.turb_eff_curves.set([pd.Series([80, 95, 90], index=[25, 90, 100], name=90),
pd.Series([82, 98, 92], index=[25, 90, 100], name=100)])
# Connect objects
rsv1.connect_to(plant1)
plant1.connect_to(rsv2)
rsv2.connect_to(plant2)
rsv1.start_head.set(92)
rsv2.start_head.set(43)
rsv1.energy_value_input.set(39.7)
rsv2.energy_value_input.set(38.6)
shop.model.market.add_object('Day_ahead')
da = shop.model.market.Day_ahead
da.sale_price.set(pd.DataFrame({'1': [39, 38.5],
'2': [39, 39.0],
'3': [39, 39.5],
'4': [39, 40],
'5': [39, 38.5],
'6': [39, 39.0],
'7': [39, 39.5],
'8': [39, 40],
'9': [39, 38.5],
'10': [39, 39.0],
'11': [39, 39.5],
'12': [39, 40]
}, index=[starttime, starttime + pd.Timedelta(hours=1)],
columns=['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']))
da.buy_price.set(40.01)
da.max_buy.set(9999)
da.max_sale.set(9999)
rsv1.inflow.set(pd.DataFrame({'1': [101, 50],
'2': [101, 50],
'3': [101, 50],
'4': [101, 50],
'5': [101, 100],
'6': [101, 100],
'7': [101, 100],
'8': [101, 100],
'9': [101, 150],
'10': [101, 150],
'11': [101, 150],
'12': [101, 150]
}, index=[starttime, starttime +
|
pd.Timedelta(hours=1)
|
pandas.Timedelta
|
import warnings
import pandas as pd
warnings.filterwarnings('ignore')
import time
from autox.autox_server.util import log
from tqdm import tqdm
def fe_window(G_df_dict, G_data_info, G_hist, is_train, remain_time):
# build window (extension) features on the G_df_dict['BIG'] table
start = time.time()
log('[+] feature engineer, window')
big_size = G_df_dict['BIG'].shape[0]
time_col = G_data_info['target_time']
if is_train:
G_hist['FE_window'] = []
if G_data_info['time_series_data'] == 'true':
if G_hist['big_data_type'][time_col] == 'Unix_timestamp':
G_df_dict['BIG'] = G_df_dict['BIG'].sort_values(by=time_col)
window_features = []
for col in G_hist['big_cols_cat']:
if big_size * 0.01 < G_df_dict['BIG'][col].nunique() < big_size * 0.3:
window_features.append(col)
G_hist['FE_window'] = window_features
log("window features: {}".format(window_features))
G_df_dict['FE_window'] =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import streamlit as st
import pandas as pd
import altair as alt
import sqlite3
from sqlite3 import Connection
import requests
import json
import plotly.express as px
# spotify stuff
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
def get_spotify_token():
url = 'https://accounts.spotify.com/api/token'
grant_type = 'client_credentials'
body_params = {'grant_type' : grant_type}
r = requests.post(url, data=body_params, auth = (SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET))
r.raise_for_status()
token_raw = json.loads(r.text)
token = token_raw["access_token"]
return token
def spotify_search(song):
token = get_spotify_token()
url = f'https://api.spotify.com/v1/search?q={song}&type=track&limit=1'
headers = {
'Accept': 'application/json',
'Content-type': 'application/json',
'Authorization': f'Bearer {token}'
}
r = requests.get(url, headers=headers)
r.raise_for_status()
if r.status_code == 200:
data = r.json()
result = data['tracks']['items'][0]
thirty_sec_preview_url = result['preview_url']
return thirty_sec_preview_url
else:
raise Exception('Failed to get Spotify data.')
@st.cache(hash_funcs={Connection: id}) # add caching so we load the data only once
def get_connection(path_to_db):
# connect to db
try:
conn = sqlite3.connect(path_to_db, check_same_thread=False)
return conn
except Exception as e:
print(e)
def get_data(conn: Connection):
sql_query = """
SELECT
song, artist, album, date, energy, valence, danceability, instrumentalness, tempo
FROM
acoustic_features
WHERE
artist LIKE '%<NAME>%'
ORDER BY date DESC
"""
df = pd.read_sql(sql_query, con=conn)
df['date'] = pd.to_datetime(df['date'])
return df
def get_bowie_data(conn: Connection,feature):
df = pd.read_sql(f'select song, tempo,round({feature},2) as {feature},cast(valence*10 as int) as valence,date,album from acoustic_features where artist="<NAME>"', con=conn)
df['date'] = pd.to_datetime(df['date'])
return df
def get_feature_avg(conn: Connection,feature):
df =
|
pd.read_sql(f'select song, date, album, round(avg({feature}),2) as avg_feature from acoustic_features where artist="<NAME>" group by album', con=conn)
|
pandas.read_sql
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import pandas as pd
import numpy as np
import pathlib
import pickle
from datetime import datetime, timezone
from emhass.retrieve_hass import retrieve_hass
from emhass.optimization import optimization
from emhass.forecast import forecast
from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger
# the root folder
root = str(get_root(__file__, num_parent=2))
# create logger
logger, ch = get_logger(__name__, root, save_to_file=False)
class TestOptimization(unittest.TestCase):
def setUp(self):
get_data_from_file = True
params = None
retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(pathlib.Path(root+'/config_emhass.yaml'), use_secrets=False)
self.retrieve_hass_conf, self.optim_conf, self.plant_conf = \
retrieve_hass_conf, optim_conf, plant_conf
self.rh = retrieve_hass(self.retrieve_hass_conf['hass_url'], self.retrieve_hass_conf['long_lived_token'],
self.retrieve_hass_conf['freq'], self.retrieve_hass_conf['time_zone'],
params, root, logger)
if get_data_from_file:
with open(pathlib.Path(root+'/data/test_df_final.pkl'), 'rb') as inp:
self.rh.df_final, self.days_list, self.var_list = pickle.load(inp)
else:
self.days_list = get_days_list(self.retrieve_hass_conf['days_to_retrieve'])
self.var_list = [self.retrieve_hass_conf['var_load'], self.retrieve_hass_conf['var_PV']]
self.rh.get_data(self.days_list, self.var_list,
minimal_response=False, significant_changes_only=False)
self.rh.prepare_data(self.retrieve_hass_conf['var_load'], load_negative = self.retrieve_hass_conf['load_negative'],
set_zero_min = self.retrieve_hass_conf['set_zero_min'],
var_replace_zero = self.retrieve_hass_conf['var_replace_zero'],
var_interp = self.retrieve_hass_conf['var_interp'])
self.df_input_data = self.rh.df_final.copy()
self.fcst = forecast(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
params, root, logger, get_data_from_file=get_data_from_file)
self.df_weather = self.fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
self.P_PV_forecast = self.fcst.get_power_from_weather(self.df_weather)
self.P_load_forecast = self.fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
self.df_input_data_dayahead = pd.concat([self.P_PV_forecast, self.P_load_forecast], axis=1)
self.df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
self.costfun = 'profit'
self.opt = optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
self.fcst.var_load_cost, self.fcst.var_prod_price,
self.costfun, root, logger)
self.df_input_data = self.fcst.get_load_cost_forecast(self.df_input_data)
self.df_input_data = self.fcst.get_prod_price_forecast(self.df_input_data)
self.input_data_dict = {
'retrieve_hass_conf': retrieve_hass_conf,
}
def test_perform_perfect_forecast_optim(self):
self.opt_res = self.opt.perform_perfect_forecast_optim(self.df_input_data, self.days_list)
self.assertIsInstance(self.opt_res, type(
|
pd.DataFrame()
|
pandas.DataFrame
|
import glob
import pandas as pd
import numpy as np
import config
from lcoc import afdc
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
##### Functions #####
###################
### Residential ###
###################
def res_rates_to_utils(scenario = 'baseline',
urdb_rates_file = 'outputs/cost-of-electricity/urdb-res-rates/res_rates.csv',
eia_cw_file = config.EIAID_TO_UTILITY_CW_PATH,
eia_utils_file = config.EIA_RES_PATH,
outpath = 'outputs/cost-of-electricity/res-utilities/'):
"""
Takes res urdb rates from urdb_path and combines with eia_utils_file to produce
utility-lvl annual avg cost of electricity estimates under the following scenarios:
'baseline' (replace eia cost of electricity w/ off-peak TOU rate, if applicable),
'no-tou' (eia cost of electricity only), 'tou-only' (only TOU rates
from URDB are considered).
"""
# Load/Preprocess EIA datasets
eiaid_cw = pd.read_csv(eia_cw_file)
eiaid_cw = eiaid_cw[['eiaid', 'entity', 'state']]
eiaid_utils = pd.read_csv(eia_utils_file)
eiaid_utils.rename(columns={'avg_price_cents_per_kwh': 'eia_cost_per_kwh'}, inplace=True)
eiaid_utils['eia_cost_per_kwh'] = eiaid_utils['eia_cost_per_kwh'] / 100
eiaid_utils = eiaid_utils[eiaid_utils.eiaid!=99999]
wm = lambda x: np.average(x, weights=eiaid_utils.loc[x.index, "customers"])
f = {'customers': 'sum', 'eia_cost_per_kwh': wm}
eiaid_utils = eiaid_utils.groupby(['entity', 'state']).agg(f).reset_index()
#eiaid_utils.columns = eiaid_utils.columns.droplevel(1)
eiaid_res_df = eiaid_cw.merge(eiaid_utils, how='right', on=['entity', 'state'])
eiaid_res_df = eiaid_res_df.drop_duplicates()
# Load URDB Rates
urdb_rates = pd.read_csv(urdb_rates_file, low_memory=False)
# Find Off-Peak TOU Price for URDB Rates
all_tou_rates_df = urdb_rates[urdb_rates.is_tou_rate==1]
eiaid_tou_rates_df = all_tou_rates_df.groupby('eiaid')['electricity_cost_per_kwh'].min().reset_index()
eiaid_tou_rates_df.rename(columns={'electricity_cost_per_kwh': 'offpeak_tou_cost_per_kwh'}, inplace=True)
# Baseline - {MIN((off-peak TOU, EIA average))}
if scenario == "baseline": #default
eiaid_res_df = eiaid_res_df.merge(eiaid_tou_rates_df, how='left', on='eiaid')
tou_rates_used, costs_incl_tou = 0, []
for i in range(len(eiaid_res_df)):
eia_cost = eiaid_res_df.iloc[i].eia_cost_per_kwh
offpeak_tou_cost = eiaid_res_df.iloc[i].offpeak_tou_cost_per_kwh
low_cost = min([eia_cost, offpeak_tou_cost])
if low_cost == offpeak_tou_cost:
tou_rates_used+=1
costs_incl_tou.append(low_cost)
eiaid_res_df['cost_per_kwh'] = costs_incl_tou
print("Complete, {0} utitilies represented ({1} TOU rates used).".format(len(eiaid_res_df),
tou_rates_used))
eiaid_res_df.to_csv(outpath+'res_utils.csv', index=False)
# No-TOU - "Business as Usual", EIA averages used (upper bound)
elif scenario == "no-tou":
eiaid_res_df['cost_per_kwh'] = eiaid_res_df['eia_cost_per_kwh']
print("Complete, {} utilities represented (no TOU rates used).".format(len(eiaid_res_df)))
eiaid_res_df.to_csv(outpath+"upper_bnd_res_utils.csv", index=False)
# TOU-Only - URDB TOU rates only (lower bound)
elif scenario == "tou-only":
eiaid_tou_rates_df['cost_per_kwh'] = eiaid_tou_rates_df['offpeak_tou_cost_per_kwh']
eiaid_tou_rates_df = eiaid_tou_rates_df.merge(eiaid_res_df[['eiaid', 'state', 'customers']], how='inner', on='eiaid')
print("Complete, {} utitilies represented (only TOU rates used).".format(len(eiaid_tou_rates_df)))
eiaid_tou_rates_df.to_csv(outpath+"lower_bnd_res_utils.csv", index=False)
else:
raise ValueError('scenario not in ["baseline", "no-tou", "tou-only"]')
return eiaid_res_df
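# Illustrative sketch only; the _sketch_ helper below is not part of the
# pipeline and uses made-up numbers. It shows the 'baseline' rule above:
# per utility, take MIN(EIA average rate, cheapest off-peak TOU rate),
# falling back to the EIA rate when no TOU rate exists in URDB.
def _sketch_baseline_rate_rule():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({'eia_cost_per_kwh': [0.13, 0.11],
                        'offpeak_tou_cost_per_kwh': [0.09, np.nan]})
    # row-wise min skips NaN, so the second utility keeps its EIA rate
    toy['cost_per_kwh'] = toy[['eia_cost_per_kwh',
                               'offpeak_tou_cost_per_kwh']].min(axis=1)
    return toy  # cost_per_kwh -> [0.09, 0.11]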
def res_utils_to_state(utils_file = 'outputs/cost-of-electricity/res-utilities/res_utils.csv',
outfile = 'outputs/cost-of-electricity/res-states/res_states_baseline.csv'):
"""
Takes utility-level cost of electricity and calculates customer-weighted state-level
cost of electricity for the baseline scenario (TOU & No-TOU).
"""
res_util_df = pd.read_csv(utils_file, low_memory=False)
states, cost_per_kwh, customers = [], [], []
for state in set(res_util_df['state']):
temp_df = res_util_df[res_util_df['state'] == state]
tot_customers = temp_df['customers'].sum()
wgt_cost = ((temp_df['cost_per_kwh'] * temp_df['customers']) / tot_customers).sum()
states.append(state)
customers.append(tot_customers)
cost_per_kwh.append(wgt_cost)
state_df = pd.DataFrame({'state': states,
'customers': customers,
'cost_per_kwh': cost_per_kwh})
#Add national estimate
nat_customers = state_df['customers'].sum()
nat_cost_per_kwh = ((state_df['cost_per_kwh'] * state_df['customers']) / nat_customers).sum()
nat_df = pd.DataFrame({'state': ['US'],
'customers': [nat_customers],
'cost_per_kwh': [nat_cost_per_kwh]})
state_df = pd.concat([state_df, nat_df]).reset_index(drop=True)
state_df.to_csv(outfile, index=False)
print('Complete, national cost of electricity is ${}/kWh.'.format(round(nat_cost_per_kwh,2)))
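# Illustrative sketch only (made-up numbers, helper not used by the pipeline):
# the customer-weighted state average computed above, written out on a toy frame.
def _sketch_customer_weighted_coe():
    import pandas as pd
    utils = pd.DataFrame({'state': ['CO', 'CO', 'WY'],
                          'customers': [100, 300, 50],
                          'cost_per_kwh': [0.10, 0.12, 0.09]})
    weighted = ((utils['cost_per_kwh'] * utils['customers']).groupby(utils['state']).sum()
                / utils.groupby('state')['customers'].sum())
    return weighted  # CO -> (0.10*100 + 0.12*300) / 400 = 0.115, WY -> 0.09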
def calculate_state_residential_lcoc(coe_file = 'outputs/cost-of-electricity/res-states/res_states_baseline.csv',
fixed_costs_path = 'data/fixed-costs/residential/',
annual_maint_frac = 0.01, #Annual cost of maintenance (fraction of equip costs)
veh_lifespan = 15,
veh_kwh_per_100miles = 29.82, #source: EIA
aavmt = 10781, #source: 2017 NHTS
fraction_residential_charging = 0.81, #source: EPRI study
fraction_home_l1_charging = 0.16, #source: EPRI study
dr = 0.035, #source: Mercatus
outfile = 'outputs/cost-of-charging/residential/res_states_baseline.csv'):
"""
Function calculates the state-level residential levelized cost of charging, taking
into account the average cost of electricity, fixed costs, and equipment
maintenance.
"""
# Load data
df = pd.read_csv(coe_file)
filenames = ['res_level1.txt', 'res_level2.txt']
fixed_cost_files = [fixed_costs_path + filename for filename in filenames]
fixed_costs = {}
for file in fixed_cost_files:
if 'level1' in file:
plug_typ = 'L1'
elif 'level2' in file:
plug_typ = 'L2'
plug_typ_dict = {}
with open (file) as f:
for line in f:
key, val = line.split(':')
plug_typ_dict[key] = float(val)
fixed_costs[plug_typ] = plug_typ_dict
# Calculate lifetime EVSE cost of maintenance (assumed to be 1% of equipment cost annually)
for plug_typ in fixed_costs.keys():
discounted_lifetime_maint_cost = 0
for i in range(1, veh_lifespan+1):
ann_maint_cost = annual_maint_frac * fixed_costs[plug_typ]['equipment']
discounted_ann_maint_cost = ann_maint_cost / (1+dr)**i
discounted_lifetime_maint_cost += discounted_ann_maint_cost
fixed_costs[plug_typ]['lifetime_evse_maint'] = discounted_lifetime_maint_cost
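# Note: the loop above is the present value of an ordinary annuity, so the
# same figure is given in closed form by
# ann_maint_cost * (1 - (1 + dr) ** -veh_lifespan) / dr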
# Calculate lifetime energy from residential charging
lifetime_miles = veh_lifespan * aavmt
veh_kwh_per_mile = veh_kwh_per_100miles / 100
lifetime_energy_kwh = lifetime_miles * veh_kwh_per_mile
lifetime_residential_energy_kwh = fraction_residential_charging * lifetime_energy_kwh
# Calculate lvl fixed costs for residential L1, L2 charging
try:
lvl_fixed_costs_l1 = (fixed_costs['L1']['equipment'] + fixed_costs['L1']['installation'] \
+ fixed_costs['L1']['lifetime_evse_maint']) / lifetime_residential_energy_kwh
except:
lvl_fixed_costs_l1 = 0
lvl_fixed_costs_l2 = (fixed_costs['L2']['equipment'] + fixed_costs['L2']['installation'] \
+ fixed_costs['L2']['lifetime_evse_maint']) / lifetime_residential_energy_kwh
# Calculate single lvl fixed cost for residential charging
lvl_fixed_costs_res = lvl_fixed_costs_l1 * fraction_home_l1_charging + lvl_fixed_costs_l2 * (1-fraction_home_l1_charging)
# Calculate state-level residential LCOC, write to file
df['lcoc_cost_per_kwh'] = df['cost_per_kwh'] + lvl_fixed_costs_res
df = df[['state', 'lcoc_cost_per_kwh']]
df.to_csv(outfile, index=False)
nat_lcoc = round(float(df[df.state=='US']['lcoc_cost_per_kwh']), 2)
print('LCOC calculation complete, national LCOC (residential) is ${}/kWh'.format(nat_lcoc))
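# Illustrative sketch only, with made-up fixed costs (the helper is not part
# of the pipeline): LCOC = cost of electricity + levelized fixed cost, where
# the fixed costs are spread over the lifetime residential charging energy
# implied by the default parameters above.
def _sketch_residential_lcoc(coe=0.11, l2_equipment=500., l2_install=1200.,
                             l2_lifetime_maint=180.):
    lifetime_kwh = 15 * 10781 * (29.82 / 100) * 0.81    # roughly 3.9e4 kWh charged at home
    lvl_l2 = (l2_equipment + l2_install + l2_lifetime_maint) / lifetime_kwh
    lvl_l1 = 0.0                                         # L1 assumed free in this toy case
    lvl_fixed = 0.16 * lvl_l1 + (1 - 0.16) * lvl_l2      # blend by home-L1 share
    return coe + lvl_fixed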
###########################
### Workplace/Public L2 ###
###########################
def calculate_state_workplace_public_l2_lcoc(coe_path = config.EIA_COM_PATH,
fixed_costs_file = 'data/fixed-costs/workplace-public-l2/com_level2.txt',
equip_lifespan = 15,
equip_utilization_kwh_per_day = 30, #source: INL
outpath = 'outputs/cost-of-charging/workplace-public-l2/work_pub_l2_states_baseline.csv'):
"""
Function calculates the state-level workplace/public-L2 levelized cost of charging, taking
into account the average cost of electricity, fixed costs, and equipment
maintenance.
"""
# Load data
df = pd.read_csv(coe_path)
fixed_cost_dict = {}
with open(fixed_costs_file) as f:
for line in f:
key, val = line.split(':')
fixed_cost_dict[key] = float(val)
ann_maint_cost = 0.01 * fixed_cost_dict['equipment']
lifetime_maint_cost = ann_maint_cost * equip_lifespan
fixed_cost_dict['lifetime_evse_maint'] = lifetime_maint_cost
# Calculate lifetime energy output
lifetime_evse_energy_kwh = equip_lifespan * 365 * equip_utilization_kwh_per_day
# Calculate lvl fixed costs for commercial charging
lvl_fixed_costs = (fixed_cost_dict['equipment'] + fixed_cost_dict['installation'] \
+ fixed_cost_dict['lifetime_evse_maint']) / lifetime_evse_energy_kwh
# Calculate state-level workplace/public-L2 LCOC, write to file
df['cost'] = df['cost'] / 100
df['lcoc_cost_per_kwh'] = df['cost'] + lvl_fixed_costs
df.rename(columns={'description': 'state'}, inplace=True)
df = df[['state', 'lcoc_cost_per_kwh']]
df.to_csv(outpath, index=False)
nat_lcoc = round(float(df[df.state=='US']['lcoc_cost_per_kwh']), 2)
print('LCOC calculation complete, national LCOC (workplace/pub-L2) is ${}/kWh'.format(nat_lcoc))
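# Unlike the residential case, lifetime energy here comes from assumed station
# utilization rather than vehicle mileage: 15 yr * 365 d * 30 kWh/d = 164,250 kWh,
# over which the fixed costs are levelized.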
####################
### DCFC Station ###
####################
def dcfc_rates_to_utils(urdb_rates_files = config.DCFC_PROFILES_DICT,
outpath = 'outputs/cost-of-electricity/urdb-dcfc-utilities/'):
"""
Aggregates dcfc urdb rates in urdb_rates_files by utility, keeping the minimum
cost of electricity value.
"""
for prof in urdb_rates_files.keys():
rates_df = pd.read_csv(urdb_rates_files[prof], low_memory=False)
cost_col = "{}_lvl_cost_per_kwh".format(prof)
rates_df = rates_df[['eiaid', cost_col]]
utils_df = rates_df.groupby('eiaid')[cost_col].min().reset_index()
outfile = outpath + 'dcfc_utils_{}.csv'.format(prof)
utils_df.to_csv(outfile, index=False)
print('Utility-level results generated for {}.'.format(prof))
def dcfc_utils_to_county(urdb_util_files = {'p1':'outputs/cost-of-electricity/urdb-dcfc-utilities/dcfc_utils_p1.csv',
'p2':'outputs/cost-of-electricity/urdb-dcfc-utilities/dcfc_utils_p2.csv',
'p3':'outputs/cost-of-electricity/urdb-dcfc-utilities/dcfc_utils_p3.csv',
'p4':'outputs/cost-of-electricity/urdb-dcfc-utilities/dcfc_utils_p4.csv'},
eia_territory_file = config.EIAID_TO_COUNTY_CW_PATH,
outpath = 'outputs/cost-of-electricity/urdb-dcfc-counties/'):
"""
Joins DCFC cost of electricity for station profiles in urdb_util_files to eia_territory
file.
"""
eiaid_territories = pd.read_csv(eia_territory_file)
eiaid_territories = eiaid_territories[['eiaid', 'state', 'county']]
for prof in urdb_util_files.keys():
utils_df = pd.read_csv(urdb_util_files[prof], low_memory=False)
county_df = eiaid_territories.merge(utils_df, on='eiaid', how='left')
cost_col = "{}_lvl_cost_per_kwh".format(prof)
county_df = county_df.groupby(['state', 'county'])[cost_col].median().reset_index()
#For counties w/ no utilities in URDB, assign median cost of electricity
median_coe = county_df[cost_col].median()
county_df = county_df.fillna(median_coe)
outfile = outpath + 'dcfc_counties_{}.csv'.format(prof)
county_df.to_csv(outfile, index=False)
print("County-level results generated for {}.".format(prof))
def dcfc_county_to_state(urdb_county_files = {'p1': 'outputs/cost-of-electricity/urdb-dcfc-counties/dcfc_counties_p1.csv',
'p2': 'outputs/cost-of-electricity/urdb-dcfc-counties/dcfc_counties_p2.csv',
'p3': 'outputs/cost-of-electricity/urdb-dcfc-counties/dcfc_counties_p3.csv',
'p4': 'outputs/cost-of-electricity/urdb-dcfc-counties/dcfc_counties_p4.csv'},
afdc_counties_file = 'outputs/county-dcfc-counts/afdc_county_station_counts.csv',
outpath = 'outputs/cost-of-electricity/dcfc-states/'):
"""
Function calculates state-level cost of electricity for profiles in urdb_county_files. Cost is
weighted by the number of DCFC stations present within the county (AFDC).
"""
afdc_df = pd.read_csv(afdc_counties_file)
afdc_df.rename(columns={'county_name': 'county'}, inplace=True)
afdc_df = afdc_df[['state', 'county', 'n_dcfc_stations']]
for prof in urdb_county_files.keys():
dcfc_county_df = pd.read_csv(urdb_county_files[prof], low_memory=False)
dcfc_county_df = dcfc_county_df.merge(afdc_df, on=['state', 'county'], how='left')
dcfc_county_df = dcfc_county_df.fillna(0)
states, dcfc_stations, coe = [], [], []
for state in set(dcfc_county_df['state']):
state_df = dcfc_county_df[dcfc_county_df['state']==state]
stations = state_df['n_dcfc_stations'].sum()
cost_col = "{}_lvl_cost_per_kwh".format(prof)
if stations > 0:
cost = (state_df[cost_col] * state_df['n_dcfc_stations']).sum()/stations
else:
cost = state_df[cost_col].mean()
states.append(state)
dcfc_stations.append(stations)
coe.append(cost)
state_df = pd.DataFrame({'state': states,
'n_dcfc_stations': dcfc_stations,
cost_col: coe})
# Add US row
total_us_stations = state_df['n_dcfc_stations'].sum()
nat_coe = ((state_df[cost_col] * state_df['n_dcfc_stations']) / total_us_stations).sum()
nat_df = pd.DataFrame({'state': ['US'],
'n_dcfc_stations': [total_us_stations],
cost_col: [nat_coe]})
state_df = pd.concat([state_df, nat_df]).reset_index(drop=True)
outfile = outpath + 'dcfc_states_{}.csv'.format(prof)
state_df.to_csv(outfile, index=False)
print("State-level results generated for {}.".format(prof))
def combine_dcfc_profiles_into_single_lcoc(dcfc_lcoc_file = 'outputs/cost-of-charging/dcfc/dcfc_states_baseline.csv',
load_profile_path = config.DCFC_PROFILES_DICT,
afdc_path = config.AFDC_PATH):
"""
Adds 'comb_lcoc' field to dcfc_lcoc_file that is the weighted average of each station profile lcoc. Weighting is by
load (total annual power) and how common stations of a similar size are in the real world (using AFDC station locations).
"""
df =
|
pd.read_csv(dcfc_lcoc_file)
|
pandas.read_csv
|
"""
The main module for the Atomic Pattern Dictionary, joining the atlas estimation
and computing the encoding / weights
Copyright (C) 2015-2020 <NAME> <<EMAIL>>
"""
from __future__ import absolute_import
import logging
import os
import time
# to suppress all visual, has to be on the beginning
import matplotlib
if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
print('No display found. Using non-interactive Agg backend.')
# https://matplotlib.org/faq/usage_faq.html
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import skimage.segmentation as sk_image
# using https://github.com/Borda/pyGCO
from gco import cut_general_graph, cut_grid_graph_simple
from skimage import filters
from bpdl.data_utils import export_image
from bpdl.metric_similarity import compare_atlas_adjusted_rand
from bpdl.pattern_atlas import (
atlas_split_indep_ptn,
compute_positive_cost_images_weights,
compute_relative_penalty_images_weights,
edges_in_image2d_plane,
init_atlas_mosaic,
reinit_atlas_likely_patterns,
)
from bpdl.pattern_weights import weights_image_atlas_overlap_major, weights_image_atlas_overlap_partial
from bpdl.registration import register_images_to_atlas_demons
NB_GRAPH_CUT_ITER = 5
TEMPLATE_NAME_ATLAS = 'BPDL_{}_{}_iter_{:04d}'
LIST_BPDL_STEPS = [
'weights update',
'reinit. atlas',
'atlas update',
'deform images',
]
# TRY: init: spatial clustering
# TRY: init: use ICA
# TRY: init: greedy
def estimate_atlas_graphcut_simple(imgs, ptn_weights, coef=1.):
""" run the graphcut to estimate atlas from computed unary terms
source: https://github.com/yujiali/pyGCO
:param list(ndarray) imgs: list of input binary images [np.array<height, width>]
:param ndarray ptn_weights: binary ptn selection np.array<nb_imgs, nb_lbs>
:param float coef: coefficient for graphcut
:return ndarray: np.array<height, width>
>>> atlas = np.zeros((8, 12), dtype=int)
>>> atlas[:3, 1:5] = 1
>>> atlas[3:7, 6:12] = 2
>>> luts = np.array([[0, 1, 0]] * 3 + [[0, 0, 1]] * 3 + [[0, 1, 1]] * 3)
>>> imgs = [lut[atlas] for lut in luts]
>>> estimate_atlas_graphcut_simple(imgs, luts[:, 1:]).astype(int)
array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> np.sum(abs(estimate_atlas_graphcut_simple(imgs, luts[:, :1]).astype(int)))
0
"""
logging.debug('estimate atlas via GraphCut from Potts model')
if ptn_weights.shape[1] <= 1:
logging.warning('nothing to do for single label')
labels = np.zeros(imgs[0].shape)
return labels
labeling_sum = compute_positive_cost_images_weights(imgs, ptn_weights)
unary_cost = np.array(-1 * labeling_sum, dtype=np.int32)
logging.debug('graph unaries potentials %r: \n %r', unary_cost.shape, list(zip(np.histogram(unary_cost, bins=10))))
# original and the right way..
pairwise = (1 - np.eye(labeling_sum.shape[-1])) * coef
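# Potts-model pairwise term: zero cost on the diagonal (keeping a label is
# free) and a flat penalty `coef` for any label change across an edge.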
pairwise_cost = np.array(pairwise, dtype=np.int32)
logging.debug('graph pairwise coefs %r', pairwise_cost.shape)
# run GraphCut
try:
labels = cut_grid_graph_simple(unary_cost, pairwise_cost, algorithm='expansion')
except Exception:
logging.exception('cut_grid_graph_simple')
labels = np.argmin(unary_cost, axis=1)
# reshape labels
labels = labels.reshape(labeling_sum.shape[:2])
logging.debug('resulting labelling %r: \n %r', labels.shape, labels)
return labels
def estimate_atlas_graphcut_general(imgs, ptn_weights, coef=0., init_atlas=None, connect_diag=False):
""" run the graphcut on the unary costs with specific pairwise cost
source: https://github.com/yujiali/pyGCO
:param list(ndarray) imgs: list of np.array<height, width> input binary images
:param ndarray ptn_weights: np.array<nb_imgs, nb_lbs> binary ptn selection
:param float coef: coefficient for graphcut
:param ndarray init_atlas: init labeling np.array<nb_seg, 1>
when None, the argmin of the unary costs is used
:param bool connect_diag: use diagonal connections, i.e. an 8-neighbourhood instead of a 4-neighbourhood
:return ndarray: np.array<height, width>
>>> atlas = np.zeros((8, 12), dtype=int)
>>> atlas[:3, 1:5] = 1
>>> atlas[3:7, 6:12] = 2
>>> luts = np.array([[0, 1, 0]] * 3 + [[0, 0, 1]] * 3 + [[0, 1, 1]] * 3)
>>> imgs = [lut[atlas] for lut in luts]
>>> estimate_atlas_graphcut_general(imgs, luts[:, 1:]).astype(int)
array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> np.sum(abs(estimate_atlas_graphcut_general(imgs, luts[:, :1]).astype(int)))
0
"""
logging.debug('estimate atlas via GraphCut from Potts model')
if ptn_weights.shape[1] <= 1:
logging.warning('nothing to do for single label')
labels = np.zeros(imgs[0].shape)
return labels
u_cost = compute_relative_penalty_images_weights(imgs, ptn_weights)
# u_cost = 1. / (labelingSum +1)
unary_cost = np.array(u_cost, dtype=np.float64)
unary_cost = unary_cost.reshape(-1, u_cost.shape[-1])
logging.debug('graph unaries potentials %r: \n %r', unary_cost.shape, list(zip(np.histogram(unary_cost, bins=10))))
edges, edge_weights = edges_in_image2d_plane(u_cost.shape[:-1], connect_diag)
# original and the right way...
pairwise = (1 - np.eye(u_cost.shape[-1])) * coef
pairwise_cost = np.array(pairwise, dtype=np.float64)
logging.debug('graph pairwise coefs %r', pairwise_cost.shape)
if init_atlas is None:
init_labels = np.argmin(unary_cost, axis=1)
else:
init_labels = init_atlas.ravel()
logging.debug('graph initial labels of shape %r', init_labels.shape)
# run GraphCut
try:
labels = cut_general_graph(
edges,
edge_weights,
unary_cost,
pairwise_cost,
algorithm='expansion',
init_labels=init_labels,
n_iter=NB_GRAPH_CUT_ITER
)
except Exception:
logging.exception('cut_general_graph')
labels = np.argmin(unary_cost, axis=1)
# reshape labels
labels = labels.reshape(u_cost.shape[:2])
logging.debug('resulting labelling %r of %r', labels.shape, np.unique(labels).tolist())
return labels
def export_visualization_image(img, idx, out_dir, prefix='debug', name='', ration=None, labels=('', '')):
""" export visualisation as an image with some special desc.
:param ndarray img: np.array<height, width>
:param int idx: iteration to be shown in the img name
:param str out_dir: path to the resulting folder
:param str prefix:
:param str name: name of this particular visual
:param str ration: aspect ratio (mainly for weights, to be stretched)
:param tuple(str,str) labels: labels for axis
CRASH: TclError: no display name and no $DISPLAY environment variable
>>> img = np.random.random((50, 50))
>>> path_fig = export_visualization_image(img, 0, '.')
>>> os.path.exists(path_fig)
True
>>> os.remove(path_fig)
"""
# plt.ioff()
fig, ax = plt.subplots()
ax.imshow(img, interpolation='none', aspect=ration)
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
name_fig = TEMPLATE_NAME_ATLAS.format(prefix, name, idx)
path_fig = os.path.join(out_dir, name_fig + '.png')
logging.debug('.. export Visualization as "%s"', path_fig)
fig.savefig(path_fig, bbox_inches='tight', pad_inches=0.05)
plt.close(fig)
return path_fig
def export_visual_atlas(i, out_dir, atlas=None, prefix='debug'):
""" export the atlas and/or weights to results directory
:param int i: iteration to be shown in the img name
:param str out_dir: path to the resulting folder
:param ndarray atlas: np.array<height, width>
:param str prefix:
>>> import shutil
>>> logging.getLogger().setLevel(logging.DEBUG)
>>> dir_name = 'sample_dir'
>>> os.mkdir(dir_name)
>>> export_visual_atlas(0, dir_name, np.random.randint(0, 5, (10, 5)))
>>> shutil.rmtree(dir_name, ignore_errors=True)
"""
if logging.getLogger().getEffectiveLevel() < logging.DEBUG:
return
if out_dir is None or not os.path.exists(out_dir):
logging.debug('results path "%s" does not exist', out_dir)
return None
if atlas is not None:
# export_visualization_image(atlas, i, out_dir, prefix, 'atlas',
# labels=['X', 'Y'])
n_img = TEMPLATE_NAME_ATLAS.format(prefix, 'atlas', i)
export_image(out_dir, atlas, n_img)
# if weights is not None:
# export_visualization_image(weights, i, out_dir, prefix, 'weights',
# 'auto', ['patterns', 'images'])
def bpdl_initialisation(imgs, init_atlas, init_weights, out_dir, out_prefix, rand_seed=None):
""" more complex initialisation depending on inputs
:param list(ndarray) imgs: list of np.array<height, width>
:param ndarray init_atlas: np.array<height, width>
:param ndarray init_weights: np.array<nb_imgs, nb_lbs>
:param str out_prefix:
:param str out_dir: path to the results directory
:param rand_seed: random initialization
:return tuple(ndarray,ndarray): np.array<height, width>, np.array<nb_imgs, nb_lbs>
>>> atlas = np.zeros((8, 12), dtype=int)
>>> atlas[:3, 1:5] = 1
>>> atlas[3:7, 6:12] = 2
>>> luts = np.array([[0, 1, 0]] * 3 + [[0, 0, 1]] * 3 + [[0, 1, 1]] * 3)
>>> imgs = [lut[atlas] for lut in luts]
>>> w_bins = luts[:, 1:]
>>> init_atlas, init_w_bins = bpdl_initialisation(imgs, init_atlas=None,
... init_weights=w_bins, out_dir=None, out_prefix='', rand_seed=0)
>>> init_atlas.astype(int)
array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> init_w_bins
array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[0, 1],
[1, 1],
[1, 1],
[1, 1]])
>>> init_atlas, init_w_bins = bpdl_initialisation(imgs, init_atlas=None,
... init_weights=None, out_dir=None, out_prefix='', rand_seed=0)
>>> init_atlas
array([[3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1],
[3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1],
[3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1],
[1, 1, 1, 1, 3, 3, 3, 3, 2, 2, 2, 2],
[1, 1, 1, 1, 3, 3, 3, 3, 2, 2, 2, 2],
[1, 1, 1, 1, 3, 3, 3, 3, 2, 2, 2, 2],
[1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
[1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]])
>>> init_w_bins
"""
if init_weights is not None and init_atlas is None:
logging.debug('... initialise Atlas from w_bins')
init_atlas = estimate_atlas_graphcut_general(imgs, init_weights, 0.)
# export_visual_atlas(0, out_dir, init_atlas, out_prefix)
if init_atlas is None:
nb_patterns = int(np.sqrt(len(imgs)))
logging.debug('... initialise Atlas with a random mosaic of %i patterns', nb_patterns)
# IDEA: find better way of initialisation
init_atlas = init_atlas_mosaic(imgs[0].shape, nb_patterns, rand_seed=rand_seed)
# export_visual_atlas(0, out_dir, init_atlas, out_prefix)
atlas = init_atlas
w_bins = init_weights
if len(np.unique(atlas)) == 1:
logging.error('the init. atlas does not contain any label... %r', np.unique(atlas))
export_visual_atlas(0, out_dir, atlas, out_prefix)
return atlas, w_bins
def bpdl_update_weights(imgs, atlas, overlap_major=False):
""" single iteration of the block coordinate descent algo
:param list(ndarray) imgs: list of images np.array<height, width>
:param ndarray atlas: used atlas of np.array<height, width>
:param bool overlap_major: whether to require majority overlap with the pattern (instead of partial overlap)
:return ndarray: np.array<nb_imgs, nb_lbs>
>>> atlas = np.zeros((8, 12), dtype=int)
>>> atlas[:3, 1:5] = 1
>>> atlas[3:7, 6:12] = 2
>>> luts = np.array([[0, 1, 0]] * 3 + [[0, 0, 1]] * 3 + [[0, 1, 1]] * 3)
>>> imgs = [lut[atlas] for lut in luts]
>>> bpdl_update_weights(imgs, atlas)
array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[0, 1],
[1, 1],
[1, 1],
[1, 1]])
>>> bpdl_update_weights(imgs, atlas, overlap_major=True)
array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[0, 1],
[1, 1],
[1, 1],
[1, 1]])
"""
# update w_bins
logging.debug('... perform pattern weights')
fn_weights_ = weights_image_atlas_overlap_major if overlap_major else weights_image_atlas_overlap_partial
w_bins = [fn_weights_(img, atlas) for img in imgs]
# add once for patterns that are not used at all
# w_bins = ptn_weight.fill_empty_patterns(np.array(w_bins))
return np.array(w_bins)
def bpdl_update_atlas(imgs, atlas, w_bins, label_max, gc_coef, gc_reinit, ptn_compact, connect_diag=False):
""" single iteration of the block coordinate descent algo
:param list(ndarray) imgs: list of images np.array<height, width>
:param ndarray atlas: used atlas of np.array<height, width>
:param ndarray w_bins: weights np.array<nb_imgs, nb_lbs>
:param int label_max: max number of used labels
:param float gc_coef: graph cut regularisation
:param bool gc_reinit: whether to use the atlas from the previous step as initialisation for the current one
:param bool ptn_compact: split individual patterns
:param bool connect_diag: use diagonal connectivity, i.e. an 8-neighbourhood instead of a 4-neighbourhood
:return ndarray: np.array<height, width>
>>> atlas = np.zeros((8, 12), dtype=int)
>>> atlas[:3, 1:5] = 1
>>> atlas[3:7, 6:12] = 2
>>> luts = np.array([[0, 1, 0]] * 3 + [[0, 0, 1]] * 3 + [[0, 1, 1]] * 3)
>>> imgs = [lut[atlas] for lut in luts]
>>> bpdl_update_atlas(imgs, atlas, luts[:, 1:], 2, gc_coef=0.,
... gc_reinit=False, ptn_compact=False)
array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)
"""
if np.sum(w_bins) == 0:
logging.warning('the w_bins is empty... %r', np.unique(atlas))
w_bins = np.array(w_bins)
logging.debug('... perform Atlas estimation')
if gc_reinit:
atlas_new = estimate_atlas_graphcut_general(imgs, w_bins, gc_coef, atlas, connect_diag=connect_diag)
else:
atlas_new = estimate_atlas_graphcut_general(imgs, w_bins, gc_coef, connect_diag=connect_diag)
if ptn_compact:
atlas_new = atlas_split_indep_ptn(atlas_new, label_max)
atlas_new = np.remainder(atlas_new, label_max + 1)
return atlas_new
def bpdl_deform_images(images, atlas, weights, deform_coef, inverse=False):
if deform_coef is None or deform_coef < 0:
return images, None
# coef = deform_coef * np.sqrt(np.product(images.shape))
smooth_coef = deform_coef * min(images[0].shape)
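# A worked example of the scaling above (hypothetical sizes): for 64x128
# images with deform_coef=0.1, smooth_coef = 0.1 * min(64, 128) = 6.4.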
logging.debug('... perform register images onto atlas with smooth_coef: %f', smooth_coef)
images_warped, deforms = register_images_to_atlas_demons(images, atlas, weights, smooth_coef, inverse=inverse)
return images_warped, deforms
def bpdl_pipeline(
images,
init_atlas=None,
init_weights=None,
gc_regul=0.0,
tol=1e-3,
max_iter=25,
gc_reinit=True,
ptn_compact=True,
overlap_major=False,
connect_diag=False,
deform_coef=None,
out_prefix='debug',
out_dir=''
):
""" the experiments_synthetic pipeline for block coordinate descent
algo with graphcut...
:param float deform_coef: regularise the deformation
:param list(ndarray) images: list of images np.array<height, width>
:param ndarray init_atlas: used atlas of np.array<height, width>
:param ndarray init_weights: weights np.array<nb_imgs, nb_lbs>
:param float gc_regul: graph cut regularisation
:param float tol: stop if the diff between two consecutive steps
is less than this threshold, e.g. for -1 it never stops before max_iter
:param int max_iter: max number of iterations
:param bool gc_reinit: whether to use the atlas from the previous step as initialisation for the current one
:param bool ptn_compact: enforce compactness of patterns
(split the connected components)
:param bool overlap_major: whether to require majority overlap with the pattern (instead of partial overlap)
:param bool connect_diag: use diagonal connectivity, i.e. an 8-neighbourhood instead of a 4-neighbourhood
:param str out_dir: path to the results directory
:param str out_prefix:
:return tuple(ndarray,ndarray): np.array<height, width>, np.array<nb_imgs, nb_lbs>
>>> import shutil
>>> logging.getLogger().setLevel(logging.DEBUG)
>>> atlas = np.zeros((8, 12), dtype=int)
>>> atlas[:3, 1:5] = 1
>>> atlas[3:7, 6:12] = 2
>>> luts = np.array([[0, 1, 0]] * 3 + [[0, 0, 1]] * 3 + [[0, 1, 1]] * 3)
>>> images = [lut[atlas] for lut in luts]
>>> w_bins = luts[:, 1:]
>>> init_atlas = init_atlas_mosaic(atlas.shape, nb_patterns=2,
... coef=1.5, rand_seed=0)
>>> init_atlas
array([[1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1]])
>>> bpdl_atlas, bpdl_w_bins, deforms = bpdl_pipeline(images, init_atlas,
... out_dir='temp_export')
>>> shutil.rmtree('temp_export', ignore_errors=True)
>>> bpdl_atlas
array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> bpdl_w_bins
array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[0, 1],
[1, 1],
[1, 1],
[1, 1]])
>>> bpdl_atlas, bpdl_w_bins, deforms = bpdl_pipeline(images, init_atlas,
... deform_coef=1)
>>> bpdl_atlas
array([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
logging.debug('compute an Atlas and weights for %i images...', len(images))
assert len(images) > 0, 'missing input images'
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
if len(out_dir) > 0 and not os.path.exists(out_dir):
os.mkdir(out_dir)
# initialise
label_max = np.max(init_atlas)
assert label_max > 0, 'at least some patterns should be searched'
logging.debug('max nb labels set: %i', label_max)
atlas, w_bins = bpdl_initialisation(images, init_atlas, init_weights, out_dir, out_prefix)
list_diff = []
list_times = []
imgs_warped = images
deforms = None
max_iter = max(1, max_iter) # set at least single iteration
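# Each BCD iteration below: (1) update pattern weights from the warped images,
# (2) reinitialise unused patterns, (3) re-estimate the atlas via graph cut,
# (4) optionally re-register images to the new atlas, then check the stopping
# criterion on the adjusted-Rand difference between consecutive atlases.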
for it in range(max_iter):
if len(np.unique(atlas)) == 1:
logging.warning('.. iter: %i, no labels in the atlas %r', it, np.unique(atlas).tolist())
times = [time.time()]
# 1: update WEIGHTS
w_bins = bpdl_update_weights(imgs_warped, atlas, overlap_major)
times.append(time.time())
# 2: reinitialise empty patterns
atlas_reinit, w_bins = reinit_atlas_likely_patterns(imgs_warped, w_bins, atlas, label_max, ptn_compact)
times.append(time.time())
# 3: update the ATLAS
atlas_new = bpdl_update_atlas(
imgs_warped, atlas_reinit, w_bins, label_max, gc_regul, gc_reinit, ptn_compact, connect_diag
)
times.append(time.time())
# 4: optional deformations
if it > 0:
imgs_warped, deforms = bpdl_deform_images(images, atlas_new, w_bins, deform_coef)
times.append(time.time())
times = [times[i] - times[i - 1] for i in range(1, len(times))]
d_times = dict(zip(LIST_BPDL_STEPS[:len(times)], times))
step_diff = compare_atlas_adjusted_rand(atlas, atlas_new)
# step_diff = np.sum(abs(atlas - atlas_new)) / float(np.product(atlas.shape))
list_diff.append(step_diff)
list_times.append(d_times)
atlas = sk_image.relabel_sequential(atlas_new)[0]
logging.debug('-> iter. #%i with Atlas diff %f', (it + 1), step_diff)
export_visual_atlas(it + 1, out_dir, atlas, out_prefix)
# STOPPING criterion
if step_diff <= tol and len(np.unique(atlas)) > 1:
logging.debug('>> exit since the atlas diff %f is smaller than %f', step_diff, tol)
break
# TODO: force set background for too small components
imgs_warped, deforms = bpdl_deform_images(images, atlas, w_bins, deform_coef)
w_bins = [weights_image_atlas_overlap_major(img, atlas) for img in imgs_warped]
logging.debug(
'BPDL: terminated with iter %i / %i and step diff %f <? %f', len(list_diff), max_iter, list_diff[-1], tol
)
logging.debug('criterion evolved:\n %r', list_diff)
df_time =
|
pd.DataFrame(list_times)
|
pandas.DataFrame
|
# coding=utf-8
# !/usr/bin/env python3
import os, re
import numpy as np
import pandas as pd
def svLen(sv_data):
data_grab = re.compile("^.*SVLEN=(?P<sv_len>-?[0-9]+).*$")
if 'SVLEN' in str(sv_data['INFO'].iloc[0]):
data_info = data_grab.search(sv_data['INFO'].iloc[0]).groupdict()
sv_len = data_info['sv_len']
else:
# if the record has no SVLEN (e.g. the SV type is not DEL, INS, DUP or INV), we prefer to preserve it, so default sv_len to 51 (>50).
sv_len = 51
return int(sv_len)
def svType(sv_data):
data_grab = re.compile("^.*SVTYPE=(?P<sv_type>[a-zA-Z]+).*$")
if 'SVTYPE' in str(sv_data['INFO'].iloc[0]):
data_info = data_grab.search(sv_data['INFO'].iloc[0]).groupdict()
sv_type = data_info['sv_type']
else:
sv_type = 'None'
return sv_type
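# A minimal sketch of what the two parsers above extract (hypothetical record):
# for a row whose INFO column is "SVTYPE=DEL;END=10600;SVLEN=-120",
# svLen(...) returns -120 and svType(...) returns 'DEL'; records without an
# SVLEN tag fall back to the default length 51 so they pass >50 bp filters.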
def readvcf(file_name):
count_num = 0
with open(file_name,'r') as f1:
for row in f1:
if row.startswith('#'):
count_num = count_num + 1
# print(count_num)
rawData = pd.read_csv(file_name,skiprows=count_num-1,sep='\t')
rawData = rawData.set_index('#CHROM')
rawData.index.name = 'CHROM'
# print(rawData.loc['chr1'])
return rawData
def typeCalculate(file_name):
if 'vcf' in file_name:
sv_data = readvcf(file_name)
else:
sv_data = pd.read_csv(file_name)
# print(sv_data)
# dnsv_filter_data =pd.DataFrame(columns=dnsv_data.columns)
sv_type_list = []
for i in range(sv_data.shape[0]):
print(i)
# sv_len =svLen(sv_data.iloc[[i]])
# if sv_len>10000:
sv_type = svType(sv_data.iloc[[i]])
sv_type_list.append(sv_type)
sv_type_list = pd.Series(sv_type_list)
print(sv_type_list.value_counts())
return
def process_bar(i):
num = i // 2
if i == 100:
process = "\r[%3s%%]: |%-50s|\n" % (i, '|' * num)
else:
process = "\r[%3s%%]: |%-50s|" % (i, '|' * num)
print(process, end='', flush=True)
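# Example of the rendering above: process_bar(42) redraws the line as
# "[ 42%]: |" followed by 21 bar characters left-aligned in a 50-character field.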
def calcultateImprecise(file_name):
data = pd.read_csv(file_name)
imprecise_ins = pd.DataFrame(columns=data.columns)
imprecise_del = pd.DataFrame(columns=data.columns)
imprecise = pd.DataFrame(columns=data.columns)
process_count = 0; process_path = data.shape[0]/100
for i in range(data.shape[0]):
if i >= process_path * process_count:
process_bar(process_count+1)
process_count = process_count + 1
sv_type =svType(data.iloc[[i]])
if 'IMPRECISE' in data['INFO'].iloc[i]:
imprecise = pd.concat([imprecise, data.iloc[[i]]])
if sv_type == 'INS':
imprecise_ins = pd.concat([imprecise_ins, data.iloc[[i]]])
elif sv_type == 'DEL':
imprecise_del = pd.concat([imprecise_del , data.iloc[[i]]])
print('ins',imprecise_ins)
print('del',imprecise_del)
print('all',imprecise)
# deimprecise.to_csv(out_dir,index=None)
return
def filterImprecise(file_name,out_dir):
data = pd.read_csv(file_name)
deimprecise_ins = pd.DataFrame(columns=data.columns)
deimprecise_del = pd.DataFrame(columns=data.columns)
deimprecise = pd.DataFrame(columns=data.columns)
process_count = 0; process_path = data.shape[0]/100
for i in range(data.shape[0]):
if i >= process_path * process_count:
process_bar(process_count+1)
process_count = process_count + 1
sv_type =svType(data.iloc[[i]])
if 'IMPRECISE' not in data['INFO'].iloc[i]:
deimprecise = pd.concat([deimprecise, data.iloc[[i]]])
if sv_type == 'INS':
deimprecise_ins = pd.concat([deimprecise_ins, data.iloc[[i]]])
elif sv_type == 'DEL':
deimprecise_del = pd.concat([deimprecise_del , data.iloc[[i]]])
print('ins',deimprecise_ins)
print('del',deimprecise_del)
deimprecise.to_csv(out_dir,index=None)
return
def sizeChromStatistics(certain_type_data):
# print(certain_type_data)
# print(certain_type_data['CHROM'].value_counts())
statistics_total = certain_type_data.shape[0]
statistics_100bp = 0
statistics_100bp_300bp = 0
statistics_300bp_1kb = 0
statistics_1kb = 0
for i in range(certain_type_data.shape[0]):
sv_len = abs(svLen(certain_type_data.iloc[[i]]))
if sv_len < 100:
statistics_100bp = statistics_100bp + 1
elif 100<=sv_len<300:
statistics_100bp_300bp = statistics_100bp_300bp + 1
elif 300<=sv_len<1000:
statistics_300bp_1kb = statistics_300bp_1kb + 1
elif sv_len>=1000:
statistics_1kb = statistics_1kb + 1
#chr1 to chr22
chrom_part = []
for i in range(1,23):
if 'chr'+str(i) in certain_type_data.index:
chrom_part.append(certain_type_data.index.value_counts()['chr'+str(i)])
else:
chrom_part.append(0)
#chrX chrY & Other Chroms
if 'chrX' in certain_type_data.index:
chrom_part.append(certain_type_data.index.value_counts()['chrX'])
else:
chrom_part.append(0)
if 'chrY' in certain_type_data.index:
chrom_part.append(certain_type_data.index.value_counts()['chrY'])
else:
chrom_part.append(0)
chrom_part.append(statistics_total-sum(chrom_part))
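# Layout of the returned list: [total, <100bp, 100-300bp, 300bp-1kb, >=1kb]
# followed by per-chromosome counts for chr1..chr22, chrX, chrY and a final
# count for all remaining contigs.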
statistics_list = [statistics_total,statistics_100bp,statistics_100bp_300bp,statistics_300bp_1kb,statistics_1kb]
statistics_list.extend(chrom_part)
return statistics_list
def simpleStatistics(file_name,out_dir=None):
if 'vcf' in file_name:
sv_data = readvcf(file_name)
elif '.' in file_name:
sv_data = pd.read_csv(file_name,index_col='CHROM')
else:
sv_data =file_name
INS_data = pd.DataFrame(columns=sv_data.columns)
DEL_data =
|
pd.DataFrame(columns=sv_data.columns)
|
pandas.DataFrame
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from mars import opcodes
from mars.config import options, option_context
from mars.core import OutputType, tile
from mars.core.operand import OperandStage
from mars.dataframe import eval as mars_eval, cut, to_numeric
from mars.dataframe.base import to_gpu, to_cpu, astype
from mars.dataframe.core import DATAFRAME_TYPE, SERIES_TYPE, SERIES_CHUNK_TYPE, \
INDEX_TYPE, CATEGORICAL_TYPE, CATEGORICAL_CHUNK_TYPE
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
from mars.dataframe.datasource.series import from_pandas as from_pandas_series
from mars.dataframe.datasource.index import from_pandas as from_pandas_index
from mars.tensor.core import TENSOR_TYPE
def test_to_gpu():
# test dataframe
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
cdf = to_gpu(df)
assert df.index_value == cdf.index_value
assert df.columns_value == cdf.columns_value
assert cdf.op.gpu is True
pd.testing.assert_series_equal(df.dtypes, cdf.dtypes)
df, cdf = tile(df, cdf)
assert df.nsplits == cdf.nsplits
assert df.chunks[0].index_value == cdf.chunks[0].index_value
assert df.chunks[0].columns_value == cdf.chunks[0].columns_value
assert cdf.chunks[0].op.gpu is True
pd.testing.assert_series_equal(df.chunks[0].dtypes, cdf.chunks[0].dtypes)
assert cdf is to_gpu(cdf)
# test series
sdata = data.iloc[:, 0]
series = from_pandas_series(sdata)
cseries = to_gpu(series)
assert series.index_value == cseries.index_value
assert cseries.op.gpu is True
series, cseries = tile(series, cseries)
assert series.nsplits == cseries.nsplits
assert series.chunks[0].index_value == cseries.chunks[0].index_value
assert cseries.chunks[0].op.gpu is True
assert cseries is to_gpu(cseries)
def test_to_cpu():
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
cdf = to_gpu(df)
df2 = to_cpu(cdf)
assert df.index_value == df2.index_value
assert df.columns_value == df2.columns_value
assert df2.op.gpu is False
pd.testing.assert_series_equal(df.dtypes, df2.dtypes)
df, df2 = tile(df, df2)
assert df.nsplits == df2.nsplits
assert df.chunks[0].index_value == df2.chunks[0].index_value
assert df.chunks[0].columns_value == df2.chunks[0].columns_value
assert df2.chunks[0].op.gpu is False
pd.testing.assert_series_equal(df.chunks[0].dtypes, df2.chunks[0].dtypes)
assert df2 is to_cpu(df2)
def test_rechunk():
raw = pd.DataFrame(np.random.rand(10, 10))
df = from_pandas_df(raw, chunk_size=3)
df2 = tile(df.rechunk(4))
assert df2.shape == (10, 10)
assert len(df2.chunks) == 9
assert df2.chunks[0].shape == (4, 4)
pd.testing.assert_index_equal(df2.chunks[0].index_value.to_pandas(), pd.RangeIndex(4))
pd.testing.assert_index_equal(df2.chunks[0].columns_value.to_pandas(), pd.RangeIndex(4))
pd.testing.assert_series_equal(df2.chunks[0].dtypes, raw.dtypes[:4])
assert df2.chunks[2].shape == (4, 2)
pd.testing.assert_index_equal(df2.chunks[2].index_value.to_pandas(), pd.RangeIndex(4))
pd.testing.assert_index_equal(df2.chunks[2].columns_value.to_pandas(), pd.RangeIndex(8, 10))
pd.testing.assert_series_equal(df2.chunks[2].dtypes, raw.dtypes[-2:])
assert df2.chunks[-1].shape == (2, 2)
pd.testing.assert_index_equal(df2.chunks[-1].index_value.to_pandas(), pd.RangeIndex(8, 10))
pd.testing.assert_index_equal(df2.chunks[-1].columns_value.to_pandas(), pd.RangeIndex(8, 10))
pd.testing.assert_series_equal(df2.chunks[-1].dtypes, raw.dtypes[-2:])
for c in df2.chunks:
assert c.shape[1] == len(c.dtypes)
assert len(c.columns_value.to_pandas()) == len(c.dtypes)
columns = [np.random.bytes(10) for _ in range(10)]
index = np.random.randint(-100, 100, size=(4,))
raw = pd.DataFrame(np.random.rand(4, 10), index=index, columns=columns)
df = from_pandas_df(raw, chunk_size=3)
df2 = tile(df.rechunk(6))
assert df2.shape == (4, 10)
assert len(df2.chunks) == 2
assert df2.chunks[0].shape == (4, 6)
pd.testing.assert_index_equal(df2.chunks[0].index_value.to_pandas(), df.index_value.to_pandas())
pd.testing.assert_index_equal(df2.chunks[0].columns_value.to_pandas(), pd.Index(columns[:6]))
pd.testing.assert_series_equal(df2.chunks[0].dtypes, raw.dtypes[:6])
assert df2.chunks[1].shape == (4, 4)
pd.testing.assert_index_equal(df2.chunks[1].index_value.to_pandas(), df.index_value.to_pandas())
pd.testing.assert_index_equal(df2.chunks[1].columns_value.to_pandas(), pd.Index(columns[6:]))
pd.testing.assert_series_equal(df2.chunks[1].dtypes, raw.dtypes[-4:])
for c in df2.chunks:
assert c.shape[1] == len(c.dtypes)
assert len(c.columns_value.to_pandas()) == len(c.dtypes)
# test Series rechunk
series = from_pandas_series(pd.Series(np.random.rand(10,)), chunk_size=3)
series2 = tile(series.rechunk(4))
assert series2.shape == (10,)
assert len(series2.chunks) == 3
pd.testing.assert_index_equal(series2.index_value.to_pandas(), pd.RangeIndex(10))
assert series2.chunk_shape == (3,)
assert series2.nsplits == ((4, 4, 2), )
assert series2.chunks[0].shape == (4,)
pd.testing.assert_index_equal(series2.chunks[0].index_value.to_pandas(), pd.RangeIndex(4))
assert series2.chunks[1].shape == (4,)
pd.testing.assert_index_equal(series2.chunks[1].index_value.to_pandas(), pd.RangeIndex(4, 8))
assert series2.chunks[2].shape == (2,)
pd.testing.assert_index_equal(series2.chunks[2].index_value.to_pandas(), pd.RangeIndex(8, 10))
series2 = tile(series.rechunk(1))
assert series2.shape == (10,)
assert len(series2.chunks) == 10
pd.testing.assert_index_equal(series2.index_value.to_pandas(), pd.RangeIndex(10))
assert series2.chunk_shape == (10,)
assert series2.nsplits == ((1,) * 10, )
assert series2.chunks[0].shape == (1,)
pd.testing.assert_index_equal(series2.chunks[0].index_value.to_pandas(), pd.RangeIndex(1))
# no need to rechunk
series2 = tile(series.rechunk(3))
series = tile(series)
assert series2.chunk_shape == series.chunk_shape
assert series2.nsplits == series.nsplits
def test_data_frame_apply():
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
def df_func_with_err(v):
assert len(v) > 2
return v.sort_values()
with pytest.raises(TypeError):
df.apply(df_func_with_err)
r = df.apply(df_func_with_err, output_type='dataframe',
dtypes=df_raw.dtypes)
assert r.shape == (np.nan, df.shape[-1])
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.dataframe
assert r.op.elementwise is False
r = df.apply('ffill')
assert r.op._op_type_ == opcodes.FILL_NA
r = tile(df.apply(np.sqrt))
assert all(v == np.dtype('float64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.dataframe
assert r.op.elementwise is True
r = tile(df.apply(lambda x: pd.Series([1, 2])))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1])
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(np.sum, axis='index'))
assert np.dtype('int64') == r.dtype
assert r.shape == (df.shape[1],)
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[0],)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(np.sum, axis='columns'))
assert np.dtype('int64') == r.dtype
assert r.shape == (df.shape[0],)
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[1],)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: [1, 2], axis=1, result_type='expand'))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: list(range(10)), axis=1, result_type='reduce'))
assert np.dtype('object') == r.dtype
assert r.shape == (df.shape[0],)
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[1],)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast'))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
finally:
options.chunk_store_limit = old_chunk_store_limit
raw = pd.DataFrame({'a': [np.array([1, 2, 3]), np.array([4, 5, 6])]})
df = from_pandas_df(raw)
df2 = df.apply(lambda x: x['a'].astype(pd.Series), axis=1,
output_type='dataframe', dtypes=pd.Series([np.dtype(float)] * 3))
assert df2.ndim == 2
def test_series_apply():
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = tile(series.apply('add', args=(1,)))
assert r.op._op_type_ == opcodes.ADD
r = tile(series.apply(np.sqrt))
assert np.dtype('float64') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
r = tile(series.apply('sqrt'))
assert np.dtype('float64') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
r = tile(series.apply(lambda x: [x, x + 1], convert_dtype=False))
assert np.dtype('object') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
r = series.apply(np.sum)
assert r.dtype == np.dtype(object)
r = series.apply(lambda x: pd.Series([1]), output_type='dataframe')
expected = s_raw2.apply(lambda x: pd.Series([1]))
pd.testing.assert_series_equal(r.dtypes, expected.dtypes)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
assert r.ndim == 2
pd.testing.assert_series_equal(r.dtypes, dtypes)
assert r.shape == (2, 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes, index=pd.RangeIndex(2))
assert r.ndim == 2
pd.testing.assert_series_equal(r.dtypes, dtypes)
assert r.shape == (2, 3)
with pytest.raises(AttributeError, match='abc'):
series.apply('abc')
with pytest.raises(TypeError):
# dtypes not provided
series.apply(lambda x: x.tolist(), output_type='dataframe')
def test_transform():
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
df = from_pandas_df(df_raw, chunk_size=5)
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
# test transform with infer failure
def transform_df_with_err(v):
assert len(v) > 2
return v.sort_values()
with pytest.raises(TypeError):
df.transform(transform_df_with_err)
r = tile(df.transform(transform_df_with_err, dtypes=df_raw.dtypes))
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0])
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# test transform scenarios on data frames
r = tile(df.transform(lambda x: list(range(len(x)))))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0])
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: list(range(len(x))), axis=1))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], df.shape[1])
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(['cumsum', 'cummax', lambda x: x + 1]))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], df.shape[1] * 3)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0] * 3)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform({'A': 'cumsum', 'D': ['cumsum', 'cummax'], 'F': lambda x: x + 1}))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], 4)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# test agg scenarios on series
r = tile(df.transform(lambda x: x.iloc[:-1], _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1])
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (2, np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = tile(df.transform(fn_list, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1] * 2)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 2)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: x.sum(), _call_agg=True))
assert r.dtype == np.dtype('int64')
assert r.shape == (df.shape[1],)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[0],)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
fn_dict = {
'A': rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
'D': [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)],
'F': lambda x: x.iloc[:-1].reset_index(drop=True),
}
r = tile(df.transform(fn_dict, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, 4)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# SERIES CASES
# test transform scenarios on series
r = tile(series.transform(lambda x: x + 1))
assert np.dtype('int64') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_string_method():
s = pd.Series(['a', 'b', 'c'], name='s')
series = from_pandas_series(s, chunk_size=2)
with pytest.raises(AttributeError):
_ = series.str.non_exist
r = series.str.contains('c')
assert r.dtype == np.bool_
assert r.name == s.name
pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
assert r.shape == s.shape
r = tile(r)
for i, c in enumerate(r.chunks):
assert c.index == (i,)
assert c.dtype == np.bool_
assert c.name == s.name
pd.testing.assert_index_equal(c.index_value.to_pandas(),
s.index[i * 2: (i + 1) * 2])
assert c.shape == (2,) if i == 0 else (1,)
r = series.str.split(',', expand=True, n=1)
assert r.op.output_types[0] == OutputType.dataframe
assert r.shape == (3, 2)
pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
pd.testing.assert_index_equal(r.columns_value.to_pandas(), pd.RangeIndex(2))
r = tile(r)
for i, c in enumerate(r.chunks):
assert c.index == (i, 0)
pd.testing.assert_index_equal(c.index_value.to_pandas(),
s.index[i * 2: (i + 1) * 2])
pd.testing.assert_index_equal(c.columns_value.to_pandas(), pd.RangeIndex(2))
assert c.shape == (2, 2) if i == 0 else (1, 2)
with pytest.raises(TypeError):
_ = series.str.cat([['1', '2']])
with pytest.raises(ValueError):
_ = series.str.cat(['1', '2'])
with pytest.raises(ValueError):
_ = series.str.cat(',')
with pytest.raises(TypeError):
_ = series.str.cat({'1', '2', '3'})
r = series.str.cat(sep=',')
assert r.op.output_types[0] == OutputType.scalar
assert r.dtype == s.dtype
r = tile(r)
assert len(r.chunks) == 1
assert r.chunks[0].op.output_types[0] == OutputType.scalar
assert r.chunks[0].dtype == s.dtype
r = series.str.extract(r'[ab](\d)', expand=False)
assert r.op.output_types[0] == OutputType.series
assert r.dtype == s.dtype
r = tile(r)
for i, c in enumerate(r.chunks):
assert c.index == (i,)
assert c.dtype == s.dtype
assert c.name == s.name
pd.testing.assert_index_equal(c.index_value.to_pandas(),
s.index[i * 2: (i + 1) * 2])
assert c.shape == (2,) if i == 0 else (1,)
r = series.str.extract(r'[ab](\d)', expand=True)
assert r.op.output_types[0] == OutputType.dataframe
assert r.shape == (3, 1)
pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
pd.testing.assert_index_equal(r.columns_value.to_pandas(), pd.RangeIndex(1))
r = tile(r)
for i, c in enumerate(r.chunks):
assert c.index == (i, 0)
pd.testing.assert_index_equal(c.index_value.to_pandas(),
s.index[i * 2: (i + 1) * 2])
pd.testing.assert_index_equal(c.columns_value.to_pandas(), pd.RangeIndex(1))
assert c.shape == (2, 1) if i == 0 else (1, 1)
assert 'lstrip' in dir(series.str)
def test_datetime_method():
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
pd.Timestamp('2020-3-1')],
name='ss')
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
assert r.dtype == s.dt.year.dtype
pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
assert r.shape == s.shape
assert r.op.output_types[0] == OutputType.series
assert r.name == s.dt.year.name
r = tile(r)
for i, c in enumerate(r.chunks):
assert c.index == (i,)
assert c.dtype == s.dt.year.dtype
assert c.op.output_types[0] == OutputType.series
assert r.name == s.dt.year.name
pd.testing.assert_index_equal(c.index_value.to_pandas(),
s.index[i * 2: (i + 1) * 2])
assert c.shape == (2,) if i == 0 else (1,)
with pytest.raises(AttributeError):
_ = series.dt.non_exist
assert 'ceil' in dir(series.dt)
def test_series_isin():
# one chunk in multiple chunks
a = from_pandas_series(pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), chunk_size=10)
b = from_pandas_series(pd.Series([2, 1, 9, 3]), chunk_size=2)
r = tile(a.isin(b))
for i, c in enumerate(r.chunks):
assert c.index == (i,)
assert c.dtype == np.dtype('bool')
assert c.shape == (10,)
assert len(c.op.inputs) == 2
assert c.op.output_types[0] == OutputType.series
assert c.op.inputs[0].index == (i,)
assert c.op.inputs[0].shape == (10,)
assert c.op.inputs[1].index == (0,)
assert c.op.inputs[1].shape == (4,) # has been rechunked
# multiple chunk in one chunks
a = from_pandas_series(pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), chunk_size=2)
b = from_pandas_series(pd.Series([2, 1, 9, 3]), chunk_size=4)
r = tile(a.isin(b))
for i, c in enumerate(r.chunks):
assert c.index == (i,)
assert c.dtype == np.dtype('bool')
assert c.shape == (2,)
assert len(c.op.inputs) == 2
assert c.op.output_types[0] == OutputType.series
assert c.op.inputs[0].index == (i,)
assert c.op.inputs[0].shape == (2,)
assert c.op.inputs[1].index == (0,)
assert c.op.inputs[1].shape == (4,)
# multiple chunk in multiple chunks
a = from_pandas_series(pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), chunk_size=2)
b = from_pandas_series(pd.Series([2, 1, 9, 3]), chunk_size=2)
r = tile(a.isin(b))
for i, c in enumerate(r.chunks):
assert c.index == (i,)
assert c.dtype == np.dtype('bool')
assert c.shape == (2,)
assert len(c.op.inputs) == 2
assert c.op.output_types[0] == OutputType.series
assert c.op.inputs[0].index == (i,)
assert c.op.inputs[0].shape == (2,)
assert c.op.inputs[1].index == (0,)
assert c.op.inputs[1].shape == (4,) # has been rechunked
with pytest.raises(TypeError):
_ = a.isin('sth')
with pytest.raises(TypeError):
_ = a.to_frame().isin('sth')
def test_cut():
s = from_pandas_series(pd.Series([1., 2., 3., 4.]), chunk_size=2)
with pytest.raises(ValueError):
_ = cut(s, -1)
with pytest.raises(ValueError):
_ = cut([[1, 2], [3, 4]], 3)
with pytest.raises(ValueError):
_ = cut([], 3)
r, b = cut(s, [1.5, 2.5], retbins=True)
assert isinstance(r, SERIES_TYPE)
assert isinstance(b, TENSOR_TYPE)
r = tile(r)
assert len(r.chunks) == 2
for c in r.chunks:
assert isinstance(c, SERIES_CHUNK_TYPE)
assert c.shape == (2,)
r = cut(s.to_tensor(), [1.5, 2.5])
assert isinstance(r, CATEGORICAL_TYPE)
assert len(r) == len(s)
assert 'Categorical' in repr(r)
r = tile(r)
assert len(r.chunks) == 2
for c in r.chunks:
assert isinstance(c, CATEGORICAL_CHUNK_TYPE)
assert c.shape == (2,)
assert c.ndim == 1
r = cut([0, 1, 1, 2], bins=4, labels=False)
assert isinstance(r, TENSOR_TYPE)
e = pd.cut([0, 1, 1, 2], bins=4, labels=False)
assert r.dtype == e.dtype
def test_to_numeric():
raw = pd.DataFrame({"a": [1.0, 2, 3, -3]})
df = from_pandas_df(raw, chunk_size=2)
with pytest.raises(ValueError):
_ = to_numeric(df)
with pytest.raises(ValueError):
_ = to_numeric([['1.0', 1]])
with pytest.raises(ValueError):
_ = to_numeric([])
s = from_pandas_series(pd.Series(['1.0', '2.0', 1, -2]), chunk_size=2)
r = tile(to_numeric(s))
assert len(r.chunks) == 2
assert isinstance(r, SERIES_TYPE)
r = tile(to_numeric(['1.0', '2.0', 1, -2]))
assert isinstance(r, TENSOR_TYPE)
def test_astype():
s = from_pandas_series(pd.Series([1, 2, 1, 2], name='a'), chunk_size=2)
with pytest.raises(KeyError):
astype(s, {'b': 'str'})
df = from_pandas_df(pd.DataFrame({'a': [1, 2, 1, 2],
'b': ['a', 'b', 'a', 'b']}), chunk_size=2)
with pytest.raises(KeyError):
astype(df, {'c': 'str', 'a': 'str'})
def test_drop():
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=8)
with pytest.raises(KeyError):
df.drop(columns=['c9'])
with pytest.raises(NotImplementedError):
df.drop(columns=from_pandas_series(pd.Series(['c9'])))
r = df.drop(columns=['c1'])
pd.testing.assert_index_equal(r.index_value.to_pandas(), raw.index)
tiled = tile(r)
start = 0
for c in tiled.chunks:
raw_index = raw.index[start: start + c.shape[0]]
start += c.shape[0]
pd.testing.assert_index_equal(raw_index, c.index_value.to_pandas())
df = from_pandas_df(raw, chunk_size=3)
columns = ['c2', 'c4', 'c5', 'c6']
index = [3, 6, 7]
r = df.drop(columns=columns, index=index)
assert isinstance(r, DATAFRAME_TYPE)
# test series drop
raw = pd.Series(rs.randint(1000, size=(20,)))
series = from_pandas_series(raw, chunk_size=3)
r = series.drop(index=index)
assert isinstance(r, SERIES_TYPE)
# test index drop
ser = pd.Series(range(20))
rs.shuffle(ser)
raw = pd.Index(ser)
idx = from_pandas_index(raw)
r = idx.drop(index)
assert isinstance(r, INDEX_TYPE)
def test_drop_duplicates():
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 7)),
columns=['c' + str(i + 1) for i in range(7)])
raw['c7'] = [f's{j}' for j in range(20)]
df = from_pandas_df(raw, chunk_size=10)
with pytest.raises(ValueError):
df.drop_duplicates(method='unknown')
with pytest.raises(KeyError):
df.drop_duplicates(subset='c8')
# test auto method selection
assert tile(df.drop_duplicates()).chunks[0].op.method == 'tree'
# subset size less than chunk_store_limit
assert tile(df.drop_duplicates(subset=['c1', 'c3'])).chunks[0].op.method == 'subset_tree'
with option_context({'chunk_store_limit': 5}):
# subset size greater than chunk_store_limit
assert tile(df.drop_duplicates(subset=['c1', 'c3'])).chunks[0].op.method == 'tree'
assert tile(df.drop_duplicates(subset=['c1', 'c7'])).chunks[0].op.method == 'tree'
assert tile(df['c7'].drop_duplicates()).chunks[0].op.method == 'tree'
s = df['c7']
with pytest.raises(ValueError):
s.drop_duplicates(method='unknown')
def test_memory_usage():
dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
data = dict([(t, np.ones(shape=500).astype(t))
for t in dtypes])
raw = pd.DataFrame(data)
df = from_pandas_df(raw, chunk_size=(500, 2))
r = tile(df.memory_usage())
assert isinstance(r, SERIES_TYPE)
assert r.shape == (6,)
assert len(r.chunks) == 3
assert r.chunks[0].op.stage is None
df = from_pandas_df(raw, chunk_size=(100, 3))
r = tile(df.memory_usage(index=True))
assert isinstance(r, SERIES_TYPE)
assert r.shape == (6,)
assert len(r.chunks) == 2
assert r.chunks[0].op.stage == OperandStage.reduce
r = tile(df.memory_usage(index=False))
assert isinstance(r, SERIES_TYPE)
assert r.shape == (5,)
assert len(r.chunks) == 2
assert r.chunks[0].op.stage == OperandStage.reduce
raw = pd.Series(np.ones(shape=500).astype('object'), name='s')
series = from_pandas_series(raw)
r = tile(series.memory_usage())
assert isinstance(r, TENSOR_TYPE)
assert r.shape == ()
assert len(r.chunks) == 1
assert r.chunks[0].op.stage is None
series = from_pandas_series(raw, chunk_size=100)
r = tile(series.memory_usage())
assert isinstance(r, TENSOR_TYPE)
assert r.shape == ()
assert len(r.chunks) == 1
assert r.chunks[0].op.stage == OperandStage.reduce
def test_shift():
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)],
index=pd.date_range('2021-1-1', periods=10))
df = from_pandas_df(raw, chunk_size=5)
df2 = df.shift(1)
df2 = tile(df2)
for c in df2.chunks:
pd.testing.assert_index_equal(c.dtypes.index, c.columns_value.to_pandas())
df2 = df.shift(1, freq='D')
df2 = tile(df2)
for c in df2.chunks:
pd.testing.assert_index_equal(c.dtypes.index, c.columns_value.to_pandas())
def test_eval_query():
rs = np.random.RandomState(0)
raw = pd.DataFrame({'a': rs.rand(100),
'b': rs.rand(100),
'c c': rs.rand(100)})
df = from_pandas_df(raw, chunk_size=(10, 2))
with pytest.raises(NotImplementedError):
mars_eval('df.a * 2', engine='numexpr')
with pytest.raises(NotImplementedError):
mars_eval('df.a * 2', parser='pandas')
with pytest.raises(TypeError):
df.eval(df)
with pytest.raises(SyntaxError):
df.query("""
a + b
a + `c c`
""")
with pytest.raises(SyntaxError):
df.eval("""
def a():
return v
a()
""")
with pytest.raises(SyntaxError):
df.eval("a + `c")
with pytest.raises(KeyError):
df.eval("a + c")
with pytest.raises(ValueError):
df.eval("p, q = a + c")
with pytest.raises(ValueError):
df.query("p = a + c")
def test_empty():
# for DataFrame
assert from_pandas_df(pd.DataFrame()).empty == pd.DataFrame().empty
assert from_pandas_df(pd.DataFrame({})).empty ==
|
pd.DataFrame({})
|
pandas.DataFrame
|
import time
import numpy as np
import pandas as pd
pd.plotting.register_matplotlib_converters()
from pandas_datareader import data as pd_data
from fbprophet import Prophet
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import STL
def get_ticker_data(ticker, start_date, end_date):
retry_cnt, max_num_retry = 0, 3
while retry_cnt < max_num_retry:
try:
return pd_data.DataReader(ticker, "yahoo", start_date, end_date)
except Exception as e:
print(e)
retry_cnt += 1
time.sleep(np.random.randint(1, 10))
print("yahoo is not reachable")
return
|
pd.DataFrame()
|
pandas.DataFrame
|
import scipy.sparse
import pickle
import gzip
import pandas as pd
import numpy as np
import scipy.io
import os, sys, re
import logging
def _load_items(dirname, **kwargs):
name = kwargs.get('name')
column = kwargs.get('column', -1)
trim_suffix = kwargs.get('trim', False)
fbz = os.path.join(dirname, f'{name}.tsv.gz')
fb = os.path.join(dirname, f'{name}.tsv')
items = []
if os.path.exists(fbz):
with gzip.open(fbz) as fi:
for line in fi:
items.append(line.decode('utf-8').strip())
else:
with open(fb) as fi:
for line in fi:
items.append(line.strip())
if column >= 0:
data = []
for line in items:
data.append(line.split('\t')[column])
items = data
if trim_suffix:
data = []
for line in items:
data.append(re.split('\\W', line)[0])
items = data
return items
def load_barcodes(dirname, **kwargs):
"""Load barcodes.tsv or barcodes.tsv.gz"""
kwargs['name'] = 'barcodes'
return _load_items(dirname, **kwargs)
def load_features(dirname, **kwargs):
kwargs['name'] = 'features'
return _load_items(dirname, **kwargs)
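# Usage sketch (assuming a 10x Genomics-style directory; the column meaning is
# an assumption, not taken from this file): load_barcodes(dirname, trim=True)
# strips the "-1" style suffix from each barcode, while
# load_features(dirname, column=1) keeps only the second tab-separated column
# of features.tsv (typically the gene symbol).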
def load_sparse_matrix(dirname:str, **kwargs):
"""Load matrx.mtx
"""
import gzip
fm = os.path.join(dirname, 'matrix.mtx')
mtz = os.path.join(dirname, 'matrix.mtx.gz')
if os.path.exists(mtz):
mtx = scipy.io.mmread(mtz)
elif os.path.exists(fm):
mtx = scipy.io.mmread(fm)
else:
raise Exception('{} does not include data'.format(dirname))
return mtx
def load_reads_from_sparse_matrix(srcdir:str, **kwargs)->pd.DataFrame:
verbose = kwargs.get('verbose', False)
fn_cache = os.path.join(srcdir, '.count.cache')
if os.path.exists(fn_cache) and os.path.getsize(fn_cache) > 1000:
df =
|
pd.read_csv(fn_cache, sep='\t', dtype=np.int32)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import random
import datetime
import os
def max_price(df):
return max(df['close'])
def max_close_date(df):
return pd.to_datetime(max_price_row(df).date.iloc[0])
def max_price_row(df):
r, c = df[df['close'] == max_price(df)].shape
try:
if r == 1:
return df[df['close'] == max_price(df)]
except ValueError:
print("There are two values for this date")
def delta_days(df1, df2, col=None):
"""
Input:
df1 = spac_
"""
return pd.to_datetime(df1[col].iloc[0]) - max_close_date(df2)
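# Hypothetical example: delta_days(spac_row, trade_details, 'ipo_date') returns
# a pandas Timedelta (e.g. Timedelta('245 days')) between the IPO date and the
# date of the maximum closing price in the trading history.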
def rename_trade_cols():
col_names = ['company',
'symbol',
'ipo_date',
'press_release',
'record_date',
'vote_date',
'closing_liquidation_date',
'closing_year',
'new_company_ticker',
'china',
'current_stock_price',
'return_val',
'status',
'fallon_qs']
return col_names
def make_df(c1, c2, c3, c4, c5, c6, c7):
return pd.DataFrame(list(zip(c1,
c2,
c3,
c4,
c5,
c6,
c7)),
columns =['symbol',
'max_prices',
'delta_ipo_max_close_date',
'delta_press_max_close_date',
'delta_record_max_close_date',
'delta_vote_max_close_date',
'delta_liquid_max_close_date'])
def delta_df(spac_master, company_dict, spac_list):
symbol = []
max_prices = []
delta_ipo_close_date = []
delta_press_close_date = []
delta_record_close_date = []
delta_vote_close_date = []
delta_liquid_max_close_date = []
for marker in spac_list:
if marker == 'jsyn' or marker == 'algr':
spac_row = spac_master[spac_master['symbol']== marker.upper()]
trade_details = company_dict[marker+"_hist"]
symbol.append(marker)
max_prices.append(max_price(trade_details))
#All Dates
delta_ipo_close_date.append(delta_days(spac_row, trade_details, 'ipo_date'))
delta_press_close_date.append(delta_days(spac_row, trade_details, 'press_release'))
delta_record_close_date.append(delta_days(spac_row, trade_details, 'record_date'))
delta_vote_close_date.append(delta_days(spac_row, trade_details, 'vote_date'))
delta_liquid_max_close_date.append(delta_days(spac_row, trade_details, 'closing_liquidation_date'))
# print (marker, spac_row.shape, trade_details.shape)
else:
pass
return make_df(symbol,
max_prices,
delta_ipo_close_date,
delta_press_close_date,
delta_record_close_date,
delta_vote_close_date,
delta_liquid_max_close_date)
def make_dictionary(path):
company_files = os.listdir(path)
company_files.remove('.DS_Store')
company_dfs = {} #dictionary
for name in company_files:
df =
|
pd.read_csv(path+name)
|
pandas.read_csv
|
from unittest import result
import pytest
import stockeasy
import logging
import pandas as pd
df_stocklist = pd.DataFrame([['VTSAX', 120], ['MSFT', 100]], columns=['symbol', 'sharesOwned'])
df_stocklist_meta = pd.DataFrame(columns=['symbol', 'sharesOwned'])
def test_init():
assert 1 == 1
# Default Contract Checks
def test_get_info_data_typecheck():
# wrong data type passed
with pytest.raises(TypeError):
stockeasy.get_info(data=df_stocklist)
# expected data type passed
results = stockeasy.get_info(data={'input': df_stocklist})
assert isinstance(results.get('output'), pd.DataFrame)
def test_get_info_config_typecheck():
# wrong data type passed
with pytest.raises(TypeError):
stockeasy.get_info(config='')
# expected data type passed
results = stockeasy.get_info(config={'setting 1': 'Anything'})
assert isinstance(results.get('output'), pd.DataFrame)
def test_get_info_logger_typecheck():
# wrong data type passed
with pytest.raises(TypeError):
stockeasy.get_info(logger='')
# expected data type passed
results = stockeasy.get_info(logger=logging.getLogger('log'))
assert isinstance(results.get('output'), pd.DataFrame)
def test_get_info_results_typecheck():
# Verify only named dataframes are returned
results = stockeasy.get_info(data={'input': df_stocklist})
for item in results:
assert isinstance(results.get(item), pd.DataFrame)
def test_get_info_verify_results():
config = {
'symbolField': 'symbol',
'sharesField': 'sharesOwned',
'dataFields': ['symbol', 'shortName']
}
df_expected_results = pd.DataFrame(
[
['VTSAX', 120, 'Vanguard Total Stock Market Ind'],
['MSFT', 100, 'Microsoft Corporation']
],
columns=['symbol', 'sharesOwned', 'shortName']
)
# Verify Run
results = stockeasy.get_info({'input': df_stocklist}, config=config)
for item in results:
assert isinstance(results.get(item), pd.DataFrame)
print(results.get('output').head())
# Verify Results Match expectations
assert results.get('output').equals(df_expected_results)
def test_get_info_verify_results_lower_case():
df_stocklist_lower =
|
pd.DataFrame([['vtsax', 120], ['msft', 100]], columns=['symbol', 'sharesOwned'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import streamlit as st
import importlib
import os
import sys
import time
def file_selector(folder_path='.'):
filenames = os.listdir(folder_path)
filenames_ = [f for f in filenames if f[-3:] == "txt"]
selected_filename = st.selectbox('Select a file', filenames_)
return os.path.join(folder_path, selected_filename)
st.header("Rocking Data Bytes")
modo = st.sidebar.radio("Modo", options=["Buscar contenido", "Subir contenido", "Configuración"], index=0)
if "METADATA.csv" in os.listdir(".") and "TAGS.csv" in os.listdir("."):
METADATA = pd.read_csv("./METADATA.csv", index_col=0)
TAGS = pd.read_csv("./TAGS.csv", index_col=0)
else:
METADATA = pd.DataFrame(np.zeros((1, 5)), index=["INIT"], columns=["TAG_{}".format(i) for i in range(1,6)])
METADATA.to_csv("./METADATA.csv")
TAGS = pd.DataFrame({"TAGS":["funciones", "machine learning", "visualizacion", "estadistica"]})
TAGS.to_csv("./TAGS.csv")
if modo == "Buscar contenido":
METADATA = pd.read_csv("./METADATA.csv", index_col=0)
TAGS = pd.read_csv("./TAGS.csv", index_col=0)
search_tags = st.multiselect("Tags", options=[_[0] for _ in TAGS.values])
available_bytes = []
for byte in METADATA.index:
if sum([tag_ in METADATA.loc[byte].values for tag_ in search_tags]) == len(search_tags):
print(sum([tag_ in METADATA.loc[byte] for tag_ in search_tags]))
available_bytes.append(byte)
if search_tags == []:
selection = st.selectbox("Índice", options=METADATA.index[1:])
else:
selection = st.selectbox("Índice", options=available_bytes)
if st.button("Ver"):
importlib.import_module("{}".format(selection))
del sys.modules["{}".format(selection)]
elif modo == "Subir contenido":
METADATA = pd.read_csv("./METADATA.csv", index_col=0)
TAGS =
|
pd.read_csv("./TAGS.csv", index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
'''
Copyright 2018, University of Freiburg.
Chair of Algorithms and Data Structures.
<NAME> <<EMAIL>>
'''
import urllib
import codecs
import os
import glob
import http
from time import sleep
import pandas as pd
from bs4 import BeautifulSoup
import nltk
from nltk.tokenize import sent_tokenize
from six.moves import cPickle
import http.client
drop_out_lines = [
"Subscribe to receive email notifications whenever new talks are published.",
"Thanks! Please check your inbox for a confirmation email. ",
"If you want to get even more from TED, like the ability to save talks to watch later, sign up for a TED account now. ",
"TED.com translations are made possible by volunteer translators. Learn more about the Open Translation Project."
]
class TEDExtract(object):
'''
Simple ted extraction tool. Returns csv files for all transcripts of
all ted talks (defined by the number of pages)
'''
def __init__(self, args):
'''
Constructor.
'''
self.args = args
# Dictionary containing all talks
self.all_talks = {}
self.header = { 'User-Agent' : 'TEDExtract script by naetherm' }
self.conn = http.client.HTTPSConnection('www.ted.com')
def run(self):
'''
Receive all information from the ted page.
Parse all pages and save their transcripts in own csv files.
'''
if os.path.isfile(os.path.join(self.args.output, 'talk_list.pkl')):
with open(os.path.join(self.args.output, 'talk_list.pkl'), 'rb') as fin:
self.all_talks = cPickle.load(fin)
else:
# Collect all talks from all pages
for p in range(1, self.args.max_pages + 1):
path = '/talks?page={}'.format(p)
self._fetch_talk_list(path)
with open(os.path.join(self.args.output, 'talk_list.pkl'), 'wb') as fout:
cPickle.dump(self.all_talks, fout)
# DEBUG
# Loop through all talks and download the content for all available languages
for i in self.all_talks:
self._fetch_talk_content(i)
self.conn.close()
def _fetch_talk_list(self, path):
'''
This method collects all talk links from the given listing page and stores them in self.all_talks.
'''
print("Reading talks of \'{}\'".format(path))
content = self._get_content2(path)
soup = BeautifulSoup(content)
talks = soup.find_all("a", class_='ga-link')
for i in talks:
if i.attrs['href'].find('/talks/') == 0 and self.all_talks.get(i.attrs['href']) != 1:
self.all_talks[i.attrs['href']] = 1
def _get_content2(self, uri):
'''
Reading and returning the content of the provided uri.
'''
self.conn.request('GET', uri, headers=self.header)
resp = None
# do try to fetch the content of the uri
while True:
try:
resp = self.conn.getresponse()
except ConnectionError as e:
print("Received an error from server, wait for {} seconds.".format(
self.args.delay))
sleep(self.args.delay)
else:
break
return resp.read()
def _fetch_talk_content(self, talk):
'''
This method will read all transcriptions of a specific talk, do some
cleanup (removing line breaks, etc.) and save everything within a
separate csv file.
'''
# Extract the talk name
talkname = talk[7:]
if os.path.isfile(os.path.join(self.args.output, talkname + '.csv')):
print("Already downloaded, skip {}".format(talk))
else:
# The data frame object for saving all languages
req = self._get_content2(talk + '/transcript')
print("Reading transcriptions of {}".format(talk + '/transcript'))
soup = BeautifulSoup(req)
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Import container class with static data for use of the Fingrid API services.
#from statics import FingridApiStatics
# Import libraries
from ratelimit import limits
import datetime
import difflib
import requests
import pandas as pd
class FingridOpenDataClient():
'''
Pythonic client module for interacting with the Fingrid Open Data platform's API and for easy access to the platform's open datasets.
Fingrid Open Data url: https://data.fingrid.fi/en/
:How to use:
- Request a free api_key from the Fingrid Open Data platform and pass it when initialising this module.
- Show the list of available datasets using the function .show_available_datasets().
- Extract datasets using the function .get_data(). Returns a dictionary containing the requested data responses.
'''
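# A minimal usage sketch, kept as comments because the class definition
# continues below; it relies only on the methods named in the docstring above,
# and the api key value is a placeholder:
#
#   client = FingridOpenDataClient(api_key='YOUR_FREE_API_KEY')
#   client.show_available_datasets()
#   responses = client.get_data(...)  # see get_data() further below for its parameters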
def __init__(self, api_key):
# Statics
self.static_datetimeformat_str = "%Y-%m-%dT%H:%M:%SZ"
self.static_datasets_dict = self._datasets()
self.static_datasets_names_list, self.static_datasets_variableids_list, self.static_datasets_formats_list, self.static_datasets_infos_list = self._datasets_values_to_lists()
self.static_baseurl = 'https://api.fingrid.fi/v1'
# Initialise inheritance from all parent classes, setting fingridapi static data attributes.
#super().__init__()
# Store users api key.
self.api_key = api_key
################################################################
############## Static Data.
################################################################
def _datasets(self):
'''Returns static data on the available API datasets as a dict.'''
return {
'Other power transactions, down-regulation': {
'VariableId': 213,
'Formats': ('csv', 'json'),
'Info':
'''
Other power transactions which are necessary in view of the power system.
'''
},
'Other power transactions, up-regulation': {
'VariableId': 214,
'Formats': ('csv', 'json'),
'Info':
'''
Other power transactions which are necessary in view of the power system.
'''
},
'Fast Frequency Reserve FFR, procurement forecast': {
'VariableId': 278,
'Formats': ('csv', 'json'),
'Info':
'''
The procurement prognosis for Fast Frequency Reserve (FFR) (MW). Fingrid procures FFR based on the procurement prognosis. The prognosis is updated once a day, typically at 11:00 (EET).
The Fast Frequency Reserve (FFR) is procured to handle low-inertia situations. The needed volume of Fast Frequency Reserve depends on the amount of inertia in the power system and the size of the reference incident.
'''
},
'Fast Frequency Reserve FFR, procured volume': {
'VariableId': 276,
'Formats': ('csv', 'json'),
'Info':
'''
The volume of procured Fast Frequency Reserve (FFR). The procured volume will be published 22:00 (EET) on previous evening.
The Fast Frequency Reserve (FFR) is procured to handle low-inertia situations. The needed volume of Fast Frequency Reserve depends on the amount of inertia in the power system and the size of the reference incident.
'''
},
'Fast Frequency Reserve FFR, received bids': {
'VariableId': 275,
'Formats': ('csv', 'json'),
'Info':
'''
The volume of received Fast Frequency Reserve (FFR) bids. The volume of bids will be published 22:00 (EET) on previous evening.
The Fast Frequency Reserve (FFR) is procured to handle low-inertia situations. The needed volume of Fast Frequency Reserve depends on the amount of inertia in the power system and the size of the reference incident.
'''
},
'Fast Frequency Reserve FFR, price': {
'VariableId': 277,
'Formats': ('csv', 'json'),
'Info':
'''
The price of procured Fast Frequency Reserve (FFR) (€/MW). The price will be published 22:00 (EET) on previous evening. The price is determined by the price of the most expensive procured bid (marginal pricing).
The Fast Frequency Reserve (FFR) is procured to handle low-inertia situations. The needed volume of Fast Frequency Reserve depends on the amount of inertia in the power system and the size of the reference incident.
'''
},
'Kinetic energy of the Nordic power system - real time data': {
'VariableId': 260,
'Formats': ('csv', 'json'),
'Info':
'''
Real-time estimate of the kinetic energy of the Nordic power system calculated by the Nordic transmission system operators.
The data is updated every 1 minute.
Historical data as of 2015/3/27 available.
More information can be found on Fingrid's internet-site.
'''
},
'Cross-border transmission fee, import from Russia': {
'VariableId': 85,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly cross-border transmission fee (dynamic tariff) for imports from Russia on Fingrid's connections.
'''
},
'Cross-border transmission fee, export to Russia': {
'VariableId': 86,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly cross-border transmission fee (dynamic tariff) for exports to Russia on Fingrid's connections.
'''
},
'Imbalance power between Finland and Sweden': {
'VariableId': 176,
'Formats': ('csv', 'json'),
'Info':
'''
The volume of power equals to the difference between measured and commercial transmission between Finland and Sweden. The tradetypes of commercial flow include day ahead, intraday and trades between Fingrid and Svenska Kraftnät during the operational hour. When the value of imbalance power volume is positive Fingrid has sold imbalance power to Sweden. When the value of imbalance power volume is negative Fingrid has bought imbalance power from Sweden.
'''
},
'Emission factor of electricity production in Finland - real time data': {
'VariableId': 266,
'Formats': ('csv', 'json'),
'Info':
'''
Carbon dioxide emission estimate of electricity production in Finland, calculated near real time. The emissions are estimated by summing the products of each electricity production type and its emission factor, and by dividing the sum by Finland's total electricity production.
The data is updated every 3 minutes.
'''
},
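# Illustrative example of the estimate described above (numbers are made up):
# with 4000 MW of hydro and nuclear at ~0 gCO2/kWh and 1000 MW of CHP at
# 600 gCO2/kWh, the production emission factor is
# (4000*0 + 1000*600) / 5000 = 120 gCO2/kWh.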
'Emission factor for electricity consumed in Finland - real time data': {
'VariableId': 265,
'Formats': ('csv', 'json'),
'Info':
'''
Estimate of the carbon dioxide emissions of the electricity consumed in Finland. The emissions are estimated by taking Finland's electricity production, electricity import as well as electricity export into account.
The data is updated every 3 minutes.
'''
},
'Power system state - real time data': {
'VariableId': 209,
'Formats': ('csv', 'json'),
'Info':
'''
Different states of the power system - traffic lights: 1=green, 2=yellow, 3=red, 4=black, 5=blue
Green: Power system is in normal secure state.
Yellow: Power system is in endangered state. The adequacy of the electricity is endangered or the power system doesn't fulfill the security standards.
Red: Power system is in disturbed state. Load shedding has happened in order to keep the adequacy and security of the power system or there is a remarkable risk to a wide black out.
Black: An extremely serious disturbance or a wide black out in Finland.
Blue: The network is being restored after an extremely serious disturbance or a wide blackout.
The data is updated every 3 minutes.
'''
},
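# Illustrative lookup for the state codes listed above:
#   POWER_SYSTEM_STATES = {1: 'green', 2: 'yellow', 3: 'red', 4: 'black', 5: 'blue'}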
'Net import/export of electricity - real time data': {
'VariableId': 194,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Net import to Finland and net export from Finland. The data is updated every 3 minutes.
Production information and import/export are based on the real-time measurements in Fingrid's operation control system.
'''
},
'Transmission between Sweden and Åland - real time data': {
'VariableId': 90,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Power transmission between Åland and Sweden based on the real-time measurements in Fingrid's operation control system. Åland is a part of SE3 (Central-Sweden) bidding zone. Positive sign means transmission from Åland to Sweden. Negative sign means transmission from Sweden to Åland. The data is updated every 3 minutes.
'''
},
'Transmission between Finland and Central Sweden - real time data': {
'VariableId': 89,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Power transmission between Central Sweden (SE3) and Finland (FI) HVDC tie lines. Data is based on the real-time measurements in Fingrid's operation control system. Positive sign means transmission from Finland to Central Sweden (SE3). Negative sign means transmission from Central Sweden (SE3) to Finland. The data is updated every 3 minutes.
'''
},
'Transmission between Finland and Norway - real time data': {
'VariableId': 187,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Power transmission between Finland and Norway 220kV AC tie line. Data is based on the real-time measurements in Fingrid's operation control system. Positive sign means transmission from Finland to Norway. Negative sign means transmission from Norway to Finland. The data is updated every 3 minutes.
'''
},
'Transmission between Finland and Northern Sweden - real time data': {
'VariableId': 87,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Power transmission between Northern Sweden (SE1) and Finland (FI) 400kV AC tie line. Data is based on the real-time measurements in Fingrid's operation control system. Positive sign means transmission from Finland to Northern Sweden (SE1). Negative sign means transmission from Northern Sweden (SE1) to Finland. The data is updated every 3 minutes.
'''
},
'Transmission between Finland and Russia - real time data': {
'VariableId': 195,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Power transmission between Finland and Russia based on the real-time measurements in Fingrid's operation control system. Positive sign means transmission from Finland to Russia. Negative sign means transmission from Russia to Finland. The data is updated every 3 minutes.
'''
},
'Transmission between Finland and Estonia - real time data': {
'VariableId': 180,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Power transmission between Finland and Estonia HVDC tie lines (Estlink 1 and Estlink 2). Data is based on the real-time measurements in Fingrid's operation control system. Positive sign means transmission from Finland to Estonia. Negative sign means transmission from Estonia to Finland. The data is updated every 3 minutes.
'''
},
'Balancing Capacity Market bids': {
'VariableId': 270,
'Formats': ('csv', 'json'),
'Info':
'''
The amount of bids in the balancing capacity market, MW/week. Fingrid procures mFRR capacity through the balancing capacity market on a weekly auction, which is held when needed. The balance service provider pledges itself to leave regulating bids on the regulation market. For that the balance service provider is entitled to capacity payment. The amount of bids is published at the latest on Friday on the week before the procurement week at 12:00 (EET)
'''
},
'Balancing Capacity Market results': {
'VariableId': 261,
'Formats': ('csv', 'json'),
'Info':
'''
The amount of capacity procured from the balancing capacity market, MW/week. Fingrid procures mFRR capacity through the balancing capacity market on a weekly auction, which is held when needed. The balance service provider pledges itself to leave regulating bids on the regulation market. For that the balance service provider is entitled to capacity payment. The procured amount is published at the latest on Friday on the week before the procurement week at 12:00 (EET)
'''
},
'Frequency - historical data': {
'VariableId': None,
'Formats': ('zip'),
'Info':
'''
Frequency of the Nordic synchronous system with a 10 Hz sample rate.
The frequency measurement data has been divided into archives consisting of monthly frequency measurement data. Within the archives, the data is divided into daily CSV-files that can be manipulated using common data analysis software.
The frequency is measured at 400 kV substations at different locations in Finland with a sample rate of 10 Hz. The data may contain some gaps due to telecommunication errors etc. The times are according to UTC+2 / UTC+3 during daylight saving time period.
'''
},
'Frequency - real time data': {
'VariableId': 177,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Frequency of the power system based on the real-time measurements in Fingrid's operation control system. The data is updated every 3 minutes.
'''
},
'Frequency containment reserve for disturbances, procured volumes in hourly market': {
'VariableId': 82,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly volume of procured frequency containment reserve for disturbances (FCR-D) in Finnish hourly market for each CET-timezone day is published previous evening at 22:45 (EET).
FCR-D is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency above 49,5 Hz during disturbances.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Frequency containment reserve for disturbances, received bids in hourly market': {
'VariableId': 286,
'Formats': ('csv', 'json'),
'Info':
'''
The volume of received frequency containment reserve for disturbances (FCR-D) bids. The volume of bids will be published 22:45 (EET) on previous evening.
FCR-D is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency above 49,5 Hz during disturbances.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Frequency containment reserves for disturbances, hourly market prices': {
'VariableId': 81,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly prices (€/MW,h) of procured frequency containment reserve for disturbances (FCR-D) in Finnish hourly market for each CET-timezone day is published previous evening at 22:45 (EET).
FCR-D is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency above 49,5 Hz during disturbances.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Peak load power - real time data': {
'VariableId': 183,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Activated peak load power based on the real-time measurements in Fingrid's operation control system including peak load reserve activations and trial runs during winter period. The data is updated every 3 minutes.
'''
},
'Industrial cogeneration - real time data': {
'VariableId': 202,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Cogeneration of industry based on the real-time measurements in Fingrid's operation control system. The data is updated every 3 minutes.
Cogeneration means power plants that produce both electricity and district heating or process steam (combined heat and power, CHP).
'''
},
'Hour change regulation, down-regulation': {
'VariableId': 239,
'Formats': ('csv', 'json'),
'Info':
'''
In order to reduce problems encountered at the turn of the hour in the Nordic countries or in Finland, the planned production changes will be transferred to begin 15 minutes before or after the planned moment.
'''
},
'Hour change regulation, up-regulation': {
'VariableId': 240,
'Formats': ('csv', 'json'),
'Info':
'''
In order to reduce problems encountered at the turn of the hour in the Nordic countries or in Finland, the planned production changes will be transferred to begin 15 minutes before or after the planned moment.
'''
},
'The sales price of production imbalance electricity': {
'VariableId': 93,
'Formats': ('csv', 'json'),
'Info':
'''
The up-regulating price of the hour is the price of production imbalance power sold by Fingrid to a balance responsible party. If no up regulation has been made or if the hour has been defined as a down-regulation hour, the day ahead spot price of Finland is used as the selling price of production imbalance power. Prices are updated hourly.
'''
},
'Surplus/deficit, cumulative - real time data': {
'VariableId': 186,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Information is based on the real time measurements in Fingrid's power control system.
Power deficit/surplus represents the balance between production and consumption in Finland, taking into account imports and exports. It is calculated as the difference between the measured net import/export and the confirmed net exchange program between Finland and the other Nordic countries. The cumulative production deficit/surplus is the hourly energy generated from the difference.
Sign convention: production deficit -, surplus +
The data is updated every 3 minutes.
'''
},
'Wind power production - real time data': {
'VariableId': 181,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Wind power production based on the real-time measurements in Fingrid's operation control system. About a tenth of the production capacity is estimated as measurements aren't available. The data is updated every 3 minutes.
'''
},
'Wind power generation - hourly data': {
'VariableId': 75,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Finnish hourly wind power generation is a sum of measurements from wind parks supplied to Fingrid and of the estimate Fingrid makes from non-measured wind parks. Non-measured wind parks are about a tenth of the production capacity.
'''
},
'Hydro power production - real time data': {
'VariableId': 191,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Hydro power production in Finland based on the real-time measurements in Fingrid's operation control system. The data is updated every 3 minutes.
'''
},
'Nuclear power production - real time data': {
'VariableId': 188,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Nuclear power production in Finland based on the real-time measurements in Fingrid's operation control system. The data is updated every 3 minutes.
Due to the fire on our Olkiluoto substation the total amount of nuclear power measurement has been incorrect between 18 July at 09:00 to 20 July at 13:00. Data corrected 25.1.2019.
'''
},
'Day-ahead transmission capacity SE1-FI – planned': {
'VariableId': 142,
'Formats': ('csv', 'json'),
'Info':
'''
Planned day-ahead transmission capacity from North-Sweden (SE1) to Finland (FI). Transmission capacity is given hourly for every next week hour. Each week's hour is given one value. Planned weekly transmission capacity Fingrid will publish every Tuesday. Information will be updated if there are changes to the previous plan timetable or capacity. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Intraday transmission capacity FI - SE1': {
'VariableId': 44,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity for intraday market from Finland to Northern Sweden (FI - SE1). For intraday market capacity is given as free capacity after dayahead market. Capacity is published once a day and not updated.
'''
},
'Day-ahead transmission capacity FI-SE1 – planned': {
'VariableId': 143,
'Formats': ('csv', 'json'),
'Info':
'''
Planned day-ahead transmission capacity from Finland (FI) to North-Sweden (SE1). Transmission capacity is given hourly for every next week hour. Each week's hour is given one value. Planned weekly transmission capacity Fingrid will publish every Tuesday. Information will be updated if there are changes to the previous plan timetable or capacity. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Intraday transmission capacity SE1-FI': {
'VariableId': 38,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity for intraday market from Northern Sweden to Finland (SE1-FI). For intraday market capacity is given as free capacity after dayahead market. Capacity is published once a day and not updated.
'''
},
'The sum of the down-regualtion bids in the Balancing energy market': {
'VariableId': 105,
'Formats': ('csv', 'json'),
'Info':
'''
The hourly sum of the down-regulation offers given by Finnish parties to the Balancing energy market is published hourly with one hour delay, eg. information from hour 07-08 is published at 9 o'clock.
Balancing energy market is a market place for manual frequency restoration reserve (mFRR) which is used to balance the electricity generation and consumption in real time. The Balancing energy market organized by Fingrid is part of the Nordic Balancing energy market that is also called the Regulating power market. Fingrid orders up- or down-regulation from the Balancing energy market. Down-regulation considers increasing of consumption or reducing of generation. Down-regulation bids have negative sign.
'''
},
'The sum of the up-regulation bids in the balancing energy market': {
'VariableId': 243,
'Formats': ('csv', 'json'),
'Info':
'''
The hourly sum of the up-regulation offers given by Finnish parties to the Balancing energy market is published hourly with one hour delay, eg. information from hour 07-08 is published at 9 o'clock.
Balancing energy market is a market place for manual frequency restoration reserve (mFRR) which is used to balance the electricity generation and consumption in real time. The Balancing energy market organized by Fingrid is part of the Nordic Balancing energy market that is also called the Regulating power market. Fingrid orders up- or down-regulation from the Balancing energy market. Up-regulation considers increasing of production or reducing of consumption.
'''
},
'Day-ahead transmission capacity FI-SE3 – official': {
'VariableId': 27,
'Formats': ('csv', 'json'),
'Info':
'''
Day-ahead transmission capacity from Finland (FI) to Central-Sweden (SE3). Transmission capacity is given hourly for every hour of the next day. Each hour is given one value. Day-ahead transmission capacity Fingrid will publish every day in the afternoon. This capacity will not be changed after publication. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Transmission capacity RUS-FI': {
'VariableId': 63,
'Formats': ('csv', 'json'),
'Info':
'''
The total commercial transmission capacity of the 400 kV transmission lines from Russia to Finland owned by Fingrid. The technical capacity on 400 kV lines from Russia to Finland is 1400 MW or 1000 MW, depending whether the NWPP power plant that is located in St. Petersburg area is connected to the Finnish or the Russian power system. Fingrid has reserved 100 MW of transmission capacity from Russia to Finland to buy reserve power. The technical maximum capacity from Finland to Russia is 350 MW, of which Fingrid has reserved 30 MW to buy reserve power.
'''
},
'The buying price of production imbalance electricity': {
'VariableId': 96,
'Formats': ('csv', 'json'),
'Info':
'''
The down-regulating price of the hour is the price of production imbalance power purchased by Fingrid from a balance responsible party. If no down-regulation has been made or if the hour has been defined as an up-regulation hour, the Elspot FIN price is used as the purchase price of production imbalance power.
'''
},
'Intraday transmission capacity FI-EE – real time data': {
'VariableId': 114,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity to be given to intraday market FI-EE. After Elspot trades have been closed, real time intraday capacity is equivalent to the allocated intraday capacity. The real time capacity is updated after each intraday trade so that it corresponds to real time situation.
'''
},
'Commercial transmission of electricity between FI-SE3': {
'VariableId': 32,
'Formats': ('csv', 'json'),
'Info':
'''
Commercial electricity flow (dayahead market and intraday market) between Finland (FI) and Central Sweden (SE3). Positive sign is export from Finland to Sweden.
'''
},
'Bilateral trade capacity RUS-FI, unused': {
'VariableId': 64,
'Formats': ('csv', 'json'),
'Info':
'''
Unused bilateral trade capacity From Russia (RUS) to Finland (FI). The capacity of electricity transmission in bilateral trade can be left unused if the parties do not import the maximum amount of electricity to Finland.
'''
},
'Intraday transmission capacity FI-SE3': {
'VariableId': 45,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity for intraday market from Finland to Mid Sweden (FI - SE3). For intraday market capacity is given as free capacity after dayahead market. Capacity is published once a day and not updated.
'''
},
'Day-ahead transmission capacity SE1-FI – official': {
'VariableId': 24,
'Formats': ('csv', 'json'),
'Info':
'''
Day-ahead transmission capacity from North-Sweden (SE1) to Finland (FI). Transmission capacity is given hourly for every hour of the next day. Each hour is given one value. Day-ahead transmission capacity Fingrid will publish every day in the afternoon. This capacity will not be changed after publication. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Automatic Frequency Restoration Reserve, capacity, down': {
'VariableId': 2,
'Formats': ('csv', 'json'),
'Info':
'''
Procured automatic Frequency Restoration Reserve (aFRR / FRR-A) capacity, down [MW]
'''
},
'Automatic Frequency Restoration Reserve, activated, up': {
'VariableId': 54,
'Formats': ('csv', 'json'),
'Info':
'''
Activated automatic Frequency Restoration Reserve (aFRR) energy, up [MWh]
'''
},
'Intraday transmission capacity SE3-FI': {
'VariableId': 39,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity for intraday market from Mid Sweden to Finland (SE3-FI). Capacity for intraday market is given as free capacity after dayahead market. Capacity is published once a day and not updated.
'''
},
'Electricity consumption forecast - updated hourly': {
'VariableId': 166,
'Formats': ('csv', 'json'),
'Info':
'''
Electricity consumption forecast of Finland. The forecast is made by Fingrid and updated hourly.
'''
},
'Electricity production, surplus/deficit - real time data': {
'VariableId': 198,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Finland's energy production surplus/deficit. Information is based on the real time measurements in Fingrid's power control system.
Power deficit/surplus represents the balance between power production and consumption in Finland, taking into account imports and exports. Power deficit/surplus is calculated as the difference between the measured net import/export and the confirmed net exchange program between Finland and the other Nordic countries.
Sign convention: production deficit -, surplus +
The data is updated every 3 minutes.
'''
},
'Bilateral trade capacity FI-RUS, unused': {
'VariableId': 49,
'Formats': ('csv', 'json'),
'Info':
'''
Unused bilateral trade capacity from Finland (FI) to Russia (RUS). The capacity of electricity transmission in bilateral trade can be left unused if the parties do not export the maximum amount of electricity to Russia.
'''
},
'Transmission of electricity between Finland and Central Sweden - measured hourly data': {
'VariableId': 61,
'Formats': ('csv', 'json'),
'Info':
'''
Measured electrical transmission between Finland and Central Sweden (SE3) high voltage direct current tie lines. Positive sign means transmission from Finland to Central Sweden (SE3). Negative sign means transmission from Central Sweden (SE3) to Finland.
The value is updated once every hour after the hour shift. Each day before noon the values of the previous day are updated with more accurate measurement values.
'''
},
'Commercial transmission of electricity between FI-SE1': {
'VariableId': 31,
'Formats': ('csv', 'json'),
'Info':
'''
Commercial transmission of electricity (dayahead market and intraday market) between Finland (FI) and Northern Sweden (SE1). Positive sign is export from Finland to Sweden.
'''
},
'Intraday transmission capacity FI-EE': {
'VariableId': 113,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity to be given to intraday market FI-EE
'''
},
'Intraday transmission capacity FI-RUS': {
'VariableId': 50,
'Formats': ('csv', 'json'),
'Info':
'''
The capacity given to intraday market means transfer capacity after day-ahead trade from Finland (FI) to Russia (RUS). The intraday capacity between Finland and Russia is updated once a day. The data will not be revised after hourly day-ahead trade.
'''
},
'Measured transmission of electricity in Finland from north to south': {
'VariableId': 30,
'Formats': ('csv', 'json'),
'Info':
'''
Measured electricity flow in North-South cut in Finland (cut P1). In the graph flow from North to South is positive.
'''
},
'Day-ahead transmission capacity EE-FI – official': {
'VariableId': 112,
'Formats': ('csv', 'json'),
'Info':
'''
Day-ahead transmission capacity from Estonia (EE) to Finland (FI). Transmission capacity is given hourly for every hour of the next day. Each hour is given one value. Day-ahead transmission capacity Fingrid will publish every day in the afternoon. This capacity will not be changed after publication. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Planned transmission capacity RUS-FI': {
'VariableId': 127,
'Formats': ('csv', 'json'),
'Info':
'''
Planned transmission capacity from Russia to Finland. Transmission capacity is given hourly for every next week hour. Each week's hour is given one value. Planned weekly transmission capacity Fingrid will publish every Tuesday. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Planned transmission capacity FI-RUS': {
'VariableId': 41,
'Formats': ('csv', 'json'),
'Info':
'''
Planned transmission capacity from Finland to Russia. Transmission capacity is given hourly for every next week hour. Each week's hour is given one value. Planned weekly transmission capacity Fingrid will publish every Tuesday. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Transmission of electricity between Finland and Estonia - measured hourly data': {
'VariableId': 55,
'Formats': ('csv', 'json'),
'Info':
'''
Measured electrical transmission between Finland and Estonia HVDC tie lines (Estlink 1 and Estlink 2). Positive sign means transmission from Finland to Estonia. Negative sign means transmission from Estonia to Finland.
The value is updated once every hour after the hour shift. Each day before noon the values of the previous day are updated with more accurate measurement values.
'''
},
'Transmission capacity FI-RUS': {
'VariableId': 103,
'Formats': ('csv', 'json'),
'Info':
'''
The total commercial transmission capacity of the 400 kV transmission lines from Finland to Russia owned by Fingrid. The technical capacity on 400 kV lines from Russia to Finland is 1400 MW or 1000 MW, depending whether the NWPP power plant that is located in St. Petersburg area is connected to the Finnish or the Russian power system. Fingrid has reserved 100 MW of transmission capacity from Russia to Finland to buy reserve power. The technical maximum capacity from Finland to Russia is 350 MW, of which Fingrid has reserved 30 MW to buy reserve power.
'''
},
'Planned weekly capacity from south to north': {
'VariableId': 29,
'Formats': ('csv', 'json'),
'Info':
'''
Planned weekly capacity on North-South cut in Finland (cut P1) from South to North. Planned outages are included in the weekly capacity, information is not updated after disturbances.
'''
},
'Intraday transmission capacity EE-FI': {
'VariableId': 110,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity to be given to intraday market EE - FI
'''
},
'Wind power generation forecast - updated once a day': {
'VariableId': 246,
'Formats': ('csv', 'json'),
'Info':
'''
Finnish wind power generation forecasts for the next day. Forecast is updated every day at 12 p.m. EET. Length of the forecast is 36 hours. Overlapping hours are overwritten.
The forecast is based on weather forecasts and data about the location, size and capacity of wind turbines. The weather data is sourced from multiple providers.
'''
},
'Day-ahead transmission capacity FI-EE – official': {
'VariableId': 115,
'Formats': ('csv', 'json'),
'Info':
'''
Day-ahead transmission capacity from Finland (FI) to Estonia (EE). Transmission capacity is given hourly for every hour of the next day. Each hour is given one value. Day-ahead transmission capacity Fingrid will publish every day in the afternoon. This capacity will not be changed after publication. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Total production capacity used in the solar power forecast': {
'VariableId': 267,
'Formats': ('csv', 'json'),
'Info':
'''
This is the total solar power production capacity used in Fingrid's solar power forecast. It is based on the small scale production statistics gathered by the Energy authority. It is also updated with estimates based on information that's provided to Fingrid.
This total capacity information can be used, for example, to calculate the rate of production of solar power, by comparing it to the forecasted solar production series by Fingrid. This capacity information cannot however be considered as the official amount of solar production capacity in Finland, as it is updated manually and by using estimates.
'''
},
'Wind power generation forecast - updated hourly': {
'VariableId': 245,
'Formats': ('csv', 'json'),
'Info':
'''
Finnish wind power generation forecast for the next 36 hours. Updated hourly.
The forecast is based on weather forecasts and data about the location, size and capacity of wind turbines. The weather data is sourced from multiple providers.
'''
},
'Electricity consumption forecast - next 24 hours': {
'VariableId': 165,
'Formats': ('csv', 'json'),
'Info':
'''
An hourly consumption forecast for the next 24 hours made by Fingrid. Forecast is published on previous day at 12:00 EET.
'''
},
'Electricity consumption in Finland': {
'VariableId': 124,
'Formats': ('csv', 'json'),
'Info':
'''
Electricity consumption in Finland is based on Fingrid's production measurements. Minor part of production which is not measured is estimated. The consumption is calculated as follows: Consumption = Production + Import - Export. Updated hourly.
'''
},
'Bilateral trade between FI-RUS': {
'VariableId': 68,
'Formats': ('csv', 'json'),
'Info':
'''
Bilateral trade between Finland and Russia. Fingrid and the Russian parties confirm the bilateral trades on 400 kV cross-border connection in the morning of the commercial day D for the following commercial day D+1. The confirmed bilateral trades will be bid price-independently on the electricity spot market
'''
},
'Condensing power production - real time data': {
'VariableId': 189,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Condensing power production based on the real-time measurements in Fingrid's operation control system. The data is updated every 3 minutes.
Publishing this data has been stopped since 14.9.2017 due to changes in division of power plants. The production data is included in other real time production measurement time series.
'''
},
'Intraday transmission capacity EE-FI – real time data': {
'VariableId': 111,
'Formats': ('csv', 'json'),
'Info':
'''
Transmission capacity to be given to intraday market EE-FI. After Elspot trades have been closed, real time intraday capacity is equivalent to the allocated intraday capacity. The real time capacity is updated after each intraday trade so that it corresponds to real time situation.
'''
},
'Ordered down-regulations from Balancing energy market in Finland': {
'VariableId': 33,
'Formats': ('csv', 'json'),
'Info':
'''
Ordered down-regulations from Balancing energy market in Finland. The volume of ordered down-regulations from Balancing energy market in Finland is published hourly with two hours delay, eg. information from hour 06-07 is published at 9 o'clock.
Balancing energy market is a market place for manual frequency restoration reserve (mFRR) which is used to balance the electricity generation and consumption in real time. The Balancing energy market organized by Fingrid is part of the Nordic Balancing energy market that is also called the Regulating power market. Fingrid orders up- or down-regulation from the Balancing energy market. Down-regulation considers increasing of consumption or reducing of generation. Down-regulation volume has negative sign.
'''
},
'Electricity consumption in Finland - real time data': {
'VariableId': 193,
'Formats': ('csv', 'json'),
'Info':
'''
Electricity consumption in Finland is calculated based on production and import/export. The data is updated every 3 minutes.
Production information and import/export are based on the real-time measurements in Fingrid's operation control system.
'''
},
'Temperature in Jyväskylä - real time data': {
'VariableId': 182,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Outside air temperature measurement at Petäjävesi substation. The data is updated every 3 minutes.
'''
},
'Cogeneration of district heating - real time data': {
'VariableId': 201,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Cogeneration of district heating based on the real-time measurements in Fingrid's operation control system. The data is updated every 3 minutes.
Cogeneration means power plants that produce both electricity and district heating or process steam (combined heat and power, CHP).
'''
},
'Special regulation, up-regulation': {
'VariableId': 119,
'Formats': ('csv', 'json'),
'Info':
'''
Regulation which takes place in the regulating power market by Fingrid for reasons other than the needs of national balance management
'''
},
'Temperature in Helsinki - real time data': {
'VariableId': 178,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Outside air temperature measurement at Tammisto substation. The data is updated every 3 minutes.
'''
},
'Electricity production in Finland - real time data': {
'VariableId': 192,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Electricity production in Finland based on the real-time measurements in Fingrid's operation control system. The data is updated every 3 minutes.
'''
},
'Automatic Frequency Restoration Reserve, price, up': {
'VariableId': 52,
'Formats': ('csv', 'json'),
'Info':
'''
Volume weighted average price for procured upward automatic Frequency Restoration Reserve (aFRR) capacity, [€/MW]
'''
},
'Automatic Frequency Restoration Reserve, price, down': {
'VariableId': 51,
'Formats': ('csv', 'json'),
'Info':
'''
Volume weighted average price for procured downward automatic Frequency Restoration Reserve (aFRR) capacity, [€/MW]
'''
},
'Time deviation - real time data': {
'VariableId': 206,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Time deviation is the time difference in seconds between a clock running according to the frequency of the grid and a reference clock independent of the frequency of the grid. The data is updated every 3 minutes.
'''
},
'Stock exchange trade FI-RUS-FI': {
'VariableId': 69,
'Formats': ('csv', 'json'),
'Info':
'''
Direct trade volumes derive from freely placed bids in the Nordic day-ahead (Elspot) and intraday (Elbas) electricity markets. Information is updated once the day-ahead market results are public. Information on the intraday trade is updated before the operational hour.
'''
},
'Electricity production prediction - updated hourly': {
'VariableId': 241,
'Formats': ('csv', 'json'),
'Info':
'''
The calculation of production forecast in Finland is based on the production plans that balance responsible parties have reported to Fingrid. Production forecast is updated hourly.
'''
},
'Automatic Frequency Restoration Reserve, capacity, up': {
'VariableId': 1,
'Formats': ('csv', 'json'),
'Info':
'''
Procured automatic Frequency Restoration Reserve (aFRR) capacity, up [MW]
'''
},
'Transmission of electricity between Finland and Northern Sweden - measured hourly data': {
'VariableId': 60,
'Formats': ('csv', 'json'),
'Info':
'''
Measured transmission of electricity between Finland and Northern Sweden (SE1). Positive sign means transmission from Finland to Northern Sweden (SE1). Negative sign means transmission from Northern Sweden (SE1) to Finland.
The value is updated once every hour after the hour shift. Each day before noon the values of the previous day are updated with more accurate measurement values.
'''
},
'Temperature in Oulu - real time data': {
'VariableId': 196,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Outside air temperature measurement at Leväsuo substation. The data is updated every 3 minutes.
'''
},
'Total production capacity used in the wind power forecast': {
'VariableId': 268,
'Formats': ('csv', 'json'),
'Info':
'''
This is the total wind production capacity used in Fingrid's wind power forecast. It is based on capacity information gathered by Fingrid.
This total capacity information can be used, for example, to calculate the rate of production of wind power, by comparing it to the actual wind production series by Fingrid. This capacity information cannot however be considered as the official amount of wind production capacity in Finland, as it is updated manually.
'''
},
'Temperature in Rovaniemi - real time data': {
'VariableId': 185,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Outside air temperature measurement at Valajaskoski substation. The data is updated every 3 minutes.
'''
},
'Stock exchange capacity FI-RUS': {
'VariableId': 102,
'Formats': ('csv', 'json'),
'Info':
'''
The capacity on the 400 kV connection from Finland to Russia is reserved to direct trade of the following commercial day. Fingrid and the Russian parties, who have jointly agreed that the capacity is 140 MW in both directions, daily confirm the capacity.
'''
},
'Transmission of electricity between Finland and Russia - measured hourly data': {
'VariableId': 58,
'Formats': ('csv', 'json'),
'Info':
'''
Measured electrical transmission between Finland and Russia. Positive sign means transmission from Finland to Russia. Negative sign means transmission from Russia to Finland.
The value is updated once every hour after the hour shift. Each day before noon the values of the previous day are updated with more accurate measurement values.
'''
},
'Electricity production prediction - premilinary': {
'VariableId': 242,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly electricity generation forecast is based on the production plans that balance responsible parties have reported to Fingrid. The forecast is published daily by 6.00 pm for the next day, and it is not updated to match the updated production plans that balance responsible parties send to Fingrid hourly.
'''
},
'Automatic Frequency Restoration Reserve, activated, down': {
'VariableId': 53,
'Formats': ('csv', 'json'),
'Info':
'''
Activated automatic Frequency Restoration Reserve (aFRR) energy, down [MWh]
'''
},
'The price of comsumption imbalance electricity': {
'VariableId': 92,
'Formats': ('csv', 'json'),
'Info':
'''
The price of consumption imbalance power is the price for which Fingrid both purchases imbalance power from a balance responsible party and sells it to one. In the case of a regulating hour, the regulation price is used. If no regulation has been made, the Elspot FIN price is used as the purchase and selling price of consumption imbalance power. Data gathering to Excel-sheet or XML format is possible in periods not longer than one year due to limitations in data transmission.
'''
},
'Electricity production in Finland': {
'VariableId': 74,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly electricity production in Finland is based on Fingrid's measurements. Minor part of production which is not measured is estimated. Updated hourly.
'''
},
'Commercial transmission of electricity between FI-EE': {
'VariableId': 140,
'Formats': ('csv', 'json'),
'Info':
'''
Commercial electricity flow (dayahead market and intraday market) between Finland (FI) and Estonia (EE) including system supportive trade between TSOs. Positive sign is export from Finland to Estonia.
'''
},
'Transmission of electricity between Finland and Norway - measured hourly data': {
'VariableId': 57,
'Formats': ('csv', 'json'),
'Info':
'''
Measured electrical transmission between Finland and Norway 220kV tie line. Positive sign means transmission from Finland to Norway. Negative sign means transmission from Norway to Finland.
The value is updated once every hour after the hour shift. Each day before noon the values of the previous day are updated with more accurate measurement values.
'''
},
'Special regulation, down-regulation': {
'VariableId': 118,
'Formats': ('csv', 'json'),
'Info':
'''
Regulation which takes place in the regulating power market by Fingrid for reasons other than the needs of national balance management
'''
},
'Electricity production, reserve power plants and small-scale production - real time data': {
'VariableId': 205,
'Formats': ('csv', 'json', 'app'),
'Info':
'''
Reserve power plants electrical production is based on the real-time measurements in Fingrid's operation control system. Estimated small-scale production is added, of which there are no measurements available. The data is updated every 3 minutes.
'''
},
'Frequency Containment Reserve for Normal operation, hourly market bids': {
'VariableId': 285,
'Formats': ('csv', 'json'),
'Info':
'''
The volume of received Frequency Containment Reserves for Normal operation (FCR-N) bids. The volume of bids will be published 22:45 (EET) on previous evening.
FCR-N is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency in normal frequency range between 49,9 - 50,1 Hz.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Frequency Containment Reserve for Normal operation, activated': {
'VariableId': 123,
'Formats': ('csv', 'json'),
'Info':
'''
Activated Frequency Containment Reserve for Normal operation (FCR-N) is published hourly one hour after the hour in question, for example the value for hour 07-08 is published at 9 o'clock.
FCR-N is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency in normal frequency range between 49,9 - 50,1 Hz.
Activated FCR-N volume (MWh) is calculated on the basis of the frequency in the Nordic synchronous system.
Value is activated net energy. Positive value means that the frequency has been in average below 50,0 Hz during the hour, and reserve has been activated as up-regulation. Respectively, negative value means that the frequency has been in average above 50,0 Hz, and reserve has been activated as down-regulation.
'''
},
'Bilateral trade capacity FI-RUS': {
'VariableId': 101,
'Formats': ('csv', 'json'),
'Info':
'''
The bilateral capacity on the 400 kV connection from Russia to Finland that is reserved to bilateral trade of the following commercial day. The capacity is confirmed by Fingrid and the Russian parties.
'''
},
'Transmission of electricity between Finland and Åland - measured hourly data': {
'VariableId': 280,
'Formats': ('csv', 'json'),
'Info':
'''
Measured electrical transmission between Finland and Åland islands DC tie line. Positive sign means transmission from Finland to Åland. Negative sign means transmission from Åland to Finland.
The value is updated once a day before noon with the values of the previous day.
'''
},
'Activated down-regulation power': {
'VariableId': 252,
'Formats': ('csv', 'json'),
'Info':
'''
The activated downward power from balancing power market. The value is given for each 15 minutes and indicates the amount of activated power at the end of each 15 minute time period. The values are available starting from December 2018.
'''
},
'Ordered up-regulations from Balancing energy market in Finland': {
'VariableId': 34,
'Formats': ('csv', 'json'),
'Info':
'''
Ordered up-regulations from Balancing energy market in Finland. The volume of ordered up-regulations from Balancing energy market in Finland is published hourly with two hours delay, eg. information from hour 06-07 is published at 9 o'clock.
Balancing energy market is a market place for manual frequency restoration reserve (mFRR) which is used to balance the electricity generation and consumption in real time. The Balancing energy market organized by Fingrid is part of the Nordic Balancing energy market that is also called the Regulating power market. Fingrid orders up- or down-regulation from the Balancing energy market. Up-regulation considers increasing of generation or reducing of consumption.
'''
},
'Stock exchange capacity RUS-FI': {
'VariableId': 67,
'Formats': ('csv', 'json'),
'Info':
'''
The capacity on the 400 kV connection from Russia to Finland is reserved to direct trade of the following commercial day. Fingrid and the Russian parties, who have jointly agreed that the capacity is 140 MW in both directions, daily confirm the capacity.
'''
},
'Day-ahead transmission capacity FI-SE3 – planned': {
'VariableId': 145,
'Formats': ('csv', 'json'),
'Info':
'''
Planned day-ahead transmission capacity from Finland (FI) to Central-Sweden (SE3). Transmission capacity is given hourly for every next week hour. Each week's hour is given one value. Planned weekly transmission capacity Fingrid will publish every Tuesday. Information will be updated if there are changes to the previous plan timetable or capacity. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Solar power generation forecast - updated once a day': {
'VariableId': 247,
'Formats': ('csv', 'json'),
'Info':
'''
Solar power generation forecasts for the next day. Forecast is updated every day at 12 p.m. EET. Length of the forecast is 36 hours. Overlapping hours are overwritten.
Solar forecasts are based on weather forecasts and estimates of installed PV capacity and location in Finland. Total PV capacity is based on yearly capacity statistics from the Finnish energy authority and estimates on installation rate of new capacity. Location information is a very rough estimate based on Finnish distribution grid operators information.
'''
},
'Frequency Containment Reserve for Normal operation, hourly market volumes': {
'VariableId': 80,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly volume of procured frequency containment reserve for normal operation (FCR-N) in Finnish hourly market for each CET-timezone day is published previous evening at 22:45 (EET).
FCR-N is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency in normal frequency range between 49,9 - 50,1 Hz.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Bilateral trade capacity RUS-FI': {
'VariableId': 65,
'Formats': ('csv', 'json'),
'Info':
'''
The bilateral capacity on the 400 kV connection from Russia (RUS) to Finland (FI) that is reserved to bilateral trade of the following commercial day. The capacity is confirmed by Fingrid and the Russian parties.
'''
},
'Congestion income between FI-SE3': {
'VariableId': 71,
'Formats': ('csv', 'json'),
'Info':
'''
Congestion income between Finland (FI) and Central Sweden (SE3).
Congestion income is published on ENTSO-E's Transparency Platform, which can be found here: https://transparency.entsoe.eu/transmission/r2/dailyImplicitAllocationsCongestionIncome/show . There are historical values to be found from Open Data until the beginning of February 2017. After February 2017 updated data as well as historical data can be found on ENTSO-E's Transparency Platform.
Congestion income = commercial flow between FI and SE3 on the day ahead market [MWh/h] * absolute value of price difference between FI and SE3 [€/MWh].
Congestion originates in the situation where transmission capacity between bidding zones is not sufficient to fulfill the market demand and the congestion splits the bidding zones into separate price areas. Congestion income arises from the different prices that the sellers receive and the buyers pay when electricity flows from the higher price area to the lower price area. The seller acting in a lower price area receives lower price for electricity compared to the price the other party pays for electricity in the higher price area, and the power exchange receives surplus income, which it then pays to the Transmission System Operators (TSOs). The TSOs spend the received congestion income on increasing the transmission capacity on its cross-border interconnectors according to the EU regulation.
'''
},
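# Illustrative example of the formula above (numbers are made up): a
# 500 MWh/h day-ahead flow with prices of 45 €/MWh in FI and 40 €/MWh in SE3
# gives a congestion income of 500 * |45 - 40| = 2500 €/h, which the power
# exchange pays to the TSOs.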
'Activated up-regulation power': {
'VariableId': 253,
'Formats': ('csv', 'json'),
'Info':
'''
The activated upward power from balancing power market. The value is given for each 15 minutes and indicates the amount of activated power at the end of each 15 minute time period. The values are available starting from December 2018.
'''
},
'Day-ahead transmission capacity SE3-FI – planned': {
'VariableId': 144,
'Formats': ('csv', 'json'),
'Info':
'''
Planned day-ahead transmission capacity from Central-Sweden (SE3) to Finland (FI). Transmission capacity is given hourly for every next week hour. Each week's hour is given one value. Planned weekly transmission capacity Fingrid will publish every Tuesday. Information will be updated if there are changes to the previous plan timetable or capacity. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising the system security.
'''
},
'Solar power generation forecast - updated hourly': {
'VariableId': 248,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly updated solar power generation forecast for the next 36 hours.
Solar forecasts are based on weather forecasts and estimates of installed PV capacity and location in Finland. Total PV capacity is based on yearly capacity statistics from the Finnish energy authority and estimates on installation rate of new capacity. Location information is a very rough estimate based on Finnish distribution grid operators information.
'''
},
'Frequency Containment Reserve for Normal operation, hourly market prices': {
'VariableId': 79,
'Formats': ('csv', 'json'),
'Info':
'''
Hourly prices (€/MW,h) of procured frequency containment reserve for normal operation (FCR-N) in Finnish hourly market for each CET-timezone day is published previous evening at 22:45 (EET).
FCR-N is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency in normal frequency range between 49,9 - 50,1 Hz.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Frequency containment reserves for disturbances, nordic trade': {
'VariableId': 289,
'Formats': ('csv', 'json'),
'Info':
'''
The volume of the nordic trade of frequency containment reserve for disturbances (FCR-D) capacity. Positive numbers indicate import of capacity to Finland and negative numbers indicate export of capacity from Finland. The data contains the traded capacity for Sweden and Norway. The data will be published 22:45 (EET) on previous evening.
FCR-D is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency above 49,5 Hz during disturbances.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Price of the last activated up-regulation bid - real time data': {
'VariableId': 22,
'Formats': ('csv', 'json'),
'Info':
'''
The price of the last activated up-regulation bid. The price is published real-time when Finland is a separate regulation area.
'''
},
'Congestion income between FI-EE': {
'VariableId': 48,
'Formats': ('csv', 'json'),
'Info':
'''
Congestion income between Finland (FI) and Estonia (EE).
Congestion income is published on ENTSO-E's Transparency Platform, which can be found here: https://transparency.entsoe.eu/transmission/r2/dailyImplicitAllocationsCongestionIncome/show . There are historical values to be found from Open Data until the beginning of February 2017. After February 2017 updated data as well as historical data can be found on ENTSO-E's Transparency Platform.
Congestion income is calculated as follows: congestion income [€/h] = commercial flow on day ahead market [MW] * area price difference [€/MWh]
Congestion originates in the situation where transmission capacity between bidding zones is not sufficient to fulfill the market demand and the congestion splits the bidding zones into separate price areas. Congestion income arises from the different prices that the sellers receive and the buyers pay when electricity flows from the higher price area to the lower price area. The power exchange receives the difference, which it then pays to the Transmission System Operators (TSOs). The TSOs spend the received congestion income on increasing the transmission capacity on its cross-border interconnectors according to the EU regulation.
'''
},
'Intraday transmission capacity RUS-FI': {
'VariableId': 66,
'Formats': ('csv', 'json'),
'Info':
'''
The capacity given to intraday market means transfer capacity after day-ahead trade from Russia to Finland. The intraday capacity between Finland and Russia is updated once a day. The data will not be revised after hourly day-ahead trade.
'''
},
'Down-regulation bids, price of the last activated - real time data': {
'VariableId': 251,
'Formats': ('csv', 'json'),
'Info':
'''
The price of the last activated down-regulation bid. The price is published real-time when Finland is a separate regulation area.
'''
},
'Down-regulation price in the Balancing energy market': {
'VariableId': 106,
'Formats': ('csv', 'json'),
'Info':
'''
Down-regulation price in the Balancing energy market. The price of the cheapest regulating bid used in the balancing power market during the particular hour; however, at the most the price for price area Finland in Nord Pool Spot (Elspot FIN).
Down-regulating price in Finland is the price of the cheapest down-regulating bid used in the Balancing energy market during the hour in question; however, it is at most the day-ahead market price for the price area Finland. The down-regulating price for each hour is published hourly with a one-hour delay, e.g. information for hour 07-08 is published at 9 o'clock.
The Balancing energy market is a marketplace for manual frequency restoration reserve (mFRR), which is used to balance electricity generation and consumption in real time. The Balancing energy market organized by Fingrid is part of the Nordic Balancing energy market, also called the Regulating power market. Fingrid orders up- or down-regulation from the Balancing energy market. Down-regulation means increasing consumption or reducing generation.
'''
},
'Congestion income between FI-SE1': {
'VariableId': 70,
'Formats': ('csv', 'json'),
'Info':
'''
Congestion income between Finland (FI) and Northern Sweden (SE1).
Congestion income is published on ENTSO-E's Transparency Platform, which can be found here: https://transparency.entsoe.eu/transmission/r2/dailyImplicitAllocationsCongestionIncome/show . There are historical values to be found from Open Data until the beginning of February 2017. After February 2017 updated data as well as historical data can be found from ENTSO-E's Transparency Platform.
Congestion income is calculated as follows: congestion income [€/h] = commercial flow on day ahead market [MW] * area price difference [€/MWh]
Congestion originates in the situation where transmission capacity between bidding zones is not sufficient to fulfill the market demand and the congestion splits the bidding zones into separate price areas. Congestion income arises from the different prices that the sellers receive and the buyers pay when electricity flows from the higher price area to the lower price area. The seller acting in a lower price area receives a lower price for electricity compared to the price the other party pays for electricity in the higher price area, and the power exchange receives the surplus income, which it then pays to the Transmission System Operators (TSOs). The TSOs spend the received congestion income on increasing the transmission capacity on their cross-border interconnectors according to the EU regulation.
'''
},
'Planned weekly capacity from north to south': {
'VariableId': 28,
'Formats': ('csv', 'json'),
'Info':
'''
Planned weekly capacity on North-South cut in Finland (cut P1) from North to South. Planned outages are included in the weekly capacity, information is not updated after disturbances.
'''
},
'Day-ahead transmission capacity FI-SE1 – official': {
'VariableId': 26,
'Formats': ('csv', 'json'),
'Info':
'''
Day-ahead transmission capacity from Finland (FI) to Northern Sweden (SE1). Transmission capacity is given hourly for every hour of the next day, with one value per hour. Fingrid publishes the day-ahead transmission capacity every day in the afternoon, and it is not changed after publication. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising system security.
'''
},
'Day-ahead transmission capacity SE3-FI – official': {
'VariableId': 25,
'Formats': ('csv', 'json'),
'Info':
'''
Day-ahead transmission capacity from Central Sweden (SE3) to Finland (FI). Transmission capacity is given hourly for every hour of the next day, with one value per hour. Fingrid publishes the day-ahead transmission capacity every day in the afternoon, and it is not changed after publication. Transmission capacity means the capability of the electricity system to supply electricity to the market without compromising system security.
'''
},
'Frequency Containment Reserve for Normal operation, foreign trade': {
'VariableId': 287,
'Formats': ('csv', 'json'),
'Info':
'''
The volume of the foreign trade of frequency containment reserve for normal operation (FCR-N) capacity. Positive numbers indicate import of capacity to Finland and negative numbers indicate export of capacity from Finland. The data contains the traded capacity for Sweden, Norway, Estonia and Russia. The data will be published 22:45 (EET) on previous evening.
FCR-N is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency in normal frequency range between 49,9 - 50,1 Hz.
Hourly market is a reserve market operated by Fingrid. Procured volumes vary for each hour and price is the price of the most expensive procured bid.
'''
},
'Up-regulating price in the Balancing energy market': {
'VariableId': 244,
'Formats': ('csv', 'json'),
'Info':
'''
Up-regulating price in Finland is the price of the most expensive up-regulating bid used in the Balancing energy market during the hour in question; however, it is at least the day-ahead market price for the price area Finland. The up-regulating price for each hour is published hourly with a one-hour delay, e.g. information for hour 07-08 is published at 9 o'clock.
The Balancing energy market is a marketplace for manual frequency restoration reserve (mFRR), which is used to balance electricity generation and consumption in real time. The Balancing energy market organized by Fingrid is part of the Nordic Balancing energy market, also called the Regulating power market. Fingrid orders up- or down-regulation from the Balancing energy market. Up-regulation means increasing production or reducing consumption.
'''
},
'Balancing Capacity Market price': {
'VariableId': 262,
'Formats': ('csv', 'json'),
'Info':
'''
The price of capacity procured from the balancing capacity market, €/MW,h. Fingrid procures mFRR capacity through the balancing capacity market in a weekly auction, which is held when needed. The balance service provider commits to placing regulating bids on the regulation market and is in return entitled to a capacity payment. The price is published at the latest on the Friday of the week before the procurement week, at 12:00 (EET).
'''
},
'Frequency containment reserves for disturbances, reserve plans in the yearly market': {
'VariableId': 290,
'Formats': ('csv', 'json'),
'Info':
'''
The hourly sum of reserve plans for frequency containment reserve for disturbances (FCR-D) in the yearly market. The data will be published 22:45 (EET) on previous evening.
FCR-D is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency above 49,5 Hz during disturbances.
Yearly market is a reserve market operated by Fingrid. Hourly procured volumes vary according to the reserve plans submitted by the balancing service providers and the price is constant over the whole year.
'''
},
'Frequency Containment Reserve for Normal operation, yearly market plans': {
'VariableId': 288,
'Formats': ('csv', 'json'),
'Info':
'''
The hourly sum of reserve plans for frequency containment reserve for normal operation (FCR-N) in the yearly market. The data will be published 22:45 (EET) on previous evening.
FCR-N is the frequency containment reserve used in the Nordic synchronous system that aims to keep the frequency in normal frequency range between 49,9 - 50,1 Hz.
Yearly market is a reserve market operated by Fingrid. Hourly procured volumes vary according to the reserve plans submitted by the balancing service providers and the price is constant over the whole year.
'''
}
}
def _datasets_values_to_lists(self):
'''Return lists of the available datasets' names, variableIds, formats and info strings.'''
available_variableIds = []
# Get dict of data on the available datasets.
datasets_dict = self._datasets()
# Make lists to store information about the available datasets.
datasets_names_list = []
datasets_variableIds_list = []
datasets_formats_list = []
datasets_info_list = []
# Loop on the datasets dict.
for name, value in datasets_dict.items():
# Store datasets names in list.
datasets_names_list.append(name)
# Store datasets variableIds in list.
datasets_variableIds_list.append(value["VariableId"])
# Store available formats in list.
datasets_formats_list.append(value["Formats"])
# Store available info in list.
datasets_info_list.append(value["Info"])
# Return lists of datasets names and variableIds.
return datasets_names_list, datasets_variableIds_list, datasets_formats_list, datasets_info_list
################################################################
############## Frontend functions.
################################################################
def show_parameters(self, include_info=False, return_df=False, tablefmt="grid", savetofilepath=None):
'''
Display the available datasets in the API and optionally return them as a DataFrame.
'''
# Convert list of format tuples to list of strings before printing.
formats_str_list = []
for i in self.static_datasets_formats_list:
if isinstance(i, tuple):
formats_str_list.append(', '.join(i))
else:
formats_str_list.append(i)
# Create dict before creating DataFrame.
df_dict = {
'Available FingridApi Dataset Names': self.static_datasets_names_list,
'Dataset VariableIds': self.static_datasets_variableids_list,
'Dataset Formats': formats_str_list
}
# Add info to dict if specified.
if include_info:
df_dict["Info"] = self.static_datasets_infos_list
# Create DataFrame.
df =
|
pd.DataFrame(df_dict)
|
pandas.DataFrame
|
# Another test for the museum filter on cities, which checks the match more precisely.
import sys
import os
from pathlib import Path
scriptpath = Path(os.path.dirname(os.path.abspath(__file__))).parent
sys.path.insert(0,str(scriptpath))
import pandas as pd
from data_extraction.filtre_base_de_donnees import filtre_par_villes
from pandas import isnull
def test_filtre_par_villes():
df = pd.read_excel(r"tests\tests.xlsx")
assert df[df.VILLE=="ALISE-SAINTE-REINE"].applymap(lambda x: {} if isnull(x) else x).eq(filtre_par_villes(df,"ALISE-SAINTE-REINE").applymap(lambda x: {} if
|
isnull(x)
|
pandas.isnull
|
#######################################
# Input Example ::
# python hotspot_predict.py -lat 11.05 -long 76.1 -rad 0.2 -hpts 5
#######################################
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.metrics import mean_squared_error
import pickle
import argparse
parser = argparse.ArgumentParser()
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
# print(tf.__version__)
parser.add_argument("-lat", "--Latitude", help="Input Latitude", default=11.05)
parser.add_argument("-long", "--Longitude", help="Input Longitude", default=76.1)
parser.add_argument("-rad", "--Radius", help="Input Radius", default=0.2)
parser.add_argument("-hpts", "--Hotspots", help="Input Hotspots", default=5)
args = parser.parse_args()
loca = pd.read_csv('locations_Kerala.csv')
def getdatawithinlat(latin,longin):
strexec = 'SELECT * FROM map ORDER BY ABS(latitude - ' +latin +') + ABS(longitude - ' +longin +') ASC;'
pass
def isInside(circle_x, circle_y, rad, x, y):
if ((x - circle_x) * (x - circle_x) + (y - circle_y) * (y - circle_y) <= rad * rad):
return True
else:
return False
def create_dataset(dataset, window_size=1):
data_X, data_Y = [], []
for i in range(len(dataset) - window_size):
a = dataset[i:(i + window_size), :]
data_X.append(a)
data_Y.append(dataset[i + window_size, :])
return(np.array(data_X), np.array(data_Y))
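# Shape sketch (illustrative): for a dataset of shape (N, 2) and window size w,
# create_dataset returns data_X of shape (N - w, w, 2) and data_Y of shape (N - w, 2),
# where each target row is the observation immediately following its window.
# e.g. create_dataset(np.arange(12).reshape(6, 2), 2)
#      -> data_X.shape == (4, 2, 2), data_Y.shape == (4, 2)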
def create_model():
model = Sequential()
model.add(LSTM(8, input_shape=(2, window_size), return_sequences=True))
model.add(LSTM(4, input_shape=(2, window_size)))
model.add(Dense(2))
return model
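# Note: after the transpose applied below, each sample is shaped (2, window_size),
# i.e. the two coordinate series (lat, long) on the first axis and the window
# positions on the second, which matches input_shape=(2, window_size). Keras treats
# the first axis as timesteps, so the model effectively steps over the two
# coordinates rather than over time.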
locations = []
circle_x = 11.05
circle_y = 76.1
rad = 0.2
circle_x = float(args.Latitude)
circle_y = float(args.Longitude)
rad = float(args.Radius)
for row in loca.values:
x = row[4]
y = row[5]
if(isInside(circle_x, circle_y, rad, x, y)):
locations.append(row[0])
# long 77.28 - 74.88
# lat 12.78 - 8.31
data = {}
for num in range(1000):
i = np.random.randint(0, len(locations))
data[num] = [i, np.mean(loca['Lat'].loc[loca['Name'] == locations[i]]), np.mean(loca['Long'].loc[loca['Name'] == locations[i]])]
df = pd.DataFrame.from_dict(data, orient='index', columns=['place', 'lat', 'lon'])
df.to_csv('hotspots_fake_data.csv', header=True, index=False)
lat_scaler = MinMaxScaler(feature_range=(0, 1))
long_scaler = MinMaxScaler(feature_range=(0, 1))
lat_x = lat_scaler.fit_transform(df.iloc[:, 1].values.reshape(-1, 1))
long_x = long_scaler.fit_transform(df.iloc[:, 2].values.reshape(-1, 1))
x = np.concatenate((lat_x, long_x), axis=1)
size = 0.80
train_size = int(len(x) * size)
test_size = len(x) - train_size
train, test = x[0:train_size, :], x[train_size:len(x), :]
window_size = 5
train_X, train_Y = create_dataset(train, window_size)
test_X, test_Y = create_dataset(test, window_size)
train_X = train_X.transpose(0, 2, 1)
test_X = test_X.transpose(0, 2, 1)
ckpt_model = 'model.hdf5'
checkpoint = ModelCheckpoint(ckpt_model, monitor='loss', verbose=0, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model = create_model()
model.compile(loss="mean_squared_error", optimizer="adam", metrics=['mean_absolute_error'])
model.fit(train_X, train_Y, epochs=2, batch_size=1, verbose=0, callbacks=callbacks_list)
def predict_and_score(X, Y):
pred = model.predict(X)
score = math.sqrt(mean_squared_error(Y, pred))
return(score, pred)
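# The RMSE values computed below are in MinMax-scaled units (0-1), since the targets
# come from the scaled array x; use lat_scaler / long_scaler inverse_transform on the
# prediction columns to get values back in degrees.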
rmse_train, train_predict = predict_and_score(train_X, train_Y)
rmse_test, test_predict = predict_and_score(test_X, test_Y)
pickle.dump(lat_scaler, open('lat_scaler.pkl', 'wb'))
pickle.dump(long_scaler, open('long_scaler.pkl', 'wb'))
df =
|
pd.read_csv('hotspots_fake_data.csv')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import os
import json
import openpyxl
import pickle
import PySimpleGUI as sg
from keras_bert import load_trained_model_from_checkpoint
from keras_bert import get_custom_objects
from keras import Input, Model
from keras.models import load_model
from preprocessing import preprocessing
from preprocessing import make_table
from transformers import BertJapaneseTokenizer
############ Table creation (trend data, text) ############
#df_trend = make_table.trend()
#df_news = make_table.text()
# Dataset creation (trend + indicator data, text)
#df_index, df_text = make_table.concat(df_trend, df_news)
# For speed, load from CSV instead
df_index = pd.read_csv('./datasets/df_index.csv')
df_text =
|
pd.read_csv('./datasets/df_text.csv')
|
pandas.read_csv
|
import numpy as np
import vigra
from ilastikrag import Rag
from ilastikrag.util import generate_random_voronoi
from ilastikrag.accumulators.edgeregion import EdgeRegionEdgeAccumulator
class TestEdgeRegionEdgeAccumulator(object):
def test1(self):
superpixels = generate_random_voronoi((100,200), 200)
superpixels.axistags = vigra.defaultAxistags('yx')
feature_names = ['edgeregion_edge_regionradii']
rag = Rag( superpixels )
acc = EdgeRegionEdgeAccumulator(rag, feature_names)
features_df = rag.compute_features(None, feature_names, accumulator_set=[acc])
radii = features_df[features_df.columns.values[2:]].values
assert (radii[:,0] >= radii[:,1]).all()
# Transpose superpixels and check again
# Should match (radii are sorted by magnitude).
superpixels.axistags = vigra.defaultAxistags('xy')
rag = Rag( superpixels )
acc = EdgeRegionEdgeAccumulator(rag, feature_names)
transposed_features_df = rag.compute_features(None, feature_names, accumulator_set=[acc])
transposed_radii = transposed_features_df[transposed_features_df.columns.values[2:]].values
assert (transposed_features_df[['sp1', 'sp2']].values == features_df[['sp1', 'sp2']].values).all()
DEBUG = False
if DEBUG:
count_features = rag.compute_features(None, ['standard_edge_count', 'standard_sp_count'])
import pandas as pd
combined_features_df =
|
pd.merge(features_df, transposed_features_df, how='left', on=['sp1', 'sp2'], suffixes=('_orig', '_transposed'))
|
pandas.merge
|
"""
Module for processing and handling replays
"""
# Todo move into module
import asyncio
import lzma
from base64 import b64decode, b64encode
from io import StringIO
import bezier
import numpy as np
import pandas as pd
import requests
class DegenerateTriangle(Exception):
pass
def lzma_replay_to_df(lzma_byte_string):
"""
Turn a lzma stream into a pandas dataframe of the replay
:param lzma_byte_string: lzma encoded byte string
:return: pandas dataframe. columns are "offset", "x pos", "y pos", "clicks"
"""
stream = lzma.decompress(lzma_byte_string)
dataframe = info_string_to_df(stream)
dataframe.columns = ["ms since last", "x pos", "y pos", "clicks"]
seed = 0
if dataframe["ms since last"].iloc[-1] == -12345:
seed = int(dataframe["clicks"].iloc[-1])
dataframe.drop(dataframe.tail(1).index, inplace=True)
smallidx = dataframe["ms since last"].idxmin()
offset = 0
if dataframe["ms since last"].iloc[smallidx] < 0:
offset = int(dataframe.head(smallidx).sum()["ms since last"])
dataframe.drop(dataframe.head(smallidx).index, inplace=True)
dataframe["ms since last"] = dataframe["ms since last"].replace(0, 1)
dataframe['offset'] = dataframe["ms since last"].cumsum() + offset
dataframe = dataframe.drop(columns=["ms since last"])
return dataframe, seed
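# The decompressed stream appears to follow the osu! replay frame format "w|x|y|z,"
# per action: w = milliseconds since the previous action, x/y = cursor position,
# z = bitmask of pressed keys. In newer replays the final frame uses w == -12345 and
# stores the RNG seed in z, which is why that row is split off above.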
def info_string_to_df(info):
"""
Split a string whose records are separated by ',' and whose fields are separated by '|' into a pandas dataframe
:param info: byte string
:return: pandas dataframe
"""
dataframe = pd.read_csv(StringIO(str(info)[2:-1]), sep="|", lineterminator=',', header=None)
return dataframe
def replay_string_to_df(replay):
"""
Decodes a base 64 encoded replay string into a pandas dataframe
:param replay: base 64 encode byte string
:return: pandas dataframe. columns are "offset", "x pos", "y pos", "clicks"
"""
byte_string = b64decode(replay)
dataframe, _ = lzma_replay_to_df(byte_string)
return dataframe
def open_file(file_name):
"""
Opens a replay file and returns info on replay
:param file_name: file name including path
:return: ParseReplayByteSting object
"""
with open(file_name, "rb") as replay:
return ParseReplayByteSting(replay.read())
def open_link(link):
"""
Opens a replay file from link and returns info on replay
:param link: link to replay
:return: ParseReplayByteSting object
"""
replay = requests.get(link)
return ParseReplayByteSting(replay.content)
class ParseReplayByteSting:
"""
Contains info from replay file
:param byte_string: byte string containing replay info
"""
def __init__(self, byte_string):
byte_string, self.gamemode = get_byte(byte_string)
byte_string, self.game_version = get_integer(byte_string)
byte_string, self.map_md5_hash = get_string(byte_string)
byte_string, self.player_name = get_string(byte_string)
byte_string, self.replay_md5_hash = get_string(byte_string)
byte_string, self.count300 = get_short(byte_string)
byte_string, self.count100 = get_short(byte_string)
byte_string, self.count50 = get_short(byte_string)
byte_string, self.countgekis = get_short(byte_string)
byte_string, self.countkatus = get_short(byte_string)
byte_string, self.countmisses = get_short(byte_string)
byte_string, self.final_score = get_integer(byte_string)
byte_string, self.max_combo = get_short(byte_string)
byte_string, self.perfect = get_byte(byte_string)
byte_string, self.mods = get_integer(byte_string)
byte_string, life_graph = get_string(byte_string)
if life_graph:
self.life_graph = info_string_to_df(life_graph)
else:
self.life_graph = pd.DataFrame([[0, 0], [0, 0]])
self.life_graph.columns = ["offset", "health"]
byte_string, self.time_stamp = get_long(byte_string)
byte_string, replay_length = get_integer(byte_string)
self.replay, self.seed = lzma_replay_to_df(byte_string[:replay_length])
self.replay_encoded = b64encode(byte_string[:replay_length])
byte_string = byte_string[replay_length:]
_, self.score_id = get_integer(byte_string)
def get_byte(byte_str):
"""
Get a byte from byte string
:param byte_str: byte string
:return: byte string, byte
"""
byte = byte_str[0]
byte_str = byte_str[1:]
return byte_str, byte
def get_short(byte_str):
"""
Get a short from byte string
:param byte_str: byte string
:return: byte string, short
"""
short = int.from_bytes(byte_str[:2], byteorder="little")
byte_str = byte_str[2:]
return byte_str, short
def get_integer(byte_str):
"""
Get a integer from byte string
:param byte_str: byte string
:return: byte string, integer
"""
integer = int.from_bytes(byte_str[:4], byteorder="little")
byte_str = byte_str[4:]
return byte_str, integer
def get_long(byte_str):
"""
Get a long from byte string
:param byte_str: byte string
:return: byte string, long
"""
long = int.from_bytes(byte_str[:8], byteorder="little")
byte_str = byte_str[8:]
return byte_str, long
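# ULEB128 stores an integer 7 bits per byte, least-significant group first, with the
# high bit set on every byte except the last. Illustrative example: the bytes
# 0xE5 0x8E 0x26 decode to 624485; get_uleb128 below collects the 7-bit groups,
# reverses them and shifts them back together to recover that value.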
def get_uleb128(byte_str):
"""
Gets a unsigned leb128 number from byte sting
:param byte_str: byte string
:return: byte string, integer
"""
uleb_parts = []
while byte_str[0] >= 0x80:
uleb_parts.append(byte_str[0] - 0x80)
byte_str = byte_str[1:]
uleb_parts.append(byte_str[0])
byte_str = byte_str[1:]
uleb_parts = uleb_parts[::-1]
integer = 0
for i in range(len(uleb_parts) - 1):
integer = (integer + uleb_parts[i]) << 7
integer += uleb_parts[-1]
return byte_str, integer
def get_string(byte_str):
"""
Get a string from byte string
:param byte_str: byte string
:return: byte string, string
"""
byte_str, string_existence = get_byte(byte_str)
if string_existence == 0:
return byte_str, ""
byte_str, length = get_uleb128(byte_str)
string = str(byte_str[:length])[2:-1]
byte_str = byte_str[length:]
return byte_str, string
def index_at_value(dataframe, value, column):
"""
get the closest index at or below the given value
:param dataframe: pandas dataframe
:param value: value to search for
:param column: column to search in
:return: index
"""
exact_match = dataframe[dataframe[column] == value]
if not exact_match.empty:
index = exact_match.index[0]
else:
index = dataframe[column][dataframe[column] < value].idxmax()
return index
def get_action_at_time(dataframe, time):
"""
Gives the closest entry in a dataframe rounded down in a replay dataframe
:param dataframe: pandas dataframe
:param time: time in milliseconds
:return: dataframe entry
"""
time = max(time, dataframe.iloc[0].loc["offset"])
time = min(time, dataframe.iloc[-1].loc["offset"])
index = index_at_value(dataframe, time, "offset")
"""lower = dataframe.iloc[index]
upper = dataframe.iloc[index+1]
perc = (time-lower["offset"])/(upper["offset"]-lower["offset"])
dist = upper - lower"""
# todo: smart interpolation
return dataframe.loc[index]
class SliderCurve:
"""
slider curve object
:param points: all coords of points on slider
:param slider_type: slider type
:param resolution: --optional-- resolution default: 200
"""
def __init__(self, points, slider_type, resolution=None):
if resolution is None:
resolution = 200
if slider_type == "L":
resolution = 2
if slider_type != "C":
if slider_type == "B":
points_list = split_on_double(points)
else:
points_list = [points]
paths = list()
if slider_type == "P":
try:
paths.append(PerfectSlider(points))
except DegenerateTriangle:
for i in points_list:
nodes = np.asfortranarray(i).transpose()
paths.append(bezier.Curve.from_nodes(nodes))
else:
for i in points_list:
nodes = np.asfortranarray(i).transpose()
paths.append(bezier.Curve.from_nodes(nodes))
curve = list()
for i in paths:
s_v = np.linspace(0, 1, resolution)
curve.append(i.evaluate_multi(s_v).transpose())
for i, j in enumerate(curve[:-1]):
curve[i] = j[:-1]
self.curve = np.concatenate(curve)
self.length = sum([i.length for i in paths])
self.paths = list()
per = 0
for i in paths:
self.paths.append((per, i))
per += i.length / self.length
def get_point(self, percentage):
"""
get coords on slider
:param percentage: percentage along slider
:return: coords
"""
clost = self.paths[0][1]
clostper = 0
for i, j in self.paths:
if percentage >= i >= clostper:
clostper = i
clost = j
loc = (percentage * self.length - clostper * self.length) / clost.length
return clost.evaluate(loc)
def split_on_double(item_list):
"""
split the list every time an element is doubled
:param item_list: list
:return: list of lists
"""
last = item_list[0]
l_index = 0
split_list = list()
for i, j in list(enumerate(item_list))[1:]:
if j == last:
split_list.append(item_list[l_index:i])
l_index = i
last = j
split_list.append(item_list[l_index:])
return split_list
class PerfectSlider:
def __init__(self, points):
points = np.array(points)
self.center, self.radius = get_circumcircle(points)
min_theta = np.arctan2(points[0][1] - self.center[1], points[0][0] - self.center[0])
max_theta = np.arctan2(points[2][1] - self.center[1], points[2][0] - self.center[0])
pass_through = np.arctan2(points[1][1] - self.center[1], points[1][0] - self.center[0])
mi = (min_theta + np.pi * 2) % (np.pi * 2)
ma = (max_theta + np.pi * 2) % (np.pi * 2)
pa = (pass_through + np.pi * 2) % (np.pi * 2)
p2 = (points[2][1] - self.center[1], points[2][0] - self.center[0])
p1 = (points[0][1] - self.center[1], points[0][0] - self.center[0])
if not mi < pa < ma:
dist = np.arctan2(*p2) - np.arctan2(*p1)
else:
dist = np.pi * 2 - (np.arctan2(*p1) - np.arctan2(*p2))
self.min_theta = min_theta
self.length = abs(dist)
self.dist = dist
def evaluate(self, percentage):
theta = percentage * self.dist + self.min_theta
data = np.ndarray(shape=(2, 1), dtype=float)
data[0][0] = self.center[0] + self.radius * np.cos(theta)
data[1][0] = self.center[1] + self.radius * np.sin(theta)
return data
def evaluate_multi(self, s_v):
pn = np.array(list(map(self.evaluate, s_v)))
return pn.transpose()[0]
def get_circumcircle(triangle):
assert triangle.shape == (3, 2)
aSq = distance(triangle[1] - triangle[2]) ** 2
bSq = distance(triangle[0] - triangle[2]) ** 2
cSq = distance(triangle[0] - triangle[1]) ** 2
if almost_equals(aSq, 0) or almost_equals(bSq, 0) or almost_equals(cSq, 0):
raise DegenerateTriangle
s = aSq * (bSq + cSq - aSq)
t = bSq * (aSq + cSq - bSq)
u = cSq * (aSq + bSq - cSq)
if almost_equals(sum([s, u, t]), 0):
raise DegenerateTriangle
line1 = perpendicular_line(triangle[1], triangle[0])
line2 = perpendicular_line(triangle[1], triangle[2])
coef1 = line1.coeffs
coef2 = line2.coeffs
x_center = (coef1[1] - coef2[1]) / (coef2[0] - coef1[0])
y_center = line1(x_center)
center = np.array([x_center, y_center])
dist = triangle[0] - center
radius = distance(dist)
return center, radius
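# The circumcentre is found by intersecting the perpendicular bisectors of two sides
# (built with perpendicular_line below); a degenerate, near-collinear triangle has no
# circumcircle and raises DegenerateTriangle, in which case SliderCurve falls back to
# a Bezier curve for "P"-type sliders.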
def perpendicular_line(point1, point2):
center = (point1 + point2) / 2
slope_diff = point1 - point2
if 0 in slope_diff:
slope_diff += 1e-10
# raise DegenerateTriangle
slope = slope_diff[1] / slope_diff[0]
new_slope = -1 / slope
offset = center[1] - new_slope * center[0]
return np.poly1d([new_slope, offset])
def almost_equals(value1, value2, acceptable_distance=None):
if acceptable_distance is None:
acceptable_distance = 1e-3
return abs(value1 - value2) <= acceptable_distance
def distance(point):
return np.sqrt(point[0] ** 2 + point[1] ** 2)
class ScoreReplay:
def __init__(self, beatmap_obj, replay):
beat_durations = dict()
last_non = 1000
beatmap_obj.timing_points[0].time = 0
mspb = dict()
for i in beatmap_obj.timing_points:
if i.ms_per_beat > 0:
last_non = i.ms_per_beat
duration = i.ms_per_beat
mspb[i.time] = i.ms_per_beat
else:
duration = last_non * abs(i.ms_per_beat) / 100
beat_durations[i.time] = duration
self.objects = {"circle": list(), "slider": list(), "spinner": list()}
for j, i in enumerate(beatmap_obj.hitobjects):
duration = [j for j in beat_durations if j <= i.time][-1]
msperb = [v for j, v in mspb.items() if j <= i.time][-1]
if i.typestr() == "circle":
self.objects["circle"].append((j + 1, {"time": i.time,
"position": (i.data.pos.x, i.data.pos.y),
"pressed": False}))
elif i.typestr() == "spinner":
self.objects["spinner"].append((j + 1, {"time": i.time, "end_time": i.data.end_time}))
else:
slider_duration = i.data.distance / (100.0 * beatmap_obj.sv) \
* beat_durations[duration]
slider = SliderCurve([(i.data.pos.x, i.data.pos.y)]
+ [(a.x, a.y) for a in i.data.points], i.data.type)
num_of_ticks = beatmap_obj.tick_rate * slider_duration / beat_durations[duration]
ticks_once = {i: False for i in percent_positions(int(num_of_ticks))}
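# percent_positions is assumed to be defined elsewhere in this module: it should
# yield the fractional positions (between 0 and 1) of the slider ticks along the
# slider body, which become the keys of the per-repetition tick dictionaries.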
ticks = {i + 1: ticks_once for i in range(i.data.repetitions)}
for tickset in ticks.copy():
if tickset % 3 > 1:
ticks[tickset] = {1 - i: False for i in ticks[tickset]}
endings = {i + 1: False for i in range(i.data.repetitions)}
self.objects["slider"].append((j + 1, {"time": i.time, "slider": slider,
"speed": (100.0 * beatmap_obj.sv) * beat_durations[duration],
"duration": slider_duration, "repetitions": i.data.repetitions,
"start": False, "ticks": ticks, "end": endings}))
self.replay = replay
self.score = pd.DataFrame(columns=["offset", "combo", "hit", "bonuses", "displacement", "object"])
self.raw50 = 0
self.hit_window50 = 0
self.hit_window100 = 0
self.hit_window300 = 0
self.k1 = 1 << 0
self.k2 = 1 << 1
self.circle_radius = 0
self.follow_circle = 0
self.speed = 1
self.spins_per_second = 0
self.compensate = replay["offset"].diff().median() / 2.5
def generate_score(self, *args, **kwargs):
return asyncio.run(self.mark_all(*args, **kwargs))
async def mark_all(self, od, cs, speed=1, ms_compensate=None):
self.score = pd.DataFrame(columns=["offset", "combo", "hit", "bonuses", "displacement", "object"])
# calculate score and accuracy afterwards
if ms_compensate is not None:
self.compensate = ms_compensate
self.speed = speed
self.raw50 = (150 + 50 * (5 - od) / 5)
self.hit_window50 = (150 + 50 * (5 - od) / 5) + self.compensate
self.hit_window100 = (100 + 40 * (5 - od) / 5) + self.compensate
self.hit_window300 = (50 + 30 * (5 - od) / 5) + self.compensate
self.circle_radius = (512 / 16) * (1 - (0.7 * (cs - 5) / 5))
self.follow_circle = (512 / 16) * (1 - (0.5 * (cs - 5) / 7)) * 10
if od > 5:
self.spins_per_second = 5 + 2.5 * (od - 5) / 5
elif od < 5:
self.spins_per_second = 5 - 2 * (5 - od) / 5
else:
self.spins_per_second = 5
circle_data, slider_data, spinner_data = \
await asyncio.gather(self.mark_circle(self.objects["circle"]),
self.mark_slider(self.objects["slider"]),
self.mark_spinner(self.objects["spinner"]))
score = pd.concat([circle_data, slider_data, spinner_data]).sort_index()
combo = 0
for i, j in score.iterrows():
if j["object"] == "circle" or j["object"] == "spinner":
if j["hit"] >= 50:
combo += 1
else:
combo = 0
if j["object"] == "slider":
slider_parts = j["displacement"]
if slider_parts["slider start"]:
combo += 1
else:
combo = 0
if "slider repeats" in slider_parts:
repeats = len(slider_parts["slider repeats"])
does_repeat = True
else:
repeats = 1
does_repeat = False
index = 0
for repeat in range(repeats):
if "slider ticks" in slider_parts:
interval = int(len(slider_parts["slider ticks"]) / repeats)
for tick in slider_parts["slider ticks"][index:interval]:
if tick:
combo += 1
else:
combo = 0
index += interval
if does_repeat:
if slider_parts["slider repeats"].iloc[repeat]:
combo += 1
else:
combo = 0
if slider_parts["slider end"]:
combo += 1
score.loc[i]["combo"] = combo
self.score = score
return self.score
async def mark_circle(self, hit_circles, alternated_hit_window=0.):
circles = pd.DataFrame(columns=["offset", "combo", "hit", "bonuses", "displacement", "object"])
for place_index, hit_circle in hit_circles:
lower = self.replay[self.replay["offset"] >= hit_circle["time"]
- self.hit_window50 + alternated_hit_window]
upper = lower[lower["offset"] <= hit_circle["time"] + self.hit_window50]
key1 = False
key2 = False
offset = None
deviance = None
clicks = list()
for j in upper.iterrows():
time_action = j[1]
last_key1 = key1
last_key2 = key2
key1 = int(time_action["clicks"]) & self.k1
key2 = int(time_action["clicks"]) & self.k2
if (not last_key1 and key1) or (not last_key2 and key2):
if np.sqrt((time_action["x pos"] - hit_circle["position"][0]) ** 2 +
(time_action["y pos"] - hit_circle["position"][1]) ** 2) <= self.circle_radius:
hit_circle["pressed"] = True
if hit_circle["time"] - self.hit_window300 <= \
time_action["offset"] <= hit_circle["time"] + self.hit_window300:
clicks.append(time_action["offset"] - hit_circle["time"])
elif hit_circle["time"] - self.hit_window100 <= \
time_action["offset"] <= hit_circle["time"] + self.hit_window100:
clicks.append(time_action["offset"] - hit_circle["time"])
elif hit_circle["time"] - self.hit_window50 <= \
time_action["offset"] <= hit_circle["time"] + self.hit_window50:
clicks.append(time_action["offset"] - hit_circle["time"])
elif hit_circle["time"] - self.hit_window50 > time_action["offset"]:
offset = time_action["offset"]
hit_circle["pressed"] = False
deviance = time_action["offset"] - hit_circle["time"]
break
if hit_circle["pressed"]:
closet_click = min([(abs(i), i) for i in clicks])
if closet_click[0] <= self.hit_window300:
hit = 300.
elif closet_click[0] <= self.hit_window100:
hit = 100.
else:
hit = 50
offset = closet_click[1] + hit_circle["time"]
deviance = closet_click[1]
else:
if offset is None:
offset = hit_circle["time"]
hit = 0.
if deviance is None:
deviance = np.nan
bonuses = 0.
combo = np.nan
circles.at[place_index] = [offset, combo, hit, bonuses, deviance, "circle"]
return circles
async def mark_spinner(self, spinners):
spinner_list = pd.DataFrame(columns=["offset", "combo", "hit", "bonuses", "displacement", "object"])
for place_index, spinner in spinners:
length = (spinner["end_time"] - spinner["time"]) / 1000
required_spins = np.floor(self.spins_per_second * length * .55)
lower = self.replay[self.replay["offset"] >= spinner["time"]]
upper = lower[lower["offset"] <= spinner["end_time"]]
hold = upper[upper["clicks"] != 0]
x_pos = hold.loc[:, "x pos"] - 512 / 2
y_pos = hold.loc[:, "y pos"] - 384 / 2
d_theta = np.arctan2(y_pos, x_pos).diff() / np.pi * 180
spins_index = (d_theta[abs(d_theta) > 200]).index
spins = hold.loc[spins_index]
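# A frame-to-frame angle difference of more than 200 degrees is taken as the arctan2
# wrap-around from +180 to -180 degrees, i.e. the cursor completing another
# revolution around the spinner centre.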
rpm = pd.Series(name="rotations per minute")
last_revolution = spinner["time"]
for spin in spins.iterrows():
rpm.at[spin[1]["offset"]] = spin[1]["offset"] - last_revolution
last_revolution = spin[1]["offset"]
extra_spin = hold.iloc[-1]["offset"] - spins.iloc[-1]["offset"]
if len(rpm) >= required_spins:
hit = 300.
bonuses = 1000. * (len(rpm) - required_spins)
elif len(rpm) + extra_spin / 360 >= required_spins / 2 * .5 + required_spins / 2:
hit = 100.
bonuses = 0.
elif len(rpm) + extra_spin / 360 >= required_spins / 2:
hit = 50.
bonuses = 0.
else:
hit = 0.
bonuses = 0.
combo = np.nan
offset = spinner["time"]
deviance = 1000 / rpm * 60 * self.speed
spinner_list.at[place_index] = [offset, combo, hit, bonuses, deviance, "spinner"]
return spinner_list
async def mark_slider(self, sliders):
slider_list = pd.DataFrame(columns=["offset", "combo", "hit", "bonuses", "displacement", "object"])
for place_index, slider in sliders:
slider_parts = pd.Series(name="data on slider")
first_click = await asyncio.gather(
self.mark_circle([(0, {"type": "slider_start", "time": slider["time"],
"position": [i[0] for i in slider["slider"].get_point(0)],
"pressed": False})], self.raw50 * 1.0075))
first_click = first_click[0]
if first_click.loc[0, "hit"] >= 50: # and 0 >= first_click.loc[0, "displacement"]:
slider["start"] = True
slider_parts.at["slider start"] = slider["start"]
slider_start = self.replay[self.replay["offset"] >= slider["time"]]
slider_end = slider_start[slider_start["offset"] <= slider["time"]
+ slider["duration"] * slider["repetitions"]]
for tickset in slider["ticks"]:
for tick in slider["ticks"][tickset]:
# tick_lower_time = slider_end[slider_end["offset"] >= slider["time"]
# + slider["duration"] * tick
# - self.circle_radius / slider["speed"]]
# tick_upper_time = tick_lower_time[tick_lower_time["offset"] <= slider["time"]
# + slider["duration"] * tick
# + self.circle_radius / slider["speed"]]
tick_slice = get_action_at_time(slider_end, slider["time"]
+ slider["duration"] * tick)
if tick_slice["clicks"] > 0:
position = slider["slider"].get_point(tick)
if np.sqrt((tick_slice["x pos"] - position[0][0]) ** 2 +
(tick_slice["y pos"] - position[1][0]) ** 2) <= self.follow_circle:
slider["ticks"][tickset][tick] = True
if "slider ticks" not in slider_parts:
slider_parts.at["slider ticks"] =
|
pd.Series()
|
pandas.Series
|
# --------------------------------------------------------------------------------------------------
# Copyright (c) 2021 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------------------------------
# Script to reproduce Figure 2 in Section 3.3: learning curves of ANTT models
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def main():
path = os.path.abspath('.') # Run from pwd or specify a path here
print("Plotting data from ", path)
# list all subfolders of the folder - each subfolder is considered an experiment and subfolders
# within that subfolder are separate runs of that experiment
list_subfolders_with_paths = [
f.path for f in os.scandir(path) if f.is_dir()]
print("Found following experiments: ", list_subfolders_with_paths)
experiment_names = []
colours = ['red', 'green', 'blue', 'orange', 'pink', 'yellow', 'black']
fig, axes = plt.subplots(2, 2, figsize=(20, 10), sharey=False)
for experiment, color in zip(list_subfolders_with_paths, colours):
print('{} = {}'.format(color, experiment))
run_cvss = [f.path for f in os.scandir(experiment)]
experiment_name = os.path.basename(os.path.normpath(experiment))
experiment_names.append(experiment_name)
run_dfs = []
for run in run_cvss:
run_data_frame = pd.read_csv(run)
run_dfs.append(run_data_frame)
experiment_df =
|
pd.concat(run_dfs)
|
pandas.concat
|
import pandas as pd
import pandas.testing as pdt
import pytest
import pytz
from werkzeug.exceptions import RequestEntityTooLarge
from sfa_api.conftest import (
VALID_FORECAST_JSON, VALID_CDF_FORECAST_JSON, demo_forecasts)
from sfa_api.utils import request_handling
from sfa_api.utils.errors import (
BadAPIRequest, StorageAuthError, NotFoundException)
@pytest.mark.parametrize('start,end', [
('invalid', 'invalid'),
('NaT', 'NaT')
])
def test_validate_start_end_fail(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('start,end', [
('20190101T120000Z', '20190101T130000Z'),
('20190101T120000', '20190101T130000'),
('20190101T120000', '20190101T130000Z'),
('20190101T120000Z', '20190101T130000+00:00'),
('20190101T120000Z', '20190101T140000+01:00'),
])
def test_validate_start_end_success(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('query,exc', [
('?start=20200101T0000Z', {'end'}),
('?end=20200101T0000Z', {'start'}),
('?start=20200101T0000Z&end=20210102T0000Z', {'end'}),
('', {'start', 'end'}),
pytest.param('?start=20200101T0000Z&end=20200102T0000Z', {},
marks=pytest.mark.xfail(strict=True))
])
def test_validate_start_end_not_provided(app, forecast_id, query, exc):
url = f'/forecasts/single/{forecast_id}/values{query}'
with app.test_request_context(url):
with pytest.raises(BadAPIRequest) as err:
request_handling.validate_start_end()
if exc:
assert set(err.value.errors.keys()) == exc
@pytest.mark.parametrize('content_type,payload', [
('text/csv', ''),
('application/json', '{}'),
('application/json', '{"values": "nope"}'),
('text/plain', 'nope'),
])
def test_validate_parsable_fail(app, content_type, payload, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(
url, content_type=content_type, data=payload, method='POST',
content_length=len(payload)):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type', [
('text/csv'),
('application/json'),
('application/json'),
])
def test_validate_parsable_fail_too_large(app, content_type, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(RequestEntityTooLarge):
with app.test_request_context(
url, content_type=content_type, method='POST',
content_length=17*1024*1024):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type,payload', [
('text/csv', 'timestamp,value\n2019-01-01T12:00:00Z,5'),
('application/json', ('{"values":[{"timestamp": "2019-01-01T12:00:00Z",'
'"value": 5}]}')),
])
def test_validate_parsable_success(app, content_type, payload, forecast_id):
with app.test_request_context(f'/forecasts/single/{forecast_id}/values/',
content_type=content_type, data=payload,
method='POST'):
request_handling.validate_parsable_values()
def test_validate_observation_values():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
request_handling.validate_observation_values(df)
def test_validate_observation_values_bad_value():
df = pd.DataFrame({'value': [0.1, 's.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_no_value():
df = pd.DataFrame({'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_bad_timestamp():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
def test_validate_observation_values_no_timestamp():
df = pd.DataFrame({
'value': [0.1, '.2'], 'quality_flag': [0.0, 1]})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
@pytest.mark.parametrize('quality', [
[1, .1],
[1, '0.9'],
[2, 0],
['ham', 0]
])
def test_validate_observation_values_bad_quality(quality):
df = pd.DataFrame({'value': [0.1, .2],
'quality_flag': quality,
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
def test_validate_observation_values_no_quality():
df = pd.DataFrame({'value': [0.1, '.2'],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
expected_parsed_df = pd.DataFrame({
'a': [1, 2, 3, 4],
'b': [4, 5, 6, 7],
})
csv_string = "a,b\n1,4\n2,5\n3,6\n4,7\n"
json_string = '{"values":{"a":[1,2,3,4],"b":[4,5,6,7]}}'
def test_parse_csv_success():
test_df = request_handling.parse_csv(csv_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('csv_input', [
'',
"a,b\n1,4\n2.56,2.45\n1,2,3\n"
])
def test_parse_csv_failure(csv_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_csv(csv_input)
def test_parse_json_success():
test_df = request_handling.parse_json(json_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('json_input', [
'',
"{'a':[1,2,3]}"
])
def test_parse_json_failure(json_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_json(json_input)
null_df = pd.DataFrame({
'timestamp': [
'2018-10-29T12:00:00Z',
'2018-10-29T13:00:00Z',
'2018-10-29T14:00:00Z',
'2018-10-29T15:00:00Z',
],
'value': [32.93, 25.17, None, None],
'quality_flag': [0, 0, 1, 0]
})
def test_parse_csv_nan():
test_df = request_handling.parse_csv("""
# comment line
timestamp,value,quality_flag
2018-10-29T12:00:00Z,32.93,0
2018-10-29T13:00:00Z,25.17,0
2018-10-29T14:00:00Z,,1 # this value is NaN
2018-10-29T15:00:00Z,NaN,0
""")
|
pdt.assert_frame_equal(test_df, null_df)
|
pandas.testing.assert_frame_equal
|
import requests
from typing import Dict, List, Optional
import sys
from pathlib import Path
import os
from shutil import rmtree
import json
import pandas as pd
import click
from joblib import Memory
from datetime import date, timedelta
# this removes cache every day to invalidate
today = date.today()
yesterday = today - timedelta(1)
memory = Memory(f"/tmp/cachedir_{today.strftime('%Y-%m-%d')}", verbose=0)
old_cache=f"/tmp/cachedir_{yesterday.strftime('%Y-%m-%d')}"
if os.path.exists(old_cache):
rmtree(old_cache)
@memory.cache
def alfred_list_docs():
c = CodaClient()
r = c.list_docs(alfred=True)
return {"items": r}
@memory.cache
def alfred_list_pages(pages:List[str]):
c = CodaClient()
r = c.list_all_pages(pages, alfred=True)
return {"items": r}
class CodaClient():
# get current date
def __init__(self):
self.docs_url = "https://coda.io/apis/v1/docs"
try:
self.TOKEN = os.environ["CODA_TOKEN"]
self.headers = {'Authorization': f'Bearer {self.TOKEN}'}
except KeyError:
print("Please set the CODA_TOKEN environment variable", sys.err)
exit()
def _auth_req(self,params:str=None, url:str=None) -> Dict[str,str]:
kwargs = {"url":url,"params":params, "headers":self.headers}
r = requests.get(**kwargs).json()
token = r.get("nextPageToken", False)
while token:
kwargs["params"]["pageToken"] = token
res = requests.get(**kwargs).json()
r["items"].extend(res["items"])
token = res.get("nextPageToken", False)
link = res.get("nextPageLink", False)
return r
def _get_fields(self, r: Dict[str, str], fields: Optional[List[str]] = None, alfred: bool = False) -> List[Dict[str, str]]:
resp: List[Dict[str,str]] = []
for i in r:
d: Dict[str,str] = {}
for k, v in i.items():
if k == "name" and alfred:
d["uid"] = v
d["title"] = v
d["subtitle"] = v
d["icon"] = "/Users/lucanaef/Downloads/coda.jpg"
continue
if alfred:
d["variables"] = d.get("variables",{})
# setting environment variables in Alfred
if k in ["id", "browserLink"]:
d["variables"][k] = v
continue
if k in fields:
d[k] = v
resp.append(d)
return resp
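# When alfred=True, each item is shaped for Alfred's Script Filter JSON: uid, title
# and subtitle taken from the doc or page name, a fixed icon path, and a "variables"
# dict carrying the Coda id and browserLink for later workflow steps.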
def list_docs(self, id: bool = False, alfred: bool=False) -> Dict[str, str]:
"""
Queries the Coda API for a doc with the given query
"""
params = {
'query': '',
}
r = self._auth_req(params=params,url=self.docs_url)["items"]
fields = ["browserLink","name"]
return self._get_fields(r, fields, alfred=alfred)
def list_all_pages(self, pages: List[str], alfred: bool=False) -> Dict[str, str]:
all: List[Dict[str,str]] = []
for i in pages:
r = self._auth_req(params={"limit":1000}, url=f"{self.docs_url}/{i}/pages/")["items"]
fields = ["browserLink","name"]
r = self._get_fields(r, fields, alfred)
all.extend(r)
return all
def print_tables(self, doc:str, max_tables:int=10):
uri = f"{self.docs_url}/{doc}/tables/"
def get_cols(table):
r = self._auth_req(params={}, url=f"{self.docs_url}/{doc}/tables/{table}/columns/")
return [i["name"] for i in r["items"]]
def get_rows(table):
r = self._auth_req(params={}, url=f"{self.docs_url}/{doc}/tables/{table}/rows/")
return [list(i["values"].values()) for i in r["items"]]
resp = self._auth_req(url=uri, params={})
k = 0
for i in resp["items"]:
if k > max_tables:
continue
idx = i["id"]
rows = get_rows(idx)
cols = get_cols(idx)
print(
|
pd.DataFrame(rows, columns=cols)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
from numpy import allclose, isclose
from pandapower.pf.runpp_3ph import runpp_3ph
from pandapower.results import get_relevant_elements
import pandapower as pp
def runpp_with_consistency_checks(net, **kwargs):
pp.runpp(net, **kwargs)
consistency_checks(net)
return True
def runpp_3ph_with_consistency_checks(net, **kwargs):
runpp_3ph(net, **kwargs)
consistency_checks_3ph(net)
return True
def rundcpp_with_consistency_checks(net, **kwargs):
pp.rundcpp(net, **kwargs)
consistency_checks(net, test_q=False)
return True
def consistency_checks(net, rtol=1e-3, test_q=True):
indices_consistent(net)
branch_loss_consistent_with_bus_feed_in(net, rtol)
element_power_consistent_with_bus_power(net, rtol, test_q)
def indices_consistent(net):
elements = get_relevant_elements()
for element in elements:
e_idx = net[element].index
res_idx = net["res_" + element].index
assert len(e_idx) == len(res_idx), "length of %s bus and res_%s indices do not match"%(element, element)
assert all(e_idx == res_idx), "%s bus and res_%s indices do not match"%(element, element)
def branch_loss_consistent_with_bus_feed_in(net, atol=1e-2):
"""
The surplus of bus feed-in summed over all buses always has to be equal to the sum of losses in
all branches.
"""
# Active Power
bus_surplus_p = -net.res_bus.p_mw.sum()
bus_surplus_q = -net.res_bus.q_mvar.sum()
branch_loss_p = net.res_line.pl_mw.values.sum() + net.res_trafo.pl_mw.values.sum() + \
net.res_trafo3w.pl_mw.values.sum() + net.res_impedance.pl_mw.values.sum() + \
net.res_dcline.pl_mw.values.sum()
branch_loss_q = net.res_line.ql_mvar.values.sum() + net.res_trafo.ql_mvar.values.sum() + \
net.res_trafo3w.ql_mvar.values.sum() + net.res_impedance.ql_mvar.values.sum() + \
net.res_dcline.q_to_mvar.values.sum() + net.res_dcline.q_from_mvar.values.sum()
try:
assert isclose(bus_surplus_p, branch_loss_p, atol=atol)
except AssertionError:
raise AssertionError("Branch losses are %.4f MW, but power generation at the buses exceeds the feedin by %.4f MW"%(branch_loss_p, bus_surplus_p))
try:
assert isclose(bus_surplus_q, branch_loss_q, atol=atol)
except AssertionError:
raise AssertionError("Branch losses are %.4f MVar, but power generation at the buses exceeds the feedin by %.4f MVar"%(branch_loss_q, bus_surplus_q))
def element_power_consistent_with_bus_power(net, rtol=1e-2, test_q=True):
"""
The bus feed-in at each node has to be equal to the sum of the element feed-ins at that node.
"""
bus_p = pd.Series(data=0., index=net.bus.index)
bus_q = pd.Series(data=0., index=net.bus.index)
for idx, tab in net.ext_grid.iterrows():
if tab.in_service:
bus_p.at[tab.bus] -= net.res_ext_grid.p_mw.at[idx]
bus_q.at[tab.bus] -= net.res_ext_grid.q_mvar.at[idx]
for idx, tab in net.gen.iterrows():
if tab.in_service:
bus_p.at[tab.bus] -= net.res_gen.p_mw.at[idx]
bus_q.at[tab.bus] -= net.res_gen.q_mvar.at[idx]
for idx, tab in net.load.iterrows():
bus_p.at[tab.bus] += net.res_load.p_mw.at[idx]
bus_q.at[tab.bus] += net.res_load.q_mvar.at[idx]
for idx, tab in net.sgen.iterrows():
bus_p.at[tab.bus] -= net.res_sgen.p_mw.at[idx]
bus_q.at[tab.bus] -= net.res_sgen.q_mvar.at[idx]
for idx, tab in net.asymmetric_load.iterrows():
bus_p.at[tab.bus] += net.res_asymmetric_load.p_mw.at[idx]
bus_q.at[tab.bus] += net.res_asymmetric_load.q_mvar.at[idx]
for idx, tab in net.asymmetric_sgen.iterrows():
bus_p.at[tab.bus] -= net.res_asymmetric_sgen.p_mw.at[idx]
bus_q.at[tab.bus] -= net.res_asymmetric_sgen.q_mvar.at[idx]
for idx, tab in net.storage.iterrows():
bus_p.at[tab.bus] += net.res_storage.p_mw.at[idx]
bus_q.at[tab.bus] += net.res_storage.q_mvar.at[idx]
for idx, tab in net.shunt.iterrows():
bus_p.at[tab.bus] += net.res_shunt.p_mw.at[idx]
bus_q.at[tab.bus] += net.res_shunt.q_mvar.at[idx]
for idx, tab in net.ward.iterrows():
bus_p.at[tab.bus] += net.res_ward.p_mw.at[idx]
bus_q.at[tab.bus] += net.res_ward.q_mvar.at[idx]
for idx, tab in net.xward.iterrows():
bus_p.at[tab.bus] += net.res_xward.p_mw.at[idx]
bus_q.at[tab.bus] += net.res_xward.q_mvar.at[idx]
assert allclose(net.res_bus.p_mw.values, bus_p.values, equal_nan=True, rtol=rtol)
if test_q:
assert allclose(net.res_bus.q_mvar.values, bus_q.values, equal_nan=True, rtol=rtol)
def consistency_checks_3ph(net, rtol=2e-3):
indices_consistent_3ph(net)
branch_loss_consistent_with_bus_feed_in_3ph(net, rtol)
element_power_consistent_with_bus_power_3ph(net, rtol)
def indices_consistent_3ph(net):
elements = get_relevant_elements("pf_3ph")
for element in elements:
e_idx = net[element].index
res_idx = net["res_" + element+"_3ph"].index
assert len(e_idx) == len(res_idx), "length of %s bus and res_%s indices do not match"%(element, element)
assert all(e_idx == res_idx), "%s bus and res_%s indices do not match"%(element, element)
def branch_loss_consistent_with_bus_feed_in_3ph(net, atol=1e-2):
"""
The surplus of bus feed-in summed over all buses always has to be equal to the sum of losses in
all branches.
"""
bus_surplus_p = -net.res_bus_3ph[["p_a_mw", "p_b_mw", "p_c_mw"]].sum().sum()
bus_surplus_q = -net.res_bus_3ph[["q_a_mvar", "q_b_mvar", "q_c_mvar"]].sum().sum()
branch_loss_p = net.res_line_3ph.p_a_l_mw.sum() + net.res_trafo_3ph.p_a_l_mw.sum() + \
net.res_line_3ph.p_b_l_mw.sum() + net.res_trafo_3ph.p_b_l_mw.sum() + \
net.res_line_3ph.p_c_l_mw.sum() + net.res_trafo_3ph.p_c_l_mw.sum()
branch_loss_q = net.res_line_3ph.q_a_l_mvar.sum() + net.res_trafo_3ph.q_a_l_mvar.sum() + \
net.res_line_3ph.q_b_l_mvar.sum() + net.res_trafo_3ph.q_b_l_mvar.sum() + \
net.res_line_3ph.q_c_l_mvar.sum() + net.res_trafo_3ph.q_c_l_mvar.sum()
try:
assert isclose(bus_surplus_p, branch_loss_p, atol=atol)
except AssertionError:
raise AssertionError("Branch losses are %.4f MW, but power generation at the buses exceeds the feedin by %.4f MW"%(branch_loss_p, bus_surplus_p))
try:
assert isclose(bus_surplus_q, branch_loss_q, atol=atol)
except AssertionError:
raise AssertionError("Branch losses are %.4f MVar, but power generation at the buses exceeds the feedin by %.4f MVar"%(branch_loss_q, bus_surplus_q))
def element_power_consistent_with_bus_power_3ph(net, rtol=1e-2):
"""
The bus feed-in at each node has to be equal to the sum of the element feed-ins at that node.
"""
bus_p_a = pd.Series(data=0., index=net.bus.index)
bus_q_a = pd.Series(data=0., index=net.bus.index)
bus_p_b = pd.Series(data=0., index=net.bus.index)
bus_q_b = pd.Series(data=0., index=net.bus.index)
bus_p_c =
|
pd.Series(data=0., index=net.bus.index)
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# General
import pandas as pd
import numpy as np
from IPython.display import display
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# Own modules
import metricas
import bautizo_prepago as bt
import config_bt_prepago as cf
l_gral_lema_stem = cf.l_gral_lema_stem_v6
#prediccion_conjunto_test
# modelo_lda = lda
# diccionario = dictionary
# df_test= test_etiquetado
# pmin = minimum probability required to make a prediction
# modo = 0 or 1, to use it as a classifier or as a feature vector
def prediccion_conjunto_test(modelo_lda, diccionario, df_test, pmin=0.35, modo=0,verbose=False):
### "Bautizo topicos" ###
probabilidades_topicos=[]
palabras_topicos=[]
n_topicos = len(modelo_lda.get_topics())
for topico in modelo_lda.show_topics(num_topics = n_topicos, num_words=10,log=False, formatted=True):
palabras_topicos.append((bt.recuperacion_palabras_topicos(topico[1]),topico[0]))
probabilidades_topicos.append((bt.recuperacion_probabilidades_marginales(topico[1]),topico[0]))
diccionario_exterior = bt.creacion_lut_temas(probabilidades_topicos,palabras_topicos)
if modo == 0 and verbose:
display(diccionario_exterior)
# Create the topic-to-name dictionary
ponderaciones_globales = bt.bautizo_topicos_ponderado(diccionario_exterior,l_gral_lema_stem)
if modo == 0 and verbose:
print(ponderaciones_globales)
dicc_temas = dict(bt.bautizo_final(ponderaciones_globales,cf.D_d_D[n_topicos]))
    ### Prediction ###
corpus_train_test = list(df_test["Descripción"])
corpus_train_test = [diccionario.doc2bow(text) for text in corpus_train_test]
if modo == 0:
vector =[]
for doc_nuevo in corpus_train_test:
prediccion = modelo_lda[doc_nuevo]
prediccion.sort(reverse= True,key=lambda x: x[1])
prediccion = (metricas.filtro_probs(prediccion,pmin))
vector.append(prediccion)
        # Assign classifications
Pred_M1 = pd.Series([item[0][0] for item in vector])
Pred_M2 = pd.Series([item[1][0] for item in vector])
Pred_M3 = pd.Series([item[2][0] for item in vector])
df =
|
pd.concat([Pred_M1, Pred_M2, Pred_M3], axis=1)
|
pandas.concat
|
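# Hypothetical sketch of the sort-then-threshold step that prediccion_conjunto_test
# relies on (metricas.filtro_probs is project-specific; this only shows the generic
# idea of keeping topics whose probability clears pmin, highest first).
preds = [(3, 0.52), (1, 0.30), (0, 0.18)]   # (topic_id, probability), made-up values
preds.sort(key=lambda x: x[1], reverse=True)
pmin = 0.35
top = [p for p in preds if p[1] >= pmin]
print(top)                                  # -> [(3, 0.52)]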
import pandas as pd
import os
# where to save or read
CSV_DIR = 'OECD_csv_datasets'
PROCESSED_DIR = 'OECD_csv_processed'
# datafile = 'OECD_csv_processed/industry_candidates.csv'
if not os.path.exists(PROCESSED_DIR):
os.makedirs(PROCESSED_DIR)
# STAGE 3:
def standardize_data(dset_id, df):
# standardized column names
stdcol_dict = {'Time Period': 'YEAR', 'Observation': 'series', 'Industry': 'INDUSTRY', 'Measure': 'MEASURE',
'Country': 'NATION'}
cols = df.columns.values.tolist()
print(dset_id, cols)
# for test
# original_df = df
# first deal with any potential tuple columns
# e.g. 'Country - distribution'
tuple_col = 'Country - distribution'
if tuple_col in cols:
split_list = tuple_col.split(' - ')
new_col_list = [split_list[0], split_list[1]]
for n, col in enumerate(new_col_list):
df[col] = df[tuple_col].apply(lambda x: x.split('-')[n])
df = df.drop(tuple_col, axis=1)
# rename common occurrence column names
# 'Time Period' to 'YEAR', 'Observation' to 'series'
# 'Industry' to 'INDUSTRY', 'Country' to 'NATION'
df.rename(stdcol_dict, axis='columns', inplace=True)
cols = df.columns.values.tolist()
# Industry 'other' rename
industry_renames = ['Activity', 'ISIC3', 'Sector']
if any(k in industry_renames for k in cols):
no = list(set(industry_renames) & set(cols))
df.rename(columns={no[0]: 'INDUSTRY'}, inplace=True)
cols = df.columns.values.tolist()
    # Country 'other' rename - has to be done in order
# 'Country - distribution' is a special case already dealt with above
country_renames = ['Declaring country', 'Partner country', 'Reporting country']
for cname in country_renames:
if cname in cols:
df.rename({cname: 'NATION'}, axis='columns', inplace=True)
break
cols = df.columns.values.tolist()
print(dset_id, cols)
# now find columns that are not YEAR, series, INDUSTRY, MEASURE or NATION
stdcols_list = []
nonstdcols_list = []
measurecol = False
for k in stdcol_dict:
stdcols_list.append(stdcol_dict[k])
for cname in cols:
if cname not in stdcols_list:
nonstdcols_list.append(cname)
elif not measurecol and cname == 'MEASURE':
measurecol = True
if nonstdcols_list:
if measurecol:
df = df.rename(columns={'MEASURE': 'temp'})
nonstdcols_list.append('temp')
df['MEASURE'] = df[nonstdcols_list].apply(lambda x: ','.join(x), axis=1)
df.drop(nonstdcols_list, axis=1, inplace=True)
cols = df.columns.values.tolist()
print(dset_id, nonstdcols_list, measurecol)
print(dset_id, cols)
df.set_index('YEAR', inplace=True)
df.to_csv(os.path.join(PROCESSED_DIR, dset_id + '_C.csv'))
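# Hypothetical mini-example of the tuple-column split performed above, on made-up data
# (pd is already imported at the top of this script).
_demo = pd.DataFrame({'Country - distribution': ['DEU-retail', 'FRA-wholesale']})
for _n, _col in enumerate(['Country', 'distribution']):
    _demo[_col] = _demo['Country - distribution'].apply(lambda x: x.split('-')[_n])
_demo = _demo.drop('Country - distribution', axis=1)
print(_demo)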
# STAGE 1: OECD data set CSV analysis for data sets covering industries
# criteria
criteria = ['Industry', 'Activity', 'ISIC3', 'Sector']
candidates = []
column_name = []
# iterate through each CSV file in the directory and analyse it
for filename in os.listdir(CSV_DIR):
if filename.endswith(".csv"):
dsetid = os.path.splitext(filename)[0]
fromfile = os.path.join(CSV_DIR, filename)
oecd_dataset_df = pd.read_csv(fromfile)
oecd_cols = oecd_dataset_df.columns.values.tolist()
if any(k in criteria for k in oecd_cols):
intersection = list(set(criteria) & set(oecd_cols))
candidates.append(dsetid)
occurrence = next((x for x in intersection if x == criteria[0]), None)
if occurrence is None:
column_name.append(intersection[0])
else:
column_name.append(occurrence)
print(dsetid, intersection, occurrence)
# create candidate DataFrame
candidates_df = pd.DataFrame({'KeyFamilyId': candidates, 'ColumnName': column_name})
# diagnostic info
print(len(candidates), 'industry candidates found')
# STAGE 2 : analysis of OECD industry related data set for specific industry criteria
# criteria
industryTypeKey = 'ELECTRICITY'
hasTarget = []
# find which have data on target industry type
for row in candidates_df.iterrows():
datasetId = row[1]['KeyFamilyId']
colName = row[1]['ColumnName']
dataset_df = pd.read_csv(os.path.join(CSV_DIR, datasetId + '.csv'))
print('checking', datasetId)
try:
filtered_df = dataset_df[dataset_df[colName].str.startswith(industryTypeKey)]
except ValueError:
# all NaNs in target column, nothing to see here - move on
pass
else:
if len(filtered_df.index):
# non-empty DataFrame
hasTarget.append(datasetId)
# call stage 3
standardize_data(datasetId, filtered_df)
# diagnostic info
print(len(hasTarget), 'beginning with', industryTypeKey)
print(hasTarget)
# target data frame
def_cols = ['YEAR', 'series', 'INDUSTRY', 'NATION', 'MEASURE']
combined_df =
|
pd.DataFrame(columns=def_cols)
|
pandas.DataFrame
|
import datetime
import numpy as np
import pandas as pd
import requests
from pandas.tseries.offsets import BDay
from fixed_income import util
DATE_FORMAT = "%Y%m%d"
TREASURY_KINDS = ("Bill", "Note", "Bond", "CMB", "TIPS", "FRN")
SECURITY_FIELDS = [
"cusip",
"issueDate",
"securityType",
"securityTerm",
"maturityDate",
"interestRate",
"rspoeopening",
]
def _columns_of(table):
return table.loc[0, :].values.tolist()
def _find_price(tables):
return (t for t in tables if "Bid" in _columns_of(t))
def _create_df(table):
df = table.copy()
df.columns = _columns_of(df)
df = df.drop(df.index[0])
return df
def _get_date(date):
if isinstance(date, datetime.date):
return date
elif isinstance(date, str):
return datetime.datetime.strptime(date, DATE_FORMAT)
raise NotImplementedError(f"{type(date)} not supported.")
def wsj_treasury_prices(date=None):
"""Get US Treasury Bill, Note and Bond prices from www.wsj.com
Parameters
----------
date : str
Optional, Date or date string of format %Y%m%d, e.g. 20170915
Returns
-------
pandas.DataFrame
"""
if date:
date_string = date if isinstance(date, str) else date.strftime(DATE_FORMAT)
url = f"http://www.wsj.com/mdc/public/page/2_3020-treasury-{date_string}.html?mod=mdc_pastcalendar"
else:
url = (
"http://www.wsj.com/mdc/public/page/2_3020-treasury.html?mod=3D=#treasuryB"
)
tables = pd.read_html(url)
df = pd.concat(_create_df(t) for t in _find_price(tables))
df["Maturity"] = pd.to_datetime(df["Maturity"])
df = df.sort_values(by=["Maturity", "Coupon"])
df.index = range(len(df))
return df
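# Hypothetical, offline illustration of the pd.read_html pattern used above, parsing an
# inline HTML snippet instead of the live WSJ page (read_html still needs lxml or
# bs4+html5lib installed; pd is imported at the top of this module).
_demo_html = (
    "<table><tr><th>Maturity</th><th>Bid</th></tr>"
    "<tr><td>2030-05-15</td><td>99.5</td></tr></table>"
)
_demo_tables = pd.read_html(_demo_html)
print(_demo_tables[0])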
def treasury_direct_prices(date=None):
"""Get US Treasury prices from www.treasurydirect.gov
Parameters
----------
date : str
Optional, Date or date string of format %Y%m%d, e.g. 20170915
Returns
-------
pandas.DataFrame
"""
if date is None:
url = (
"https://www.treasurydirect.gov/GA-FI/FedInvest/todaySecurityPriceDate.htm"
)
table = pd.read_html(url)[0]
clean_date = datetime.datetime.today()
else:
clean_date = _get_date(date)
url = (
"https://www.treasurydirect.gov/GA-FI/FedInvest/selectSecurityPriceDate.htm"
)
data = {
"priceDate.month": clean_date.month,
"priceDate.day": clean_date.day,
"priceDate.year": clean_date.year,
"submit": "Show Prices",
}
response = requests.post(url, data=data)
assert response.ok
table =
|
pd.read_html(response.text)
|
pandas.read_html
|
import pandas as pd
def trades_to_candles(trades_data, price_column="price", timestamp_column="created_at", amount_column="amount",
time_interval="1min"):
"""
This function takes the trades data frame and gets candles data.
:param pd.DataFrame trades_data: Trades data frame.
:param str price_column: Price column.
:param str timestamp_column: Timestamp column.
:param str time_interval: Time interval. Must be one of https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
:param str amount_column: Amount column to calculate the trades volume.
:return: pd.DataFrame with candles data.
"""
# Input validation
if not isinstance(trades_data, pd.DataFrame):
raise ValueError(f"The parameter trades_data must be a data frame. Got {type(trades_data)} instead.")
elif not isinstance(price_column, str):
raise ValueError(f"The parameter price_column must be a string. Got {type(price_column)} instead.")
elif not isinstance(timestamp_column, str):
raise ValueError(f"The parameter timestamp_column must be a string. Got {type(timestamp_column)} instead.")
elif not isinstance(time_interval, str):
raise ValueError(f"The parameter time_interval must be a string. Got {type(time_interval)} instead.")
elif not isinstance(amount_column, str):
raise ValueError(f"The parameter amount_column must be a string. Got {type(amount_column)} instead.")
cols = list(trades_data.columns)
    if price_column not in cols:
        raise ValueError(f"The parameter price_column must be one of the columns of the trades_data data frame.")
    elif timestamp_column not in cols:
        raise ValueError(f"The parameter timestamp_column must be one of the columns of the trades_data data frame.")
    elif amount_column not in cols:
        raise ValueError(f"The parameter amount_column must be one of the columns of the trades_data data frame.")
# Cast timestamp column as datetime
trades_data[timestamp_column] = pd.to_datetime(trades_data[timestamp_column])
# Group by time_interval and get candles values
candles = trades_data.groupby(pd.Grouper(key=timestamp_column, freq=time_interval)).agg(
open=pd.NamedAgg(column=price_column, aggfunc="first"),
close=pd.NamedAgg(column=price_column, aggfunc="last"),
high=pd.NamedAgg(column=price_column, aggfunc="max"),
low=pd.NamedAgg(column=price_column, aggfunc="min"),
volume=
|
pd.NamedAgg(column=amount_column, aggfunc="sum")
|
pandas.NamedAgg
|
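# Hypothetical usage sketch for trades_to_candles above, on a tiny made-up trades frame;
# it exercises the same pd.Grouper / pd.NamedAgg aggregation shown in the function body.
import pandas as pd
demo_trades = pd.DataFrame({
    "created_at": pd.to_datetime(["2024-01-01 00:00:05", "2024-01-01 00:00:40",
                                  "2024-01-01 00:01:10"]),
    "price": [100.0, 101.5, 99.8],
    "amount": [0.2, 0.1, 0.3],
})
demo_candles = demo_trades.groupby(pd.Grouper(key="created_at", freq="1min")).agg(
    open=pd.NamedAgg(column="price", aggfunc="first"),
    close=pd.NamedAgg(column="price", aggfunc="last"),
    high=pd.NamedAgg(column="price", aggfunc="max"),
    low=pd.NamedAgg(column="price", aggfunc="min"),
    volume=pd.NamedAgg(column="amount", aggfunc="sum"),
)
print(demo_candles)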
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
    # allows setting track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=
|
Index(data)
|
pandas.Index
|
# -*- coding: utf-8 -*-
"""MLBA_Hakathon_fin
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SKr50EBzZcYaqyl9jx5PxEvdUu70PjMj
"""
#Importing libraries
import glob
import pandas as pd
import numpy as np
import sys, getopt
import tensorflow as tf
import matplotlib.pyplot as plt
# for preprocessing & feature selection
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
# for cross-validation
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
# for evaluating the model
from sklearn.metrics import accuracy_score
from tqdm import tqdm
from sklearn.metrics import matthews_corrcoef
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from xgboost import XGBClassifier
#For command line operation
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print('script.py -i <test.csv> -o <predict.csv>\nInput file contains features of sequences with labels under the \'Label\' column\nOutput file contains Protein IDs and their subsequent labels\nFor more details please refer to README.txt')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--Help"):
print('script.py -i <test.csv> -o <predict.csv>\nInput file contains features of sequences with labels under the \'Label\' column\nOutput file contains Protein IDs and their subsequent labels\nFor more details please refer to README.txt')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
print('Taking input from', inputfile)
elif opt in ("-o", "--ofile"):
outputfile = arg
print('Writing output into', outputfile,"\n\n")
return inputfile,outputfile
if __name__ == "__main__":
data1,data2=main(sys.argv[1:])
#print(data1,data2)
#Importing Training data features
Train_dataset = pd.read_csv('/content/drive/MyDrive/MLBA Hakathon/Train_faetures.csv', index_col=None, header=0)
Tr_label = Train_dataset[['Label']]
Tr_data = Train_dataset.loc[:,Train_dataset.columns != 'Label']
Tr_label=np.ravel(Tr_label)
X_trn = Tr_data
#Importing Validation data features
Valid_dataset = pd.read_csv(data1, index_col=None, header=0)
Valid_id = Valid_dataset[['ID']]
Valid_data = Valid_dataset.loc[:, Valid_dataset.columns != 'ID']
#Feature selection
clf = ExtraTreesClassifier(n_estimators=500,random_state=135)
clf = clf.fit(Tr_data, Tr_label)
model = SelectFromModel(clf, prefit=True)
X = model.transform(Tr_data) #38 features are selected from 77
Z = model.transform(Valid_data)
#function for writing the data output
def scoreData(data, model, IDs):
y_pred = model.predict(data)
y=pd.DataFrame(y_pred)
z=pd.DataFrame(IDs)
res=[z,y]
result = pd.concat(res, axis=1)
result.columns =['ID', 'Label']
return(result)
# Shuffling and splitting of the data set
train_size = int(0.8 * len(X_trn))
train_set_x = X_trn[:train_size]
train_set_y = Tr_label[:train_size]
test_set_x = X_trn[train_size:]
test_set_y = Tr_label[train_size:]
# KNeighbors Classifier
hyperparameters = dict(leaf_size=1, n_neighbors=29, p=1)
# Create new KNN object with the hyperparameters defined above
knn = KNeighborsClassifier(**hyperparameters)
knn.fit(train_set_x,train_set_y)
knn_pred=knn.predict(test_set_x)
print("KNeighbours Accuracy:",accuracy_score(test_set_y, knn_pred))
#SVC classifier
clf = make_pipeline(StandardScaler(), SVC(gamma='auto'))
clf.fit(train_set_x, train_set_y)
# Equivalent explicit pipeline (left commented out: Pipeline itself is not imported here)
# Pipeline(steps=[('standardscaler', StandardScaler()), ('svc', SVC(gamma='auto'))])
svc_pred=clf.predict(test_set_x)
print("SVC Accuracy:",metrics.accuracy_score(test_set_y, svc_pred))
#XGBoost Classifier
xgb = XGBClassifier(max_depth=12,
subsample=0.33,
objective='binary:logistic',
n_estimators=1500,
learning_rate = 0.01,
early_stopping_rounds=10)
xgb.fit(train_set_x, train_set_y)
# make predictions for test data
y_pred_gb = xgb.predict(test_set_x)
predict = [round(value) for value in y_pred_gb]
accuracy = accuracy_score(test_set_y, predict)
print("XGBoost Accuracy: %.2f%%" % (accuracy * 100.0))
# Ensemble of multiple classifiers
models={}
models[0] = RandomForestClassifier(n_estimators = 750, random_state = 42)
models[0].fit(train_set_x, train_set_y)
models[1] = KNeighborsClassifier()
models[1].fit(train_set_x, train_set_y)
models[2] = make_pipeline(StandardScaler(), SVC(gamma='auto'))
models[2].fit(train_set_x, train_set_y)
models[3] = XGBClassifier(max_depth=12, subsample=0.33, objective='binary:logistic', n_estimators=1500, learning_rate = 0.01,early_stopping_rounds=10)
models[3].fit(train_set_x, train_set_y)
#make_pipeline(steps=[('standardscaler', StandardScaler()),('svc', SVC(gamma='auto'))])
# Final prediction by majority vote
final_test_prediction = []
unique_labels=[1,0]
for sample in np.array(test_set_x):  # iterate over rows (iterating the DataFrame directly yields column names)
labels = []
for m in models.keys():
pds = models[m].predict([sample])
labels.append(pds)
if labels.count(1)>labels.count(0):
final_test_prediction.append(1)
else:
final_test_prediction.append(0)
print("Ensemble Accuracy:",accuracy_score(test_set_y, final_test_prediction))
X_tr = np.array(X_trn)
trn_set_x = np.array(train_set_x)
tes_set_x = np.array(test_set_x)
val_data = np.array(Valid_data)
# k-fold cross validation
kf = KFold(n_splits=5, random_state=1234, shuffle=True)
kf.get_n_splits(X_tr)
#print(kf)
models={}
c=0
for train_index, test_index in kf.split(X_tr):
#print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X_tr[train_index], X_tr[test_index]
Y_train, Y_test = Tr_label[train_index], Tr_label[test_index]
models[c] = RandomForestClassifier(n_estimators=500,random_state = 42,n_jobs=-1)
models[c].fit(X_train, Y_train)
c+=1
final_test_prediction = [] # Final prediction by majority vote
unique_labels=[1,0]
for sample in tes_set_x:
labels = []
for m in models.keys():
pds = models[m].predict([sample])
labels.append(pds)
if labels.count(1)>labels.count(0):
final_test_prediction.append(1)
else:
final_test_prediction.append(0)
print("Final Accuracy:",accuracy_score(test_set_y, final_test_prediction))
# Final prediction by majority vote
final_trn_prediction = []
unique_labels=[1,0]
for sample in val_data:
labels = []
for m in models.keys():
pds = models[m].predict([sample])
labels.append(pds)
if labels.count(1)>labels.count(0):
final_trn_prediction.append(1)
else:
final_trn_prediction.append(0)
y=pd.DataFrame(final_trn_prediction)
z=pd.DataFrame(Valid_id)
res=[z,y]
result =
|
pd.concat(res, axis=1)
|
pandas.concat
|
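# Hypothetical sketch of the majority-vote rule applied repeatedly above, on toy
# per-model predictions for a single sample.
votes = [1, 0, 1, 1, 0]                                   # assumed model outputs
final_label = 1 if votes.count(1) > votes.count(0) else 0
print(final_label)                                        # -> 1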
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx =
|
pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
|
pandas.MultiIndex.from_arrays
|
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
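# Hypothetical, self-contained illustration of the round-trip pattern these fixtures
# exercise, using an in-memory sqlite3 connection (fallback mode, no SQLAlchemy);
# sqlite3 and pd are imported at the top of this module.
_demo_conn = sqlite3.connect(":memory:")
pd.DataFrame({"A": [1, 2]}).to_sql("demo_roundtrip", _demo_conn, index=False)
_demo_result = pd.read_sql_query("SELECT * FROM demo_roundtrip", _demo_conn)
assert _demo_result["A"].tolist() == [1, 2]
_demo_conn.close()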
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
    flavor can always be passed even in SQLAlchemy mode;
    it should be correctly ignored.
    We don't use drop_table because that isn't part of the public API.
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
        # result index will gain a name from a set_index operation; expected
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against an MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
        # see GH7324. Although it is more of an API test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against an PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schema's not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schema's
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
expected.to_sql(
"test_copy_insert", self.conn, index=False, method=psql_insert_copy
)
result = sql.read_sql_table("test_copy_insert", self.conn)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlite3.connect(":memory:")
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
# GH 29921
self._to_sql(method="multi")
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
assert self.pandasSQL.has_table("drop_test_frame")
self.pandasSQL.drop_table("drop_test_frame")
assert not self.pandasSQL.has_table("drop_test_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == "mysql":
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(f"Table {table}, column {column} not found")
def test_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": bool})
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype="STRING")
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
def test_notna_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP"
assert self._get_sqlite_column_type(tbl, "Int") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Float") == "REAL"
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
msg = "Empty table or column name specified"
with pytest.raises(ValueError, match=msg):
df.to_sql("", self.conn)
for ndx, weird_name in enumerate(
[
"test_weird_name]",
"test_weird_name[",
"test_weird_name`",
'test_weird_name"',
"test_weird_name'",
"_b.test_weird_name_01-30",
'"_b.test_weird_name_01-30"',
"99beginswithnumber",
"12345",
"\xe9",
]
):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
c_tbl = f"test_weird_col_name{ndx:d}"
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
def date_format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime("%Y%m%d")
_formatters = {
datetime: "'{}'".format,
str: "'{}'".format,
np.str_: "'{}'".format,
bytes: "'{}'".format,
float: "{:.8f}".format,
int: "{:d}".format,
type(None): lambda x: "NULL",
np.float64: "{:.10f}".format,
bool: "'{!s}'".format,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isna(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
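# Illustrative sketch (added, not part of the original test code): how format_query
# substitutes literals via the _formatters table above. Floats are rendered with
# 8 decimal places and None / NaN floats become NULL.
#
#   format_query("SELECT * FROM tbl WHERE a = %s AND b = %s", 1.5, None)
#   # -> "SELECT * FROM tbl WHERE a = 1.50000000 AND b = NULL"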
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
self.method = request.function
self.conn = sqlite3.connect(":memory:")
# In some test cases we may close db connection
# Re-open conn here so we can perform cleanup in teardown
yield
self.method = request.function
self.conn = sqlite3.connect(":memory:")
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
        frame = tm.makeTimeDataFrame()
"""
The ``expected_returns`` module provides functions for estimating the expected returns of
the assets, which is a required input in mean-variance optimization.
By convention, the output of these methods is expected *annual* returns. It is assumed that
*daily* prices are provided, though in reality the functions are agnostic
to the time period (just change the ``frequency`` parameter). Asset prices must be given as
a pandas dataframe, as per the format described in the :ref:`user-guide`.
All of the functions process the price data into percentage returns data, before
calculating their respective estimates of expected returns.
Currently implemented:
- general return model function, allowing you to run any return model from one function.
- mean historical return
- exponentially weighted mean historical return
- CAPM estimate of returns
Additionally, we provide utility functions to convert from returns to prices and vice-versa.
"""
import warnings
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import pmdarima as pm
import arch
from arch.__future__ import reindexing
import psycopg2.extensions
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
conn = psycopg2.connect(
host='database-1.csuf8nkuxrw3.us-east-2.rds.amazonaws.com',
port=5432,
user='postgres',
password='<PASSWORD>',
database='can2_etfs'
)
conn.autocommit = True
cursor = conn.cursor()
pd.options.mode.chained_assignment = None # default='warn'
def returns_from_prices(prices, log_returns=False):
"""
Calculate the returns given prices.
:param prices: adjusted (daily) closing prices of the asset, each row is a
date and each column is a ticker/id.
:type prices: pd.DataFrame
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: (daily) returns
:rtype: pd.DataFrame
"""
if log_returns:
return np.log(1 + prices.pct_change()).dropna(how="all")
else:
return prices.pct_change().dropna(how="all")
def prices_from_returns(returns, log_returns=False):
"""
Calculate the pseudo-prices given returns. These are not true prices because
the initial prices are all set to 1, but it behaves as intended when passed
to any PyPortfolioOpt method.
:param returns: (daily) percentage returns of the assets
:type returns: pd.DataFrame
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: (daily) pseudo-prices.
:rtype: pd.DataFrame
"""
if log_returns:
ret = np.exp(returns)
else:
ret = 1 + returns
ret.iloc[0] = 1 # set first day pseudo-price
return ret.cumprod()
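# Illustrative sketch (added, not part of PyPortfolioOpt): round-tripping a toy
# price series through returns_from_prices and prices_from_returns. The ticker
# name and prices are made-up placeholders; prices_from_returns rebases the
# series to start at 1, so only the shape of the price path is recovered, not
# the original level. Defined but never called, so importing stays side-effect free.
def _example_price_return_roundtrip():
    toy_prices = pd.DataFrame({"TICK": [100.0, 102.0, 101.0, 105.0]})
    rets = returns_from_prices(toy_prices)  # daily percentage returns, first row dropped
    pseudo = prices_from_returns(rets)  # pseudo-prices starting at 1
    return rets, pseudo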
def return_model(prices, method="mean_historical_return", **kwargs):
"""
Compute an estimate of future returns, using the return model specified in ``method``.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param method: the return model to use. Should be one of:
- ``mean_historical_return``
- ``ema_historical_return``
- ``capm_return``
:type method: str, optional
:raises NotImplementedError: if the supplied method is not recognised
    :return: annualised estimate of expected returns
    :rtype: pd.Series
"""
if method == "mean_historical_return":
return mean_historical_return(prices, **kwargs)
elif method == "ema_historical_return":
return ema_historical_return(prices, **kwargs)
elif method == "capm_return":
return capm_return(prices, **kwargs)
else:
raise NotImplementedError("Return model {} not implemented".format(method))
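# Illustrative sketch (added): calling the return_model dispatcher on a tiny
# made-up price frame. "AAA"/"BBB" and the numbers are placeholders; any extra
# keyword arguments (e.g. frequency, compounding) are forwarded to the chosen model.
def _example_return_model():
    toy_prices = pd.DataFrame(
        {"AAA": [10.0, 10.1, 10.3, 10.2], "BBB": [20.0, 19.8, 20.1, 20.4]}
    )
    mu = return_model(toy_prices, method="mean_historical_return", frequency=252)
    return mu  # pd.Series of annualised expected returns, indexed by ticker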
def mean_historical_return(
prices, returns_data=False, compounding=True, frequency=252, log_returns=False
):
"""
Calculate annualised mean (daily) historical return from input (daily) asset prices.
Use ``compounding`` to toggle between the default geometric mean (CAGR) and the
arithmetic mean.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
These **should not** be log returns.
:type returns_data: bool, defaults to False.
:param compounding: computes geometric mean returns if True,
arithmetic otherwise, optional.
:type compounding: bool, defaults to True
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: annualised mean (daily) return for each asset
:rtype: pd.Series
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices, log_returns)
if compounding:
return (1 + returns).prod() ** (frequency / returns.count()) - 1
else:
return returns.mean() * frequency
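# Worked sketch (added): for a single asset with daily returns r_1..r_n,
# mean_historical_return computes
#   compounding=True  ->  prod(1 + r_t) ** (frequency / n) - 1   (geometric mean / CAGR)
#   compounding=False ->  mean(r_t) * frequency                  (arithmetic mean)
# e.g. with three daily returns of exactly 0.1% each and frequency=252:
#   geometric : 1.001 ** 252 - 1 ≈ 0.286
#   arithmetic: 0.001 * 252      = 0.252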
def ema_historical_return(
prices,
returns_data=False,
compounding=True,
span=500,
frequency=252,
log_returns=False,
):
"""
Calculate the exponentially-weighted mean of (daily) historical returns, giving
higher weight to more recent data.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
These **should not** be log returns.
:type returns_data: bool, defaults to False.
:param compounding: computes geometric mean returns if True,
arithmetic otherwise, optional.
:type compounding: bool, defaults to True
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param span: the time-span for the EMA, defaults to 500-day EMA.
:type span: int, optional
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: annualised exponentially-weighted mean (daily) return of each asset
:rtype: pd.Series
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices, log_returns)
if compounding:
return (1 + returns.ewm(span=span).mean().iloc[-1]) ** frequency - 1
else:
return returns.ewm(span=span).mean().iloc[-1] * frequency
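# Note (added): ``span`` follows the pandas ``ewm`` convention, i.e. the smoothing
# factor is alpha = 2 / (span + 1), so each additional day back in time multiplies
# an observation's weight by (1 - 2 / (span + 1)) ≈ 0.996 for the default span=500.
# A rough comparison of choices:
#
#   ema_historical_return(prices, span=60)    # reacts faster to recent data
#   ema_historical_return(prices, span=500)   # default, much smoother estimate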
def capm_return(
prices,
market_prices=None,
returns_data=False,
risk_free_rate=0.02,
compounding=True,
frequency=252,
log_returns=False,
):
"""
Compute a return estimate using the Capital Asset Pricing Model. Under the CAPM,
    asset returns are equal to market returns plus a :math:`\\beta` term encoding
the relative risk of the asset.
.. math::
R_i = R_f + \\beta_i (E(R_m) - R_f)
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param market_prices: adjusted closing prices of the benchmark, defaults to None
:type market_prices: pd.DataFrame, optional
:param returns_data: if true, the first arguments are returns instead of prices.
:type returns_data: bool, defaults to False.
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02.
You should use the appropriate time period, corresponding
to the frequency parameter.
:type risk_free_rate: float, optional
:param compounding: computes geometric mean returns if True,
arithmetic otherwise, optional.
:type compounding: bool, defaults to True
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: annualised return estimate
:rtype: pd.Series
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
market_returns = None
if returns_data:
returns = prices.copy()
if market_prices is not None:
market_returns = market_prices
else:
returns = returns_from_prices(prices, log_returns)
if market_prices is not None:
market_returns = returns_from_prices(market_prices, log_returns)
# Use the equally-weighted dataset as a proxy for the market
if market_returns is None:
# Append market return to right and compute sample covariance matrix
returns["mkt"] = returns.mean(axis=1)
else:
market_returns.columns = ["mkt"]
returns = returns.join(market_returns, how="left")
# Compute covariance matrix for the new dataframe (including markets)
cov = returns.cov()
# The far-right column of the cov matrix is covariances to market
betas = cov["mkt"] / cov.loc["mkt", "mkt"]
betas = betas.drop("mkt")
# Find mean market return on a given time period
if compounding:
mkt_mean_ret = (1 + returns["mkt"]).prod() ** (
frequency / returns["mkt"].count()
) - 1
else:
mkt_mean_ret = returns["mkt"].mean() * frequency
# CAPM formula
return risk_free_rate + betas * (mkt_mean_ret - risk_free_rate)
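# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demo of the three return estimators above on a synthetic random-walk
# price frame. Ticker names, seed and drift are arbitrary assumptions, and the
# annualised-mean estimator documented above is assumed to be named
# ``mean_historical_return`` (its ``def`` line falls outside this excerpt).
import numpy as np
import pandas as pd

_rng = np.random.default_rng(42)
_demo_prices = pd.DataFrame(
    100 * np.exp(np.cumsum(_rng.normal(0.0003, 0.01, size=(500, 3)), axis=0)),
    index=pd.bdate_range("2020-01-01", periods=500),
    columns=["AAA", "BBB", "CCC"],
)
_mu_hist = mean_historical_return(_demo_prices)           # geometric (CAGR) mean by default
_mu_ema = ema_historical_return(_demo_prices, span=500)   # exponentially-weighted mean
_mu_capm = capm_return(_demo_prices)                      # equal-weighted market proxy, beta-adjusted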
"""
The ``risk_models`` module provides functions for estimating the covariance matrix given
historical returns.
The format of the data input is the same as that in :ref:`expected-returns`.
**Currently implemented:**
- fix non-positive semidefinite matrices
- general risk matrix function, allowing you to run any risk model from one function.
- sample covariance
- semicovariance
- exponentially weighted covariance
- minimum covariance determinant
- shrunk covariance matrices:
- manual shrinkage
- Ledoit Wolf shrinkage
- Oracle Approximating shrinkage
- covariance to correlation matrix
"""
def _is_positive_semidefinite(matrix):
"""
Helper function to check if a given matrix is positive semidefinite.
Any method that requires inverting the covariance matrix will struggle
with a non-positive semidefinite matrix
:param matrix: (covariance) matrix to test
:type matrix: np.ndarray, pd.DataFrame
:return: whether matrix is positive semidefinite
:rtype: bool
"""
try:
# Significantly more efficient than checking eigenvalues (stackoverflow.com/questions/16266720)
np.linalg.cholesky(matrix + 1e-16 * np.eye(len(matrix)))
return True
except np.linalg.LinAlgError:
return False
def fix_nonpositive_semidefinite(matrix, fix_method="spectral"):
"""
Check if a covariance matrix is positive semidefinite, and if not, fix it
with the chosen method.
The ``spectral`` method sets negative eigenvalues to zero then rebuilds the matrix,
while the ``diag`` method adds a small positive value to the diagonal.
:param matrix: raw covariance matrix (may not be PSD)
:type matrix: pd.DataFrame
:param fix_method: {"spectral", "diag"}, defaults to "spectral"
:type fix_method: str, optional
:raises NotImplementedError: if a method is passed that isn't implemented
:return: positive semidefinite covariance matrix
:rtype: pd.DataFrame
"""
if _is_positive_semidefinite(matrix):
return matrix
warnings.warn(
"The covariance matrix is non positive semidefinite. Amending eigenvalues."
)
# Eigendecomposition
q, V = np.linalg.eigh(matrix)
if fix_method == "spectral":
# Remove negative eigenvalues
q = np.where(q > 0, q, 0)
# Reconstruct matrix
fixed_matrix = V @ np.diag(q) @ V.T
elif fix_method == "diag":
min_eig = np.min(q)
fixed_matrix = matrix - 1.1 * min_eig * np.eye(len(matrix))
else:
raise NotImplementedError("Method {} not implemented".format(fix_method))
if not _is_positive_semidefinite(fixed_matrix): # pragma: no cover
warnings.warn(
"Could not fix matrix. Please try a different risk model.", UserWarning
)
# Rebuild labels if provided
if isinstance(matrix, pd.DataFrame):
tickers = matrix.index
return pd.DataFrame(fixed_matrix, index=tickers, columns=tickers)
else:
return fixed_matrix
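# --- Illustrative check (not part of the original module) ---
# A sketch of the PSD repair on a deliberately broken 3x3 "correlation" matrix
# that has a negative eigenvalue; the spectral fix clips it to zero and rebuilds.
import numpy as np
import pandas as pd

_bad = pd.DataFrame(
    [[1.0, 0.9, -0.9], [0.9, 1.0, 0.9], [-0.9, 0.9, 1.0]],
    index=list("ABC"), columns=list("ABC"),
)
_fixed = fix_nonpositive_semidefinite(_bad, fix_method="spectral")
print(_is_positive_semidefinite(_fixed))  # should print True (up to numerical jitter)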
def risk_matrix(prices, method="sample_cov", **kwargs):
"""
Compute a covariance matrix, using the risk model supplied in the ``method``
parameter.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param method: the risk model to use. Should be one of:
- ``sample_cov``
- ``semicovariance``
- ``exp_cov``
- ``ledoit_wolf``
- ``ledoit_wolf_constant_variance``
- ``ledoit_wolf_single_factor``
- ``ledoit_wolf_constant_correlation``
- ``oracle_approximating``
:type method: str, optional
:raises NotImplementedError: if the supplied method is not recognised
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if method == "sample_cov":
return sample_cov(prices, **kwargs)
elif method == "semicovariance" or method == "semivariance":
return semicovariance(prices, **kwargs)
elif method == "exp_cov":
return exp_cov(prices, **kwargs)
elif method == "ledoit_wolf" or method == "ledoit_wolf_constant_variance":
return CovarianceShrinkage(prices, **kwargs).ledoit_wolf()
elif method == "ledoit_wolf_single_factor":
return CovarianceShrinkage(prices, **kwargs).ledoit_wolf(
shrinkage_target="single_factor"
)
elif method == "ledoit_wolf_constant_correlation":
return CovarianceShrinkage(prices, **kwargs).ledoit_wolf(
shrinkage_target="constant_correlation"
)
elif method == "oracle_approximating":
return CovarianceShrinkage(prices, **kwargs).oracle_approximating()
else:
raise NotImplementedError("Risk model {} not implemented".format(method))
def sample_cov(prices, returns_data=False, frequency=252, log_returns=False, **kwargs):
"""
Calculate the annualised sample covariance matrix of (daily) asset returns.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices, log_returns)
return fix_nonpositive_semidefinite(
returns.cov() * frequency, kwargs.get("fix_method", "spectral")
)
def semicovariance(
prices,
returns_data=False,
benchmark=0.000079,
frequency=252,
log_returns=False,
**kwargs
):
"""
Estimate the semicovariance matrix, i.e. the covariance given that
the returns are less than the benchmark.
.. math:: \\text{semicov} = E[\\min(r_i - B, 0) \\cdot \\min(r_j - B, 0)]
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param benchmark: the benchmark return, defaults to the daily risk-free rate, i.e.
:math:`1.02^{(1/252)} -1`.
:type benchmark: float
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year). Ensure that you use the appropriate
benchmark, e.g. if ``frequency=12`` use the monthly risk-free rate.
:type frequency: int, optional
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: semicovariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices, log_returns)
drops = np.fmin(returns - benchmark, 0)
T = drops.shape[0]
return fix_nonpositive_semidefinite(
(drops.T @ drops) / T * frequency, kwargs.get("fix_method", "spectral")
)
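# --- Illustrative note (not part of the original module) ---
# ``benchmark`` above is a *per-period* return. A sketch of converting an annual
# target into the matching per-period benchmark (the 2% target is an assumption):
_annual_target = 0.02
_frequency = 252
_per_period_benchmark = (1 + _annual_target) ** (1 / _frequency) - 1  # ~7.9e-05, the default above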
def _pair_exp_cov(X, Y, span=180):
"""
Calculate the exponential covariance between two timeseries of returns.
:param X: first time series of returns
:type X: pd.Series
:param Y: second time series of returns
:type Y: pd.Series
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:return: the exponential covariance between X and Y
:rtype: float
"""
covariation = (X - X.mean()) * (Y - Y.mean())
# Exponentially weight the covariation and take the mean
if span < 10:
warnings.warn("it is recommended to use a higher span, e.g 30 days")
return covariation.ewm(span=span).mean().iloc[-1]
def exp_cov(
prices, returns_data=False, span=180, frequency=252, log_returns=False, **kwargs
):
"""
Estimate the exponentially-weighted covariance matrix, which gives
greater weight to more recent data.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
:return: annualised estimate of exponential covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices, log_returns)
N = len(assets)
# Loop over matrix, filling entries with the pairwise exp cov
S = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
S[i, j] = S[j, i] = _pair_exp_cov(
returns.iloc[:, i], returns.iloc[:, j], span
)
cov = pd.DataFrame(S * frequency, columns=assets, index=assets)
return fix_nonpositive_semidefinite(cov, kwargs.get("fix_method", "spectral"))
def min_cov_determinant(
prices,
returns_data=False,
frequency=252,
random_state=None,
log_returns=False,
**kwargs
): # pragma: no cover
warnings.warn("min_cov_determinant is deprecated and will be removed in v1.5")
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
# Extra dependency
try:
import sklearn.covariance
except (ModuleNotFoundError, ImportError):
raise ImportError("Please install scikit-learn via pip or poetry")
assets = prices.columns
if returns_data:
X = prices
else:
X = returns_from_prices(prices, log_returns)
# X = np.nan_to_num(X.values)
X = X.dropna().values
raw_cov_array = sklearn.covariance.fast_mcd(X, random_state=random_state)[1]
cov = pd.DataFrame(raw_cov_array, index=assets, columns=assets) * frequency
return fix_nonpositive_semidefinite(cov, kwargs.get("fix_method", "spectral"))
def cov_to_corr(cov_matrix):
"""
Convert a covariance matrix to a correlation matrix.
:param cov_matrix: covariance matrix
:type cov_matrix: pd.DataFrame
:return: correlation matrix
:rtype: pd.DataFrame
"""
if not isinstance(cov_matrix, pd.DataFrame):
warnings.warn("cov_matrix is not a dataframe", RuntimeWarning)
cov_matrix = pd.DataFrame(cov_matrix)
Dinv = np.diag(1 / np.sqrt(np.diag(cov_matrix)))
corr = np.dot(Dinv, np.dot(cov_matrix, Dinv))
return pd.DataFrame(corr, index=cov_matrix.index, columns=cov_matrix.index)
def corr_to_cov(corr_matrix, stdevs):
"""
Convert a correlation matrix to a covariance matrix
:param corr_matrix: correlation matrix
:type corr_matrix: pd.DataFrame
:param stdevs: vector of standard deviations
:type stdevs: array-like
:return: covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(corr_matrix, pd.DataFrame):
warnings.warn("corr_matrix is not a dataframe", RuntimeWarning)
corr_matrix = pd.DataFrame(corr_matrix)
return corr_matrix * np.outer(stdevs, stdevs)
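# --- Illustrative round-trip (not part of the original module) ---
# corr_to_cov() inverts cov_to_corr() when given the original standard deviations;
# a tiny hand-written covariance matrix makes the round trip easy to eyeball.
import numpy as np
import pandas as pd

_cov = pd.DataFrame([[0.04, 0.006], [0.006, 0.09]], index=["A", "B"], columns=["A", "B"])
_corr = cov_to_corr(_cov)                           # [[1.0, 0.1], [0.1, 1.0]]
_back = corr_to_cov(_corr, np.sqrt(np.diag(_cov)))  # equals _cov up to floating-point error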
class CovarianceShrinkage:
"""
Provide methods for computing shrinkage estimates of the covariance matrix, using the
sample covariance matrix and choosing the structured estimator to be an identity matrix
multiplied by the average sample variance. The shrinkage constant can be input manually,
though there exist methods (notably Ledoit Wolf) to estimate the optimal value.
Instance variables:
- ``X`` - pd.DataFrame (returns)
- ``S`` - np.ndarray (sample covariance matrix)
- ``delta`` - float (shrinkage constant)
- ``frequency`` - int
"""
def __init__(self, prices, returns_data=False, frequency=252, log_returns=False):
"""
:param prices: adjusted closing prices of the asset, each row is a date and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param frequency: number of time periods in a year, defaults to 252 (the number of trading days in a year)
:type frequency: int, optional
:param log_returns: whether to compute using log returns
:type log_returns: bool, defaults to False
"""
# Optional import
try:
from sklearn import covariance
self.covariance = covariance
except (ModuleNotFoundError, ImportError): # pragma: no cover
raise ImportError("Please install scikit-learn via pip or poetry")
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices =
|
pd.DataFrame(prices)
|
pandas.DataFrame
|
import covasim as cv
import covasim.defaults as cvd
import covasim.utils as cvu
import numba as nb
import numpy as np
import pandas as pd
from collections import defaultdict
def generate_people(n_people: int, mixing: pd.DataFrame, reference_ages: pd.Series, households: pd.Series) -> cv.People:
'''
Households are generated from demographic (census) data, so people are created and assigned
to a household in the same step. This is the basis for generating the multi-layered network -
NOT for the simple random network.
Requires: Household mixing matrix (see https://github.com/mobs-lab/mixing-patterns)
Householder age distribution (census data)
Household size distribution (census data)
Number of individuals to generate.
Returns a cv.People object.
'''
# Number of households to generate
total_people = sum(households.index * households.values)
household_percent = households / total_people
n_households = (n_people * household_percent).round().astype(int)
# Adjust the number of one-person households so the total population matches n_people
n_households[1] += n_people - sum(n_households * n_households.index)
# Select householder, based on householder age distribution
household_heads = np.random.choice(reference_ages.index, size=sum(n_households), p=reference_ages.values / sum(reference_ages))
# Create households, based on the formerly created householders and household mixing matrices
h_clusters, ages = _make_households(n_households, n_people, household_heads, mixing)
# Parse into a cv.People object
contacts = cv.Contacts()
contacts['H'] = clusters_to_layer(h_clusters)
people = cv.People(pars={'pop_size': n_people}, age=ages)
people.contacts = contacts
return people
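# --- Illustrative input sketch (not part of the original module) ---
# Based on how generate_people() uses its arguments, two of the census-style inputs
# have the shapes sketched below (all numbers are made up). The household mixing
# matrix is expected to come from the mobs-lab mixing-patterns data linked in the
# docstring and is consumed by _make_households(), which is defined elsewhere.
import pandas as pd

_demo_households = pd.Series({1: 300, 2: 400, 3: 200, 4: 100})          # household size -> count
_demo_reference_ages = pd.Series({age: 1.0 for age in range(18, 91)})   # householder age -> weight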
def add_school_contacts(people: cv.People, mean_contacts: float):
'''
Add a school contact layer to a cv.People instance, given the mean classroom size.
The actual classroom size is drawn from a Poisson distribution.
Everyone under 18 is assigned to a classroom cluster.
'''
classrooms = []
# Create classrooms of children of same age, assign a teacher from the adult (>21) population
for age in range(0, 18):
children_thisage = cvu.true(people.age == age)
classrooms.extend(create_clusters(children_thisage, mean_contacts))
teachers = np.random.choice(cvu.true(people.age > 21), len(classrooms), replace=False)
for i in range(len(classrooms)):
classrooms[i].append(teachers[i])
# Add to cv.People instance
people.contacts['S'] = clusters_to_layer(classrooms)
def add_work_contacts(people: cv.People, mean_contacts: float):
'''
Add work contact layer, from mean number of coworkers and already generated people, to a cv.People instance.
The actual size of each workplace cluster is drawn from a Poisson distribution.
Everyone older than 18 and up to 65 is assigned to a workplace cluster.
'''
work_inds = cvu.true((people.age > 18) & (people.age <= 65))
work_cl = create_clusters(work_inds, mean_contacts)
# Add to cv.People instance
people.contacts['W'] = clusters_to_layer(work_cl)
def add_other_contacts(people: cv.People, layers: pd.DataFrame, legacy=True):
"""
Add layers according to a layer file
Args:
people: A cv.People instance to add new layers to
layers: Dataframe from `layers.csv` where the index is the layer name
legacy: compatibility flag; not used in this function
"""
for layer_name, layer in layers.iterrows():
if layer['cluster_type'] in {'home', 'school', 'work'}:
# Ignore these cluster types, as they should be instantiated with
# - home: make_people()
# - school: add_school_contacts()
# - work: add_work_contacts()
continue
age_min = 0 if pd.isna(layer['age_lb']) else layer['age_lb']
age_max = np.inf if pd.isna(layer['age_ub']) else layer['age_ub']
age_eligible = cvu.true((people.age >= age_min) & (people.age <= age_max))
n_people = int(layer['proportion'] * len(age_eligible))
inds = np.random.choice(age_eligible, n_people, replace=False)
if layer['cluster_type'] == 'cluster':
# Create a clustered layer based on the mean cluster size
assert pd.isna(layer['dynamic']), 'Dynamic clusters not supported yet'
clusters = create_clusters(inds, layer['contacts'])
people.contacts[layer_name] = clusters_to_layer(clusters)
elif layer['cluster_type'] == 'complete':
# For a 'complete' layer, treat the layer members as a single cluster
assert pd.isna(layer['dynamic']), 'Dynamic complete clusters not supported yet'
people.contacts[layer_name] = clusters_to_layer([inds])
elif layer['cluster_type'] == 'random':
people.contacts[layer_name] = RandomLayer(inds, layer['contacts'], layer['dispersion'], dynamic=(not pd.isna(layer['dynamic'])))
else:
raise Exception(f'Unknown clustering type {layer["cluster_type"]}')
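# --- Illustrative layer table sketch (not part of the original module) ---
# add_other_contacts() iterates over a dataframe indexed by layer name with the
# columns referenced above; a minimal made-up example (layer names, ages and
# proportions are assumptions):
import numpy as np
import pandas as pd

_demo_layers = pd.DataFrame(
    {
        "cluster_type": ["random", "cluster", "complete"],
        "age_lb": [18, 0, 65],
        "age_ub": [np.nan, 18, np.nan],
        "proportion": [0.5, 0.3, 0.1],
        "contacts": [4, 10, np.nan],
        "dispersion": [np.nan, np.nan, np.nan],
        "dynamic": [np.nan, np.nan, np.nan],
    },
    index=["community", "sport", "aged_care"],
)
# add_other_contacts(people, _demo_layers) would then add three extra layers.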
## HELPERS
class RandomLayer(cv.Layer):
"""
Layer that can resample contacts on-demand
"""
def __init__(self, inds, mean_contacts, dispersion=None, dynamic=False):
"""
Args:
inds: indices of the people belonging to this layer
mean_contacts: mean number of contacts per person
dispersion: level of dispersion for the per-person contact counts; if None/NaN, a default draw is used (see ``update``)
dynamic: If True, the layer will change each timestep
"""
super().__init__()
self.inds = inds
self.mean_contacts = mean_contacts
self.dispersion = dispersion
self.dynamic = dynamic
self.update(force=True)
@staticmethod
@nb.njit
def _get_contacts(inds, number_of_contacts):
"""
Efficiently generate contacts
Note that because of the shuffling operation, each person is assigned 2N contacts
(i.e. if a person has 5 contacts, they appear 5 times in the 'source' array and 5
times in the 'target' array). This is why `clusters_to_layer` must add bidirectional
contacts as well, so that all contacts are consistently specified bidirectionally.
Args:
inds: List/array of person indices
number_of_contacts: List/array the same length as `inds`
Returns: Two arrays, for source and target
"""
total_number_of_half_edges = np.sum(number_of_contacts)
count = 0
source = np.zeros((total_number_of_half_edges,), dtype=cvd.default_int)
for i, person_id in enumerate(inds):
n_contacts = number_of_contacts[i]
source[count:count + n_contacts] = person_id
count += n_contacts
target = np.random.permutation(source)
return source, target
def update(self, force: bool = False) -> None:
"""
Regenerate contacts
Args:
force: If True, ignore the `self.dynamic` flag. This is required for initialization.
"""
if not self.dynamic and not force:
return
n_people = len(self.inds)
# sample the number of edges from a given distribution
if
|
pd.isna(self.dispersion)
|
pandas.isna
|
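# --- Illustrative sketch of the half-edge shuffle used by RandomLayer above ---
# (not part of the original module) Each person contributes one "stub" per contact;
# permuting the stub list and pairing source[i] with target[i] yields a random
# contact network, which is what the numba-compiled _get_contacts() implements.
import numpy as np

_inds = np.array([0, 1, 2, 3])
_n_contacts = np.array([2, 1, 3, 2])
_source = np.repeat(_inds, _n_contacts)     # equivalent to the explicit loop above
_target = np.random.permutation(_source)
# zip(_source, _target) is one random assignment of (bidirectional) half-edges.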
import pandas as pd
import json
import os
import numpy
import glob
from zipfile import ZipFile
### -------------------------------------Test and Help function -------------------------------------------------------
def test_me():
print("Hello World")
def help():
print('''
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
.d8888b. .d888 .d8888b. 888 8888888b. 888 888 888 d8b 888
d88P Y88b d88P" d88P Y88b 888 888 Y88b 888 888 888 Y8P 888
Y88b. 888 888 888 888 888 888 888 888 888 888
"Y888b. 8888b. 888888 .d88b. 888 888d888 8888b. 88888b. 88888b. 888 d88P 888 888 888888 88888b. .d88b. 88888b. 888 888 88888b. 888d888 8888b. 888d888 888 888
"Y88b. "88b 888 d8P Y8b 888 88888 888P" "88b 888 "88b 888 "88b 8888888P" 888 888 888 888 "88b d88""88b 888 "88b 888 888 888 "88b 888P" "88b 888P" 888 888
"888 .d888888 888 88888888 888 888 888 .d888888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 .d888888 888 888 888
Y88b d88P 888 888 888 Y8b. Y88b d88P 888 888 888 888 d88P 888 888 888 Y88b 888 Y88b. 888 888 Y88..88P 888 888 888 888 888 d88P 888 888 888 888 Y88b 888
"Y8888P" "Y888888 888 "Y8888 "Y8888P88 888 "Y888888 88888P" 888 888 888 "Y88888 "Y888 888 888 "Y88P" 888 888 88888888 888 88888P" 888 "Y888888 888 "Y88888
888 888 888
888 Y8b d88P Y8b d88P
888 "Y88P" "Y88P"
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
HELP:
Welcome to the safegraph helper function. Below you will find a list of functions and their arguments to aid in your data science journey. If you have further questions that cannot
be answered by this help command, please do not hesitate to ask for assistance in the #python_troubleshooting slack channel.
Key:
* - Required Argument
& - Boolean value
$ - Pandas *args and **kwargs are activated
Available Functions:
+ test_me() - A function to test the Python Library
----------------------[JSON Section]----------------------
+ unpack_json() - a function to explode JSON objects within pandas vertically into a new DF
**Arguments:
df*
json_column
key_col_name
value_col_name
+ unpack_json_and_merge() - a function to explode JSON objects within pandas vertically and add it to the current DF
**Arguments:
df*
json_column
key_col_name
value_col_name
keep_index (&)
+ explode_json_array() - This function vertically explodes an array column in SafeGraph data and creates a second new column indicating the index value from the array
**Arguments:
df*
array_column
value_col_name
place_key
file_key
array_sequence
keep_index (&)
verbose (&)
zero_index (&)
-----------------[CORE, GEO, and PATTERNS section]----------------------
+ read_core_folder() - a function that concats the core files together into 1 dataframe
**Arguments:
path_to_core*
compression
$
+ read_core_folder_zip() - used to read in the Core data from the zipped core file
**Arguments:
path_to_core*
compression
$
+ read_geo_zip() - used to read in the Core Geo data from a zipped file
**Arguments:
path_to_geo*
compression
$
+ read_pattern_single() - used to read in SafeGraph data pre June 15th
**Arguments:
f_path*
compression
$
+ read_pattern_multi() - used to read in SafeGraph pattern data that is broken into multiple files
**Arguments:
path_to_pattern*
compression
$
+ merge_core_pattern() - used to combine the core file and the pattern files on the SafeGraph ID
**Arguments:
core_df*
patterns_df*
how
$
''')
### -------------------------------------- JSON Functions ---------------------------------------------------------------
def unpack_json(df_, json_column='visitor_home_cbgs', key_col_name='visitor_home_cbg',
value_col_name='cbg_visitor_count'):
df = df_.copy()
if (df.index.unique().shape[0] < df.shape[0]):
raise ("ERROR -- non-unique index found")
df[json_column + '_dict'] = [json.loads(cbg_json) for cbg_json in df[json_column]]
all_sgpid_cbg_data = [] # each cbg data point will be one element in this list
for index, row in df.iterrows():
this_sgpid_cbg_data = [{'orig_index': index, key_col_name: key, value_col_name: value} for key, value in
row[json_column + '_dict'].items()]
all_sgpid_cbg_data = all_sgpid_cbg_data + this_sgpid_cbg_data
output = pd.DataFrame(all_sgpid_cbg_data)
output.set_index('orig_index', inplace=True)
return output
def unpack_json_and_merge(df, json_column='visitor_home_cbgs', key_col_name='visitor_home_cbg',
value_col_name='cbg_visitor_count', keep_index=False):
if (keep_index):
df['index_original'] = df.index
df = df.dropna(subset=[json_column]).copy() # Drop nan jsons
df.reset_index(drop=True, inplace=True) # Every row must have a unique index
df_exp = unpack_json(df, json_column=json_column, key_col_name=key_col_name, value_col_name=value_col_name)
df = df.merge(df_exp, left_index=True, right_index=True).reset_index(drop=True)
return df
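# --- Illustrative usage sketch (not part of the original helpers) ---
# A tiny made-up patterns-like frame with a JSON column, showing that
# unpack_json_and_merge() returns one row per (place, cbg) pair.
_demo = pd.DataFrame({
    'safegraph_place_id': ['sg:1', 'sg:2'],
    'visitor_home_cbgs': ['{"170310101001": 4, "170310101002": 7}', '{"060371234001": 5}'],
})
_demo_long = unpack_json_and_merge(_demo)
# _demo_long has 3 rows and the columns safegraph_place_id, visitor_home_cbgs,
# visitor_home_cbg and cbg_visitor_count.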
def explode_json_array(df_, array_column = 'visits_by_day', value_col_name='day_visit_counts',place_key='safegraph_place_id', file_key='date_range_start', array_sequence='day', keep_index=False, verbose=True, zero_index=False):
df = df_.copy()
if(verbose): print("Running explode_json_array()")
if(keep_index):
df['index_original'] = df.index
df.reset_index(drop=True, inplace=True) # THIS IS IMPORTANT; explode will not work correctly if index is not unique
df[array_column+'_json'] = [json.loads(myjson) for myjson in df[array_column]]
day_visits_exp = df[[place_key, file_key, array_column+'_json']].explode(array_column+'_json')
day_visits_exp['dummy_key'] = day_visits_exp.index
day_visits_exp[array_sequence] = day_visits_exp.groupby([place_key, file_key])['dummy_key'].rank(method='first', ascending=True).astype('int64')
if(zero_index):
day_visits_exp[array_sequence] = day_visits_exp[array_sequence] -1
day_visits_exp.drop(['dummy_key'], axis=1, inplace=True)
day_visits_exp.rename(columns={array_column+'_json': value_col_name}, inplace=True)
day_visits_exp[value_col_name] = day_visits_exp[value_col_name].astype('int64')
df.drop([array_column+'_json'], axis=1, inplace=True)
df =
|
pd.merge(df, day_visits_exp, on=[place_key,file_key])
|
pandas.merge
|
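# --- Illustrative usage sketch (not part of the original helpers) ---
# Assuming explode_json_array() above goes on to return the merged frame (as its
# final merge suggests), a made-up visits_by_day column is exploded like this:
import pandas as pd

_demo_visits = pd.DataFrame({
    'safegraph_place_id': ['sg:1'],
    'date_range_start': ['2020-03-02'],
    'visits_by_day': ['[3, 5, 2, 4, 6, 1, 0]'],
})
_demo_days = explode_json_array(_demo_visits, verbose=False)
# _demo_days would have 7 rows, one per day, with day_visit_counts and day columns.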
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
|
tm.assert_numpy_array_equal(result, expected)
|
pandas._testing.assert_numpy_array_equal
|
from __future__ import division
import numpy as np
import pandas as pd
import sys, os, csv
from src.utils import metadataExtractor, cxpPrinter
from src.analysis import extractFeaturesFromWell
from skimage.filters import threshold_otsu
def getPeakThreshold(config,wellmapping):
cxpPrinter.cxpPrint('Calculating peak threshold from control wells')
# get paths and metadata
metadata_dict = metadataExtractor.import_metadata(config)
outputdir = metadata_dict["config"]["var"]["outputdir"]
resourcesdir = metadata_dict["config"]["var"]["resourcesdir"]
# get list of control wells
with open(os.path.join(resourcesdir,'well-mappings', wellmapping + '.csv'), 'r') as f:
reader = csv.reader(f)
control_wells = list(reader)
control_wells = control_wells[0][1:] + control_wells[1][1:]
# ensure well data is available; compromised data might have been removed
actualWells = metadata_dict["well_names"]
control_wells = [well for well in control_wells if well in actualWells]
# perform (minimal) feature extraction on control wells
for well in control_wells:
extractFeaturesFromWell.extractFeaturesFromWell(config, well, controlWellsOnly=True)
# merge data from control wells
dataframes_norm = [pd.read_csv(os.path.join(outputdir,"{0}_features.csv".format(well))) for well in control_wells]
df_plate_norm =
|
pd.concat(dataframes_norm)
|
pandas.concat
|
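# --- Illustrative sketch (not part of the original analysis code) ---
# getPeakThreshold() above presumably goes on to derive a threshold from the pooled
# control-well features; skimage's Otsu threshold on a pooled 1-D feature vector
# looks roughly like this (the bimodal demo data is a made-up assumption):
import numpy as np
from skimage.filters import threshold_otsu

_demo_feature = np.concatenate([np.random.normal(1.0, 0.2, 500),
                                np.random.normal(5.0, 0.5, 500)])
_demo_threshold = threshold_otsu(_demo_feature)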
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 09:04:46 2017
@author: <NAME>
pygemfxns_plotting.py produces figures of simulation results
"""
# Built-in Libraries
import os
import collections
# External Libraries
import numpy as np
import pandas as pd
#import netCDF4 as nc
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
import matplotlib.patches as mpatches
import scipy
from scipy import stats
from scipy.ndimage import uniform_filter
import cartopy
#import geopandas
import xarray as xr
from osgeo import gdal, ogr, osr
import pickle
# Local Libraries
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
import pygemfxns_massbalance as massbalance
import pygemfxns_gcmbiasadj as gcmbiasadj
import class_mbdata
import class_climate
#import run_simulation
# Script options
option_plot_cmip5_normalizedchange = 1
option_plot_cmip5_runoffcomponents = 0
option_plot_cmip5_map = 0
option_output_tables = 0
option_subset_GRACE = 0
option_plot_modelparam = 0
option_plot_era_normalizedchange = 1
option_compare_GCMwCal = 0
option_plot_mcmc_errors = 0
option_plot_maxloss_issues = 0
option_plot_individual_glaciers = 0
option_plot_degrees = 0
option_plot_pies = 0
option_plot_individual_gcms = 0
#%% ===== Input data =====
netcdf_fp_cmip5 = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/spc/'
netcdf_fp_era = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/ERA-Interim/ERA-Interim_1980_2017_nochg'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_allglac_1ch_tn_20190108/'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190222_adjp10/'
mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
figure_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/figures/cmip5/'
csv_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/csv/cmip5/'
cal_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
# Regions
rgi_regions = [13, 14, 15]
#rgi_regions = [13]
# Shapefiles
rgiO1_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/RGI/rgi60/00_rgi60_regions/00_rgi60_O1Regions.shp'
watershed_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/HMA_basins_20181018_4plot.shp'
kaab_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/kaab2015_regions.shp'
srtm_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA.tif'
srtm_contour_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA_countours_2km_gt3000m_smooth.shp'
rgi_glac_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA.shp'
#kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_w_watersheds_kaab.csv'
#kaab_csv = pd.read_csv(kaab_dict_fn)
#kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab))
# GCMs and RCP scenarios
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'IPSL-CM5A-MR', 'MIROC5', 'MRI-CGCM3', 'NorESM1-M']
gcm_names = ['CanESM2']
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'MPI-ESM-LR', 'NorESM1-M']
rcps = ['rcp26', 'rcp45', 'rcp85']
#rcps = ['rcp26']
# Grouping
grouping = 'all'
#grouping = 'rgi_region'
#grouping = 'watershed'
#grouping = 'kaab'
# Variable name
vn = 'mass_change'
#vn = 'volume_norm'
#vn = 'peakwater'
# Group dictionaries
watershed_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_watershed.csv'
watershed_csv = pd.read_csv(watershed_dict_fn)
watershed_dict = dict(zip(watershed_csv.RGIId, watershed_csv.watershed))
kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_kaab.csv'
kaab_csv =
|
pd.read_csv(kaab_dict_fn)
|
pandas.read_csv
|
import unittest
from pandas import (
Timestamp,
DataFrame,
concat,
MultiIndex
)
from toolbox.constitutes.constitute_adjustment import ConstituteAdjustment
class ConstituteAdjustmentTest(unittest.TestCase):
def examples(self):
self.foo_constitutes = DataFrame(data=[
# symbol entered exited
['BOB', '20090101', '20120101'], # whole thing
['LARY', '20100105', '20100107'], # added and then exited
['JEFF', '20110302', '20200302']], # added too late
columns=['symbol', 'from', 'thru']
)
self.ca = ConstituteAdjustment()
self.ca.add_index_info(start_date=Timestamp(year=2010, month=1, day=4, tz='UTC'),
end_date=Timestamp(year=2010, month=1, day=12, tz='UTC'),
index_constitutes=self.foo_constitutes, date_format='%Y%m%d')
self.foo_data = DataFrame(
data=[['BOB', '2010-01-04', 50],
['BOB', '2010-01-05', 51],
['BOB', '2010-01-06', 52],
['BOB', '2010-01-07', 53],
# ['BOB', '2010-01-08', 54], this will be missing data
['BOB', '2010-01-11', 55],
['BOB', '2010-01-12', 56],
['LARY', '2010-01-04', 20], # should not be included
['LARY', '2010-01-05', 21],
['LARY', '2010-01-06', 22],
['LARY', '2010-01-07', 23],
['LARY', '2010-01-08', 24], # should not be included
['LARY', '2010-01-11', 25], # should not be included
['LARY', '2010-01-12', 26], # should not be included
['LARY', '2010-01-13', 27], # should not be included
['FOO', '2010-01-08', 0]], # should be ignored
columns=['symbol', 'date', 'factor'])
self.adjusted_foo = DataFrame(
data=[['BOB', Timestamp('2010-01-04', tz='UTC'), 50],
['BOB', Timestamp('2010-01-05', tz='UTC'), 51],
['BOB', Timestamp('2010-01-06', tz='UTC'), 52],
['BOB', Timestamp('2010-01-07', tz='UTC'), 53],
['BOB', Timestamp('2010-01-08', tz='UTC'), None],
['BOB', Timestamp('2010-01-11', tz='UTC'), 55],
['BOB', Timestamp('2010-01-12', tz='UTC'), 56],
['LARY', Timestamp('2010-01-05', tz='UTC'), 21],
['LARY', Timestamp('2010-01-06', tz='UTC'), 22],
['LARY', Timestamp('2010-01-07', tz='UTC'), 23]],
columns=['symbol', 'date', 'factor']).set_index(['date', 'symbol'])
pricing_data = DataFrame(
data=[['LARY', Timestamp('2010-01-08', tz='UTC'), 24],
['LARY', Timestamp('2010-01-11', tz='UTC'), 25],
['LARY', Timestamp('2010-01-12', tz='UTC'), 26]],
columns=['symbol', 'date', 'factor']).set_index(['date', 'symbol'])
self.adjusted_pricing = concat([pricing_data, self.adjusted_foo]).sort_values(['symbol', 'date'])
#
# ************************************ add_index_info ************************************
#
def test_factor_add_index_info(self):
"""
Tests the index generation in add_index_info.
The data includes missing entries (None), rows that should not be included (symbol not yet added,
or already removed) and irrelevant symbols.
"""
self.examples()
# for factors
factor_components = [(Timestamp('2010-01-04', tz='UTC'), 'BOB'),
(Timestamp('2010-01-05', tz='UTC'), 'BOB'),
(Timestamp('2010-01-06', tz='UTC'), 'BOB'),
(
|
Timestamp('2010-01-07', tz='UTC')
|
pandas.Timestamp
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict
import requests
from pandas import DataFrame
from lib.concurrent import thread_map
from lib.data_source import DataSource
from lib.time import date_range, date_today
_api_url_tpl = "https://api-covid19.rnbo.gov.ua/data?to={date}"
def _get_daily_records(date: str):
records = []
url = _api_url_tpl.format(date=date)
daily_data = requests.get(url, timeout=60).json().get("ukraine", [])
for record in daily_data:
records.append(
{
"date": date,
"country_code": "UA",
"match_string": record.get("label", {}).get("en"),
"total_confirmed": record.get("confirmed"),
"total_deceased": record.get("deaths"),
"total_recovered": record.get("recovered"),
}
)
return records
class UkraineDataSource(DataSource):
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
# Data can only be retrieved one day at a time, and it starts on 2020-01-22
first = "2020-01-22"
map_iter = list(date_range(first, date_today()))
records = sum(thread_map(_get_daily_records, map_iter), [])
return
|
DataFrame.from_records(records)
|
pandas.DataFrame.from_records
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Race-car Data Creation Class.
This script contains all utilities to create proper dataset.
Revision History:
2020-05-10 (Animesh): Baseline Software.
2020-08-22 (Animesh): Updated Docstring.
Example:
from _data_handler import DataHandler
"""
#___Import Modules:
import os
import random
import pandas as pd
import matplotlib.pyplot as plt
from rc_nn_utility import ParseData
#___Global Variables:
SEED = 717
#__Classes:
class DataHandler:
"""Data Creation Utility Class.
This class contains all methods needed to create datasets, such as a randomly
shuffled dataset or a 5-fold cross-validation dataset.
"""
def __init__(self):
"""Constructor.
"""
pass
def merge_all(self, idir, output):
"""File Merger.
This method merges contents from multiple csv files.
Args:
idir (directory path): Directory path containing all csv files.
output (csv file): File containing all contents.
Returns:
None: The merged contents are written to ``output``.
"""
# read all files from provided folder
files = os.listdir(idir)
content = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
content.extend(pd.read_csv(os.path.join(idir, \
ifile))['image'].to_list())
# write merged contents to output file
pd.DataFrame(content, columns =['image']).to_csv(output, index=False)
return None
def list_merge(self, lists):
"""List Merger.
This method merges contents from multiple lists.
Args:
lists (list): List of multiple lists to merge.
Returns:
data (list): Merged list.
"""
# loop over lists and put them all in one list
data = []
for list in lists:
data.extend(list)
return data
def refine_running(self, input, output, speed = 15):
"""Refine Running.
This method removes data with provided motor value from a list.
Args:
input (csv file): File containing contents to refine.
output (csv file): File containing refined contents.
speed (int): Motor value to be removed.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(input)
file = []
for index in range(len(data)):
# parse motor data to verify speed
_,_,mot = parsedata.parse_data(data["image"][index])
# append data if car is runneing
if mot != speed:
file.append(data["image"][index])
# write merged contents to output file
pd.DataFrame(file, columns=["image"]).to_csv(output, index=False)
return None
def histogram(self, ilist, odir):
"""Plot Histogram.
This method plots histogram from servo and motor value parsed from a
list of images.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
servo = []
motor = []
for index in range(len(data)):
# parse servo and motor data
_,ser,mot = parsedata.parse_data(data["image"][index])
servo.append(ser)
motor.append(mot)
# plot histogram of servo data
plt.figure()
plt.hist(servo, bins=11)
plt.title("Servo Data Histogram")
plt.savefig(os.path.join(odir,"Servo Data Histogram.png"))
# plot histogram of motor data
plt.figure()
plt.hist(motor, bins=11)
plt.title("Motor Data Histogram")
plt.savefig(os.path.join(odir,"Motor Data Histogram.png"))
return None
def devide_data(self, ilist, odir):
"""Dataset Devider.
This method devides dataset according to servo value.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
data_10 = []
data_11 = []
data_12 = []
data_13 = []
data_14 = []
data_15 = []
data_16 = []
data_17 = []
data_18 = []
data_19 = []
data_20 = []
for index in range(len(data)):
# parse servo and motor data
_,servo,_ = parsedata.parse_data(data["image"][index])
# divide dataset
if servo == 10:
data_10.append(data["image"][index])
elif servo == 11:
data_11.append(data["image"][index])
elif servo == 12:
data_12.append(data["image"][index])
elif servo == 13:
data_13.append(data["image"][index])
elif servo == 14:
data_14.append(data["image"][index])
elif servo == 15:
data_15.append(data["image"][index])
elif servo == 16:
data_16.append(data["image"][index])
elif servo == 17:
data_17.append(data["image"][index])
elif servo == 18:
data_18.append(data["image"][index])
elif servo == 19:
data_19.append(data["image"][index])
elif servo == 20:
data_20.append(data["image"][index])
# write data
pd.DataFrame(data_10, columns=["image"]).to_csv(os.path.join(odir, \
"servo_10.csv"), index=False)
pd.DataFrame(data_11, columns=["image"]).to_csv(os.path.join(odir, \
"servo_11.csv"), index=False)
pd.DataFrame(data_12, columns=["image"]).to_csv(os.path.join(odir, \
"servo_12.csv"), index=False)
pd.DataFrame(data_13, columns=["image"]).to_csv(os.path.join(odir, \
"servo_13.csv"), index=False)
pd.DataFrame(data_14, columns=["image"]).to_csv(os.path.join(odir, \
"servo_14.csv"), index=False)
pd.DataFrame(data_15, columns=["image"]).to_csv(os.path.join(odir, \
"servo_15.csv"), index=False)
pd.DataFrame(data_16, columns=["image"]).to_csv(os.path.join(odir, \
"servo_16.csv"), index=False)
pd.DataFrame(data_17, columns=["image"]).to_csv(os.path.join(odir, \
"servo_17.csv"), index=False)
pd.DataFrame(data_18, columns=["image"]).to_csv(os.path.join(odir, \
"servo_18.csv"), index=False)
pd.DataFrame(data_19, columns=["image"]).to_csv(os.path.join(odir, \
"servo_19.csv"), index=False)
pd.DataFrame(data_20, columns=["image"]).to_csv(os.path.join(odir, \
"servo_20.csv"), index=False)
return None
def train_test_dev(self, type, idir, odir, ratio=None, total=None):
"""Final Dataset Creator.
This method creates train, test and dev dataset.
Args:
type (string): Determines the type of input dataset
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
ratio (list): List containing ratio of train, test and dev dataset.
total (list): List containing the number of total data to be parsed
from each CSV file.
"""
if type == "random":
self.random(idir, odir, ratio)
elif type == "folded":
self.folded(idir, odir)
elif type == "controlled":
self.controlled(idir, odir, ratio, total)
return None
def random(self, idir, odir, ratio):
"""Randomly Shuffled Dataset Creator.
This method creates a randomly shuffled train, test and dev dataset.
Args:
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
ratio (list): List containing ratio of train, test and dev dataset.
"""
# read all files from provided folder
files = os.listdir(idir)
content = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
content.extend(pd.read_csv(os.path.join(idir, \
ifile))['image'].to_list())
# randomly shuffle dataset
random.shuffle(content)
# divide dataset into train, test, dev set according to given ratio
train = content[0:int(ratio[0]*len(content))]
test = content[int(ratio[0]*len(content)):
int((ratio[0]+ratio[1])*len(content))]
dev = content[int((ratio[0]+ratio[1])*len(content)):]
# write data
pd.DataFrame(train, columns=["image"]).to_csv(odir + 'train.csv',
index=False)
pd.DataFrame(test, columns=["image"]).to_csv(odir + 'test.csv',
index=False)
pd.DataFrame(dev, columns=["image"]).to_csv(odir + 'dev.csv',
index=False)
return None
def folded(self, idir, odir):
"""5 Fold Cross-Validation Dataset Creator.
This method creates a 5-fold cross-validation dataset.
Args:
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
"""
# read all files from provided folder
files = os.listdir(idir)
D10 = []
D11 = []
D20 = []
D21 = []
D30 = []
D31 = []
D40 = []
D41 = []
D50 = []
D51 = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
data = pd.read_csv(idir + ifile)
D10.extend(data['image'][0:int(len(data)/10)])
D11.extend(data['image'][int(len(data)/10):2*int(len(data)/10)])
D20.extend(data['image'][2*int(len(data)/10):3*int(len(data)/10)])
D21.extend(data['image'][3*int(len(data)/10):4*int(len(data)/10)])
D30.extend(data['image'][4*int(len(data)/10):5*int(len(data)/10)])
D31.extend(data['image'][5*int(len(data)/10):6*int(len(data)/10)])
D40.extend(data['image'][6*int(len(data)/10):7*int(len(data)/10)])
D41.extend(data['image'][7*int(len(data)/10):8*int(len(data)/10)])
D50.extend(data['image'][8*int(len(data)/10):9*int(len(data)/10)])
D51.extend(data['image'][9*int(len(data)/10):])
# create 5 folds of train, test and dev dataset
train1 = self.list_merge([D10,D11,D20,D21,D30,D31,D40,D41])
train2 = self.list_merge([D20,D21,D30,D31,D40,D41,D50,D51])
train3 = self.list_merge([D10,D11,D30,D31,D40,D41,D50,D51])
train4 = self.list_merge([D10,D11,D20,D21,D40,D41,D50,D51])
train5 = self.list_merge([D10,D11,D20,D21,D30,D31,D50,D51])
test1 = D50
test2 = D10
test3 = D20
test4 = D30
test5 = D40
dev1 = D51
dev2 = D11
dev3 = D21
dev4 = D31
dev5 = D41
# create required directories
if not os.path.exists(odir + 'fold1/'):
os.mkdir(odir + 'fold1/')
if not os.path.exists(odir + 'fold2/'):
os.mkdir(odir + 'fold2/')
if not os.path.exists(odir + 'fold3/'):
os.mkdir(odir + 'fold3/')
if not os.path.exists(odir + 'fold4/'):
os.mkdir(odir + 'fold4/')
if not os.path.exists(odir + 'fold5/'):
os.mkdir(odir + 'fold5/')
# write data
pd.DataFrame(train1,columns=["image"]).to_csv(odir + 'fold1/train.csv',
index=False)
pd.DataFrame(train2,columns=["image"]).to_csv(odir + 'fold2/train.csv',
index=False)
pd.DataFrame(train3,columns=["image"]).to_csv(odir + 'fold3/train.csv',
index=False)
pd.DataFrame(train4,columns=["image"]).to_csv(odir + 'fold4/train.csv',
index=False)
pd.DataFrame(train5,columns=["image"]).to_csv(odir + 'fold5/train.csv',
index=False)
pd.DataFrame(test1,columns=["image"]).to_csv(odir + 'fold1/test.csv',
index=False)
pd.DataFrame(test2,columns=["image"]).to_csv(odir + 'fold2/test.csv',
index=False)
pd.DataFrame(test3,columns=["image"]).to_csv(odir + 'fold3/test.csv',
index=False)
pd.DataFrame(test4,columns=["image"]).to_csv(odir + 'fold4/test.csv',
index=False)
pd.DataFrame(test5,columns=["image"]).to_csv(odir + 'fold5/test.csv',
index=False)
pd.DataFrame(dev1,columns=["image"]).to_csv(odir + 'fold1/dev.csv',
index=False)
pd.DataFrame(dev2,columns=["image"]).to_csv(odir + 'fold2/dev.csv',
index=False)
pd.DataFrame(dev3,columns=["image"]).to_csv(odir + 'fold3/dev.csv',
index=False)
|
pd.DataFrame(dev4,columns=["image"])
|
pandas.DataFrame
|
import csv
import pandas as pd
import seaborn as sns
class Recommendation(object):
def similarMovie(self):
sns.set_style('dark')
        ratings_data = pd.read_csv(r"C:\Users\<NAME>a\Videos\ml-latest-small\ratings.csv")
movie_names =
|
pd.read_csv(r"C:\Users\<NAME>a\Videos\ml-latest-small\movies.csv")
|
pandas.read_csv
|
# %%
# practice computer vision competition
# https://www.kaggle.com/c/digit-recognizer/
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import datetime
# load training and test data
train_data = pd.read_csv('data/handwritten-digits_MNIST/train.csv')
test_data = pd.read_csv('data/handwritten-digits_MNIST/test.csv')
# convert to 28x28 Tensors
X = train_data.drop('label', axis=1).to_numpy()
X = X.reshape(len(X[:, 0]), 28, 28)
#X = [tf.constant(image) for image in X]
y = train_data.loc[:, 'label']
# convert test data to 28x28 Tensors
X_test = test_data.to_numpy()
X_test = X_test.reshape(len(X_test[:, 0]), 28, 28)
#X_test = [tf.constant(image) for image in X_test]
# plot a few examples
nrows = 5
ncols = 5
plt.figure(figsize=(nrows,ncols))
for i in range(nrows*ncols):
plt.subplot(nrows, ncols, i+1)
plt.imshow(X[i], cmap='Greys')
plt.axis('off')
plt.text(14, 0, str(y[i]), horizontalalignment='center', verticalalignment='center') # plot label above image
plt.show()
y = pd.get_dummies(y).to_numpy() # one-hot encoded to be compatible with model
# split train and test data
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=42)
# add the channel dimension Conv2D expects ((28, 28) -> (28, 28, 1)) so model.fit receives 4D input
X_train = X_train[..., np.newaxis]
X_valid = X_valid[..., np.newaxis]
X_test = X_test[..., np.newaxis]
# %%
# set up classifier
input_shape = X_train[0].shape
model = tf.keras.Sequential([
# base CNN layers
layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same', input_shape = [28, 28, 1]),
layers.BatchNormalization(),
layers.MaxPool2D(pool_size=(2, 2)),
layers.Dropout(0.2),
layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same'),
layers.BatchNormalization(),
layers.MaxPool2D(pool_size=(2, 2)),
layers.Dropout(0.2),
layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same'),
layers.BatchNormalization(),
layers.MaxPool2D(pool_size=(2, 2)),
layers.Dropout(0.2),
# head neural net layers
layers.Flatten(),
layers.Dense(256, activation='relu'),
layers.Dropout(0.35),
layers.Dense(256, activation='relu'),
layers.Dropout(0.35),
layers.Dense(256, activation='relu'),
layers.Dropout(0.35),
layers.Dense(10, activation='softmax') # 10 required to account for [0,1,2,3,4,5,6,7,8,9] classes based on categorical_crossentropy
])
# compile models
model.compile(
optimizer = 'adam',
loss = 'categorical_crossentropy',
metrics = ['accuracy']
)
# %%
# fit models
epochs = 50
# I found that one has to monitor early stopping:
# if it triggers after just a few epochs, the model does not generalize well and performs poorly
early_stopping = EarlyStopping(
monitor='val_accuracy',
patience=10,
restore_best_weights=True,
mode='max'
)
history = model.fit(
X_train, y_train,
    validation_data = (X_valid, y_valid),
epochs = epochs,
callbacks = [early_stopping]
)
# %%
# plot loss and accuracy
history_df =
|
pd.DataFrame(history.history)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Credits: <NAME>, <NAME>
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"
# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import fire
import yaml
import tensorflow as tf
import numpy as np
from Bio import SeqIO
import pandas as pd
import ray
from utils import preprocess as pp
from pathlib import Path
from models import model_5, model_7, model_10
from joblib import load
import psutil
def predict_nn(ds_path, nn_weights_path, length, n_cpus=3, batch_size=256):
"""
Breaks down contigs into fragments
and uses pretrained neural networks to give predictions for fragments
"""
pid = psutil.Process(os.getpid())
pid.cpu_affinity(range(n_cpus))
print("loading sequences for prediction")
try:
seqs_ = list(SeqIO.parse(ds_path, "fasta"))
except FileNotFoundError:
raise Exception("test dataset was not found. Change ds variable")
print("generating viral fragments and labels")
out_table = {
"id": [],
"length": [],
"fragment": [],
"pred_plant_5": [],
"pred_vir_5": [],
"pred_bact_5": [],
"pred_plant_7": [],
"pred_vir_7": [],
"pred_bact_7": [],
"pred_plant_10": [],
"pred_vir_10": [],
"pred_bact_10": [],
}
if not seqs_:
raise ValueError("All sequences were smaller than length of the model")
test_fragments = []
test_fragments_rc = []
ray.init(num_cpus=n_cpus, num_gpus=0, include_dashboard=False)
for seq in seqs_:
fragments_, fragments_rc, _ = pp.fragmenting([seq], length, max_gap=0.8,
sl_wind_step=int(length / 2))
test_fragments.extend(fragments_)
test_fragments_rc.extend(fragments_rc)
for j in range(len(fragments_)):
out_table["id"].append(seq.id)
out_table["length"].append(len(seq.seq))
out_table["fragment"].append(j)
it = pp.chunks(test_fragments, int(len(test_fragments) / n_cpus + 1))
test_encoded = np.concatenate(ray.get([pp.one_hot_encode.remote(s) for s in it]))
it = pp.chunks(test_fragments_rc, int(len(test_fragments_rc) / n_cpus + 1))
test_encoded_rc = np.concatenate(ray.get([pp.one_hot_encode.remote(s) for s in it]))
print('Encoding sequences finished')
print(
f"{np.shape(test_encoded)[0]} + {np.shape(test_encoded_rc)[0]} fragments generated")
ray.shutdown()
print('Starting sequence prediction')
for model, s in zip([model_5.model(length), model_7.model(length), model_10.model(length)], [5, 7, 10]):
model.load_weights(Path(nn_weights_path, f"model_{s}.h5"))
prediction = model.predict([test_encoded, test_encoded_rc], batch_size)
out_table[f"pred_plant_{s}"].extend(list(prediction[..., 0]))
out_table[f"pred_vir_{s}"].extend(list(prediction[..., 1]))
out_table[f"pred_bact_{s}"].extend(list(prediction[..., 2]))
print('Exporting predictions to csv file')
return pd.DataFrame(out_table)
def predict_rf(df, rf_weights_path):
"""
    Uses the predictions from predict_nn and the weights of a trained RF classifier to give a single prediction for each fragment
"""
clf = load(Path(rf_weights_path, "RF.joblib"))
X = df[
["pred_plant_5", "pred_vir_5", "pred_plant_7", "pred_vir_7", "pred_plant_10", "pred_vir_10", ]]
y_pred = clf.predict(X)
mapping = {0: "plant", 1: "virus", 2: "bacteria"}
df["RF_decision"] = np.vectorize(mapping.get)(y_pred)
prob_classes = clf.predict_proba(X)
df["RF_pred_plant"] = prob_classes[..., 0]
df["RF_pred_vir"] = prob_classes[..., 1]
df["RF_pred_bact"] = prob_classes[..., 2]
return df
def predict_contigs(df):
"""
    Based on the fragment predictions from predict_rf, gives a final prediction for the whole contig
"""
df = (
df.groupby(["id", "length", 'RF_decision'], sort=False)
.size()
.unstack(fill_value=0)
)
df = df.reset_index()
df = df.reindex(['length', 'id', 'virus', 'plant', 'bacteria'], axis=1)
conditions = [
(df['virus'] > df['plant']) & (df['virus'] > df['bacteria']),
(df['plant'] > df['virus']) & (df['plant'] > df['bacteria']),
(df['bacteria'] >= df['plant']) & (df['bacteria'] >= df['virus']),
]
choices = ['virus', 'plant', 'bacteria']
df['decision'] = np.select(conditions, choices, default='bacteria')
df = df.sort_values(by='length', ascending=False)
df = df.loc[:, ['length', 'id', 'virus', 'plant', 'bacteria', 'decision']]
df = df.rename(columns={'virus': '# viral fragments', 'bacteria': '# bacterial fragments', 'plant': '# plant fragments'})
return df
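# Hedged end-to-end usage sketch for the three functions above (the file paths and
# weight directories are assumptions, not taken from this script):
#   fragment_df = predict_nn(ds_path="contigs.fasta", nn_weights_path="weights/1000", length=1000, n_cpus=4)
#   fragment_df = predict_rf(fragment_df, rf_weights_path="weights/rf_1000")
#   contig_df = predict_contigs(fragment_df)
#   contig_df.to_csv("predictions.csv", index=False)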
def launch_predict(config):
"""
Function for realizing full prediction pipeline
"""
with open(config, "r") as yamlfile:
cf = yaml.load(yamlfile, Loader=yaml.FullLoader)
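    # Expected config layout, inferred from the keys accessed below (an assumption,
    # not taken from the original repository):
    # - predict:
    #     ds_path: contigs.fasta
    #     n_cpus: 4
    #     nn_weights_path_500: weights/500
    #     nn_weights_path_1000: weights/1000
    #     rf_weights_path_500: weights/rf_500
    #     rf_weights_path_1000: weights/rf_1000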
dfs_fr = []
dfs_cont = []
for l_ in 500, 1000:
df = predict_nn(
ds_path=cf[0]["predict"]["ds_path"],
nn_weights_path=cf[0]["predict"][f"nn_weights_path_{l_}"],
length=l_,
n_cpus=cf[0]["predict"]["n_cpus"],
)
df = predict_rf(
df=df,
rf_weights_path=cf[0]["predict"][f"rf_weights_path_{l_}"],
)
dfs_fr.append(df)
df = predict_contigs(df)
dfs_cont.append(df)
df_500 = dfs_fr[0][(dfs_fr[0]['length'] >= 750) & (dfs_fr[0]['length'] < 1500)]
df_1000 = dfs_fr[1][(dfs_fr[1]['length'] >= 1500)]
df =
|
pd.concat([df_1000, df_500], ignore_index=True)
|
pandas.concat
|
import typing
import pandas as pd
import copy
import os
import random
import collections
import typing
import logging
import json
import re
import io
import string
import time
import cgitb
import sys
from ast import literal_eval
from itertools import combinations
from d3m import container
from d3m import utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.container import Dataset as d3m_Dataset
from d3m.base import utils as d3m_utils
from d3m.metadata.base import DataMetadata, ALL_ELEMENTS
from collections import defaultdict
from datamart import TabularVariable, ColumnRelationship, AugmentSpec
from datamart_isi import config
from datamart_isi.augment import Augment
from datamart_isi.joiners.rltk_joiner import RLTKJoinerGeneral
from datamart_isi.joiners.rltk_joiner import RLTKJoinerWikidata
from datamart_isi.utilities.utils import Utils
from datamart_isi.utilities.timeout import timeout_call
from datamart_isi.utilities.singleton import singleton
from datamart_isi.utilities import d3m_wikifier
from datamart_isi.utilities.d3m_metadata import MetadataGenerator
from datamart_isi.utilities.download_manager import DownloadManager
from datamart_isi.cache.wikidata_cache import QueryCache
from datamart_isi.cache.general_search_cache import GeneralSearchCache
from datamart_isi.cache.metadata_cache import MetadataCache
from datamart_isi.cache.materializer_cache import MaterializerCache
# from datamart_isi.joiners.join_result import JoinResult
# from datamart_isi.joiners.joiner_base import JoinerType
__all__ = ('DatamartQueryCursor', 'Datamart', 'DatasetColumn', 'DatamartSearchResult', 'AugmentSpec',
'TabularJoinSpec', 'TemporalGranularity', 'ColumnRelationship', 'DatamartQuery',
'VariableConstraint', 'TabularVariable', 'VariableConstraint')
Q_NODE_SEMANTIC_TYPE = config.q_node_semantic_type
TEXT_SEMANTIC_TYPE = config.text_semantic_type
ATTRIBUTE_SEMANTIC_TYPE = config.attribute_semantic_type
AUGMENTED_COLUMN_SEMANTIC_TYPE = config.augmented_column_semantic_type
TIME_SEMANTIC_TYPE = config.time_semantic_type
MAX_ENTITIES_LENGTH = config.max_entities_length
P_NODE_IGNORE_LIST = config.p_nodes_ignore_list
SPECIAL_REQUEST_FOR_P_NODE = config.special_request_for_p_nodes
AUGMENT_RESOURCE_ID = config.augmented_resource_id
DEFAULT_DATAMART_URL = config.default_datamart_url
TIME_COLUMN_MARK = config.time_column_mark
random.seed(42)
class DatamartQueryCursor(object):
"""
Cursor to iterate through Datamarts search results.
"""
def __init__(self, augmenter, search_query, supplied_data, need_run_wikifier=None, connection_url=None, **kwargs):
"""
        :param augmenter: The manager used to parse the query and search the general datamart part (blaze graph);
                          because it searches quickly and needs instance updates, we should not cache this part
        :param search_query: query generated from the Datamart class
        :param supplied_data: supplied data for the search
        :param need_run_wikifier: an optional parameter that controls whether the wikifier needs to run to get the
                                  wikidata-related parts; it can improve speed when processing large data
        :param connection_url: control parameter for the connection url
"""
self._logger = logging.getLogger(__name__)
if connection_url:
self._logger.info("Using user-defined connection url as " + connection_url)
self.connection_url = connection_url
else:
connection_url = os.getenv('DATAMART_URL_ISI', DEFAULT_DATAMART_URL)
self.connection_url = connection_url
self.supplied_data = supplied_data
if type(self.supplied_data) is d3m_Dataset:
self.res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=self.supplied_data, resource_id=None)
else:
self.supplied_dataframe = self.supplied_data
self._logger.debug("Current datamart connection url is: " + self.connection_url)
self.augmenter = augmenter
self.search_query = search_query
self.current_searching_query_index = 0
self.remained_part = None
self.wikidata_cache_manager = QueryCache()
self.q_nodes_columns = list()
self.q_node_column_names = set()
if need_run_wikifier is None:
self.need_run_wikifier = self._check_need_wikifier_or_not()
else:
self.need_run_wikifier = need_run_wikifier
self.consider_wikifier_columns_only = kwargs.get("consider_wikifier_columns_only", False)
self.augment_with_time = kwargs.get("augment_with_time", False)
self.consider_time = kwargs.get("consider_time", True)
if self.consider_wikifier_columns_only:
self._find_q_node_columns()
self.search_limit_amount = 20
def get_next_page(self, *, limit: typing.Optional[int] = 20, timeout: int = None) \
-> typing.Optional[typing.Sequence['DatamartSearchResult']]:
"""
Return the next page of results. The call will block until the results are ready.
Note that the results are not ordered; the first page of results can be returned first simply because it was
found faster, but the next page might contain better results. The caller should make sure to check
`DatamartSearchResult.score()`.
Parameters
----------
limit : int or None
Maximum number of search results to return. None means no limit.
timeout : int
Maximum number of seconds before returning results. An empty list might be returned if it is reached.
Returns
-------
Sequence[DatamartSearchResult] or None
            A list of `DatamartSearchResult`s, or None if there are no more results.
"""
if timeout is None:
timeout = 1800
self._logger.info("Set time limit to be " + str(timeout) + " seconds.")
if limit is not None:
self.search_limit_amount = limit
# if need to run wikifier, run it before any search
if self.current_searching_query_index == 0 and self.need_run_wikifier:
self.supplied_data = self.run_wikifier(self.supplied_data)
        # if enough results remain from a previous search, return them directly
        current_result = self.remained_part or []
        if limit is not None and len(current_result) > limit:
self.remained_part = current_result[limit:]
current_result = current_result[:limit]
return current_result
# start searching
while self.current_searching_query_index < len(self.search_query):
time_start = time.time()
self._logger.debug("Start searching on query No." + str(self.current_searching_query_index))
if self.search_query[self.current_searching_query_index].search_type == "wikidata":
# TODO: now wikifier can only automatically search for all possible columns and do exact match
search_res = timeout_call(timeout, self._search_wikidata, [])
elif self.search_query[self.current_searching_query_index].search_type == "general":
search_res = timeout_call(timeout, self._search_datamart, [])
elif self.search_query[self.current_searching_query_index].search_type == "vector":
search_res = timeout_call(timeout, self._search_vector, [])
elif self.search_query[self.current_searching_query_index].search_type == "geospatial":
search_res = timeout_call(timeout, self._search_geospatial_data, [])
else:
raise ValueError("Unknown search query type for " +
self.search_query[self.current_searching_query_index].search_type)
time_used = (time.time() - time_start)
timeout -= time_used
if search_res is not None:
self._logger.info("Running search on query No." + str(self.current_searching_query_index) + " used "
+ str(time_used) + " seconds and finished.")
self._logger.info("Remained searching time: " + str(timeout) + " seconds.")
elif timeout <= 0:
self._logger.error(
"Running search on query No." + str(self.current_searching_query_index) + " timeout!")
break
else:
self._logger.error("Running search on query No." + str(self.current_searching_query_index) + " failed!")
self.current_searching_query_index += 1
if search_res is not None:
self._logger.info("Totally {} results found.".format(str(len(search_res))))
current_result.extend(search_res)
if len(current_result) == 0:
self._logger.warning("No search results found!")
return None
else:
current_result = sorted(current_result, key=lambda x: x.score(), reverse=True)
            if limit is not None and len(current_result) > limit:
self.remained_part = current_result[limit:]
current_result = current_result[:limit]
return current_result
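    # Hedged usage sketch for get_next_page (the cursor construction and variable
    # names below are illustrative assumptions):
    #   cursor = Datamart().search_with_data(query=my_query, supplied_data=my_dataset)
    #   page = cursor.get_next_page(limit=20, timeout=300)
    #   if page is not None:
    #       best = page[0]  # pages are sorted by score, highest first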
def _check_need_wikifier_or_not(self) -> bool:
"""
        Check whether the wikifier needs to run or not; if a wikidata-type column is detected, that column's semantic
        type is also checked in case no Q node semantic type exists
        :return: a bool value
        True means Q node columns were already detected, so running the wikifier can be skipped
        False means no Q node columns were detected, so the wikifier needs to run
"""
need_wikifier_or_not, self.supplied_data = d3m_wikifier.check_and_correct_q_nodes_semantic_type(self.supplied_data)
if not need_wikifier_or_not:
# if not need to run wikifier, we can find q node columns now
self._find_q_node_columns()
return need_wikifier_or_not
def _find_q_node_columns(self) -> None:
"""
Inner function used to find q node columns by semantic type
:return: None
"""
if len(self.q_nodes_columns) > 0 or len(self.q_node_column_names) > 0:
self._logger.warning("Q node columns has already been found once! Should not run again")
self.q_node_column_names = set()
self.q_nodes_columns = list()
if type(self.supplied_data) is d3m_Dataset:
selector_base_type = "ds"
else:
selector_base_type = "df"
# check whether Qnode is given in the inputs, if given, use this to search
metadata_input = self.supplied_data.metadata
for i in range(self.supplied_dataframe.shape[1]):
if selector_base_type == "ds":
metadata_selector = (self.res_id, ALL_ELEMENTS, i)
else:
metadata_selector = (ALL_ELEMENTS, i)
if Q_NODE_SEMANTIC_TYPE in metadata_input.query(metadata_selector)["semantic_types"]:
# if no required variables given, attach any Q nodes found
self.q_nodes_columns.append(i)
self.q_node_column_names.add(self.supplied_dataframe.columns[i])
def _find_time_ranges(self) -> dict:
"""
        Inner function used to find the time information from the search queries
:return: a dict with start time, end time and time granularity
"""
info = defaultdict(list)
for i, each_search_query in enumerate(self.search_query):
if each_search_query.search_type == "general":
for each_variable in each_search_query.variables:
if each_variable.key.startswith(TIME_COLUMN_MARK):
start_time, end_time, granularity = each_variable.values.split("____")
info["start"].append(pd.to_datetime(start_time).isoformat())
info["end"].append(pd.to_datetime(end_time).isoformat())
info["granularity"].append(Utils.map_granularity_to_value(granularity))
# if no time information found
if len(info) == 0:
return {}
time_column_info = {
"start": min(info["start"]),
"end": max(info["end"]),
"granularity": min(info["granularity"]),
}
return time_column_info
def run_wikifier(self, input_data: d3m_Dataset) -> d3m_Dataset:
"""
        Run the wikifier and return a d3m_Dataset with the wikified results on success;
        otherwise return the original input
        :return: a d3m_Dataset
"""
self._logger.debug("Start running wikifier for supplied data in search...")
results = d3m_wikifier.run_wikifier(supplied_data=input_data)
self._logger.info("Wikifier running finished.")
self.need_run_wikifier = False
self._find_q_node_columns()
return results
def _search_wikidata(self, query=None, supplied_data: typing.Union[d3m_DataFrame, d3m_Dataset] = None,
search_threshold=0.5) -> typing.List["DatamartSearchResult"]:
"""
The search function used for wikidata search
:param query: JSON object describing the query.
:param supplied_data: the data you are trying to augment.
        :param search_threshold: the minimum rate at which a property must appear across the Q nodes to be kept
:return: list of search results of DatamartSearchResult
"""
self._logger.debug("Start running search on wikidata...")
if supplied_data is None:
supplied_data = self.supplied_data
wikidata_results = []
try:
if len(self.q_nodes_columns) == 0:
self._logger.warning("No wikidata Q nodes detected on corresponding required_variables!")
self._logger.warning("Will skip wikidata search part")
return wikidata_results
else:
self._logger.info("Wikidata Q nodes inputs detected! Will search with it.")
self._logger.info("Totally " + str(len(self.q_nodes_columns)) + " Q nodes columns detected!")
# do a wikidata search for each Q nodes column
for each_column in self.q_nodes_columns:
self._logger.debug("Start searching on column " + str(each_column))
q_nodes_list = self.supplied_dataframe.iloc[:, each_column].tolist()
p_count = collections.defaultdict(int)
p_nodes_needed = []
# old method, the generated results are not very good
"""
http_address = 'http://minds03.isi.edu:4444/get_properties'
headers = {"Content-Type": "application/json"}
requests_data = str(q_nodes_list)
requests_data = requests_data.replace("'", '"')
r = requests.post(http_address, data=requests_data, headers=headers)
results = r.json()
for each_p_list in results.values():
for each_p in each_p_list:
p_count[each_p] += 1
"""
# TODO: temporary change to call wikidata service, may change back in the future
# Q node format (wd:Q23)(wd: Q42)
q_node_query_part = ""
                    # ensure we always get the same order of Q nodes so the hash tag stays the same
unique_qnodes = set(q_nodes_list)
# updated v2020.1.7, use blacklist to filter q nodes
unique_qnodes = unique_qnodes - DownloadManager.fetch_blacklist_nodes()
unique_qnodes = list(unique_qnodes)
unique_qnodes.sort()
# updated v2020.1.6, not skip if unique Q nodes are too few
if len(unique_qnodes) == 0:
self._logger.warning("No Q nodes detected on column No.{} need to search, skip.".format(str(each_column)))
continue
if len(unique_qnodes) > config.max_q_node_query_size:
unique_qnodes = random.sample(unique_qnodes, config.max_q_node_query_size)
for each in unique_qnodes:
if len(each) > 0:
q_node_query_part += "(wd:" + each + ")"
sparql_query = "select distinct ?item ?property where \n{\n VALUES (?item) {" + q_node_query_part \
+ " }\n ?item ?property ?value .\n ?wd_property wikibase:directClaim ?property ." \
+ " values ( ?type ) \n {\n ( wikibase:Quantity )\n" \
+ " ( wikibase:Time )\n ( wikibase:Monolingualtext )\n }" \
+ " ?wd_property wikibase:propertyType ?type .\n}\norder by ?item ?property "
results = self.wikidata_cache_manager.get_result(sparql_query)
if results is None:
# if response none, it means get wikidata query results failed
self._logger.error("Can't get wikidata search results for column No." + str(each_column) + "(" +
self.supplied_dataframe.columns[each_column] + ")")
continue
self._logger.debug("Response from server for column No." + str(each_column) + "(" +
self.supplied_dataframe.columns[each_column] + ")" +
" received, start parsing the returned data from server.")
# count the appeared times and find the p nodes appeared rate that higher than threshold
for each in results:
if "property" not in each:
self._logger.error("Wikidata query returned wrong results!!! Please check!!!")
raise ValueError("Wikidata query returned wrong results!!! Please check!!!")
p_count[each['property']['value'].split("/")[-1]] += 1
for key, val in p_count.items():
if float(val) / len(unique_qnodes) >= search_threshold:
p_nodes_needed.append(key)
wikidata_search_result = {"p_nodes_needed": p_nodes_needed,
"target_q_node_column_name": self.supplied_dataframe.columns[each_column]}
wikidata_results.append(DatamartSearchResult(search_result=wikidata_search_result,
supplied_data=supplied_data,
query_json=query,
search_type="wikidata")
)
self._logger.debug("Running search on wikidata finished.")
return wikidata_results
except Exception as e:
self._logger.error("Searching with wikidata failed!")
self._logger.debug(e, exc_info=True)
finally:
return wikidata_results
def _search_datamart(self) -> typing.List["DatamartSearchResult"]:
"""
function used for searching in datamart with blaze graph database
:return: List[DatamartSearchResult]
"""
self._logger.debug("Start searching on datamart...")
search_result = []
variables_search = self.search_query[self.current_searching_query_index].variables_search
keywords_search = self.search_query[self.current_searching_query_index].keywords_search
        # COMMENT: title is not used, may be deleted later
variables, title = dict(), dict()
variables_temp = dict() # this temp is specially used to store variable for time query
if self.augment_with_time:
time_information = self._find_time_ranges()
if len(time_information) == 0:
self._logger.warning("Required to search with time but no time column found from supplied data!")
return []
for each_variable in self.search_query[self.current_searching_query_index].variables:
# updated v2019.12.11, now we only search "time column only" if augment_with_time is set to false
if each_variable.key.startswith(TIME_COLUMN_MARK):
if self.augment_with_time:
self._logger.warning("Not search with time only if augment_with_time is set to True")
return []
elif self.consider_time is False:
self._logger.warning("Not search with time only if consider_time is set to False")
return []
else:
variables_temp[each_variable.key.split("____")[1]] = each_variable.values
start_time, end_time, granularity = each_variable.values.split("____")
variables_search = {
"temporal_variable":
{
"start": start_time,
"end": end_time,
"granularity": granularity
}
}
else:
# updated v2019.12.18: if consider wikifier columns only, not search on other columns
if self.consider_wikifier_columns_only and each_variable.key not in self.q_node_column_names:
self._logger.warning(
"Set to consider wikifier columns only, will not search for column {}".format(each_variable.key))
return []
variables[each_variable.key] = each_variable.values
query = {"keywords": self.search_query[self.current_searching_query_index].keywords,
"variables": variables,
"keywords_search": keywords_search,
"variables_search": variables_search,
}
if self.augment_with_time:
query["variables_time"] = time_information
query_results = self.augmenter.query_by_sparql(query=query,
dataset=self.supplied_data,
consider_wikifier_columns_only=self.consider_wikifier_columns_only,
augment_with_time=self.augment_with_time,
limit_amount=self.search_limit_amount)
if len(variables_temp) != 0:
query["variables"] = variables_temp
for i, each_result in enumerate(query_results):
# self._logger.debug("Get returned No." + str(i) + " query result as ")
# self._logger.debug(str(each_result))
# the special way to calculate the score of temporal variable search
if "start_time" in each_result.keys() and "end_time" in each_result.keys():
if self.augment_with_time:
tv = time_information
else:
tv = query["variables_search"]["temporal_variable"]
start_date = pd.to_datetime(tv["start"]).timestamp()
end_date = pd.to_datetime(tv["end"]).timestamp() # query time
start_time = pd.to_datetime(each_result['start_time']['value']).timestamp()
end_time = pd.to_datetime(each_result['end_time']['value']).timestamp() # dataset
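                # time_score below is the fraction of the queried range [start_date, end_date] that the
                # dataset's range [start_time, end_time] covers; e.g. query 2000-2010 vs dataset
                # 2005-2020 gives (2010 - 2005) / (2010 - 2000) = 0.5 (worked example added for clarity)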
denominator = float(end_date - start_date)
if end_date > end_time:
if start_date > end_time:
time_score = 0.0
elif start_date >= start_time and end_time >= start_date:
time_score = (end_time - start_date) / denominator
elif start_time > start_date:
time_score = (end_time - start_time) / denominator
elif end_date >= start_time and end_time >= end_date:
if start_date >= start_time:
time_score = 1.0
elif start_time > start_date:
time_score = (end_date - start_time) / denominator
elif start_time > end_date:
time_score = 0.0
if time_score != 0.0 and 'score' in each_result.keys():
old_score = float(each_result['score']['value'])
each_result['score']['value'] = old_score + time_score
else:
each_result['score'] = {"value": time_score}
temp = DatamartSearchResult(search_result=each_result, supplied_data=self.supplied_data, query_json=query,
search_type="general")
search_result.append(temp)
search_result.sort(key=lambda x: x.score(), reverse=True)
self._logger.debug("Searching on datamart finished.")
# need to add time on join pairs
if self.augment_with_time:
search_result = self._search_with_time_columns(search_result)
return search_result
def _search_vector(self) -> typing.List["DatamartSearchResult"]:
"""
The search function used for vector search
:return: List[DatamartSearchResult]
"""
self._logger.debug("Start running search on Vectors...")
vector_results = []
try:
if len(self.q_nodes_columns) == 0:
self._logger.warning("No Wikidata Q nodes detected!")
self._logger.warning("Will skip vector search part")
return vector_results
else:
self._logger.info("Wikidata Q nodes inputs detected! Will search with it.")
self._logger.info("Totally " + str(len(self.q_nodes_columns)) + " Q nodes columns detected!")
# do a vector search for each Q nodes column
for each_column in self.q_nodes_columns:
self._logger.debug("Start searching on column " + str(each_column))
q_nodes_list = list(filter(None, self.supplied_dataframe.iloc[:, each_column].dropna().tolist()))
unique_qnodes = list(set(q_nodes_list))
unique_qnodes.sort()
# updated v2020.1.6, not skip if unique Q nodes are too few
if len(unique_qnodes) < config.min_q_node_query_size_percent * len(q_nodes_list):
self._logger.warning("Too few Q nodes (rate = {}/{}) found on column {}, will skip this column.".
format(str(len(unique_qnodes)),
str(config.min_q_node_query_size_percent * len(q_nodes_list)),
str(each_column)))
continue
vector_search_result = {"number_of_vectors": str(len(unique_qnodes)),
"target_q_node_column_name": self.supplied_dataframe.columns[each_column],
"q_nodes_list": unique_qnodes}
vector_results.append(DatamartSearchResult(search_result=vector_search_result,
supplied_data=self.supplied_data,
query_json=None,
search_type="vector")
)
self._logger.debug("Running search on vector finished.")
return vector_results
except Exception as e:
self._logger.error("Searching with wikidata vector failed!")
self._logger.debug(e, exc_info=True)
finally:
return vector_results
def _search_geospatial_data(self) -> typing.List["DatamartSearchResult"]:
"""
function used for searching geospatial data
:return: List[DatamartSearchResult]
"""
self._logger.debug("Start searching geospatial data on wikidata and datamart...")
search_results = []
# try to find possible columns of latitude and longitude
possible_longitude_or_latitude = []
for each in range(len(self.supplied_dataframe.columns)):
if type(self.supplied_data) is d3m_Dataset:
selector = (self.res_id, ALL_ELEMENTS, each)
else:
selector = (ALL_ELEMENTS, each)
each_column_meta = self.supplied_data.metadata.query(selector)
if "https://metadata.datadrivendiscovery.org/types/Location" in each_column_meta["semantic_types"]:
try:
column_data = self.supplied_dataframe.iloc[:, each].astype(float).dropna()
if max(column_data) <= config.max_longitude_val and min(column_data) >= config.min_longitude_val:
possible_longitude_or_latitude.append(each)
elif max(column_data) <= config.max_latitude_val and min(column_data) >= config.min_latitude_val:
possible_longitude_or_latitude.append(each)
except:
pass
if len(possible_longitude_or_latitude) < 2:
self._logger.debug("Supplied dataset does not have geospatial data!")
return search_results
else:
self._logger.debug(
"Finding columns:" + str(possible_longitude_or_latitude) + " which might be geospatial data columns...")
possible_la_or_long_comb = list(combinations(possible_longitude_or_latitude, 2))
for column_index_comb in possible_la_or_long_comb:
latitude_index, longitude_index = -1, -1
# try to get the correct latitude and longitude pairs
for each_column_index in column_index_comb:
try:
column_data = self.supplied_dataframe.iloc[:, each_column_index].astype(float).dropna()
column_name = self.supplied_dataframe.columns[each_column_index]
# must be longitude when its min is in [-180, -90), or max is in (90, 180]
if config.max_latitude_val < max(column_data) <= config.max_longitude_val \
or (config.min_latitude_val > min(column_data) >= config.min_longitude_val):
longitude_index = each_column_index
else:
# determine the type by header [latitude, longitude]
if any([True for i in column_name if i in ['a', 'A']]):
latitude_index = each_column_index
elif any([True for i in column_name if i in ['o', 'O', 'g', 'G']]):
longitude_index = each_column_index
except Exception as e:
self._logger.debug(e, exc_info=True)
self._logger.error("Can't parse location information for column No." + str(each_column_index)
+ " with column name " + column_name)
# search on datamart and wikidata by city qnodes
if latitude_index != -1 and longitude_index != -1:
self._logger.info(
"Latitude column is: " + str(latitude_index) + " and longitude is: " + str(longitude_index) + "...")
granularity = {'city'}
radius = 100
for gran in granularity:
search_variables = {'metadata': {
'search_result': {
'latitude_index': latitude_index,
'longitude_index': longitude_index,
'radius': radius,
'granularity': gran
},
'search_type': 'geospatial'
}}
# do wikidata query service to find city q-node columns
return_ds = DownloadManager.query_geospatial_wikidata(self.supplied_data, search_variables,
self.connection_url)
_, return_df = d3m_utils.get_tabular_resource(dataset=return_ds, resource_id=None)
if return_df.columns[-1].startswith('Geo_') and return_df.columns[-1].endswith('_wikidata'):
qnodes = return_df.iloc[:, -1]
qnodes_set = list(set(qnodes))
coverage_score = len(qnodes_set) / len(qnodes)
# search on datamart
qnodes_str = " ".join(qnodes_set)
variables = [VariableConstraint(key=return_df.columns[-1], values=qnodes_str)]
self.search_query[self.current_searching_query_index].variables = variables
search_res = timeout_call(1800, self._search_datamart, [])
search_results.extend(search_res)
# search on wikidata
temp_q_nodes_columns = self.q_nodes_columns
self.q_nodes_columns = [-1]
search_res = timeout_call(1800, self._search_wikidata, [None, return_df])
search_results.extend(search_res)
self.q_nodes_columns = temp_q_nodes_columns
if search_results:
for each_result in search_results:
# change metadata's score
old_score = each_result.score()
new_score = old_score * coverage_score
each_result.metadata_manager.score = new_score
# change score in datamart_search_result
if "score" in each_result.search_result.keys():
each_result.search_result["score"]["value"] = new_score
search_results.sort(key=lambda x: x.score(), reverse=True)
self._logger.debug("Running search on geospatial data finished.")
return search_results
def _search_with_time_columns(self, search_results: typing.List["DatamartSearchResult"]) \
-> typing.List["DatamartSearchResult"]:
"""
        Update the search results so that joins use both the original join column and a time column,
        instead of the original join column alone
        :param search_results: list of "DatamartSearchResult"
        :return: list of "DatamartSearchResult"
"""
# find time columns first
# get time ranges on supplied data
time_columns_left = list()
for i in range(self.supplied_dataframe.shape[1]):
if type(self.supplied_data) is d3m_Dataset:
each_selector = (self.res_id, ALL_ELEMENTS, i)
else:
each_selector = (ALL_ELEMENTS, i)
each_column_metadata = self.supplied_data.metadata.query(each_selector)
if "semantic_types" not in each_column_metadata:
self._logger.warning("column No.{} {} do not have semantic type on metadata!".
format(str(i), str(self.supplied_dataframe.columns[i])))
continue
if TIME_SEMANTIC_TYPE in each_column_metadata['semantic_types']:
# if we got original time granularity from metadata, use it directly
time_column = self.supplied_dataframe.iloc[:, i]
if 'time_granularity' in each_column_metadata.keys():
granularity_d3m_format = each_column_metadata['time_granularity']
granularity = Utils.map_d3m_granularity_to_value(granularity_d3m_format['unit'])
else:
try:
granularity_datamart_format = Utils.get_time_granularity(time_column)
granularity = Utils.map_granularity_to_value(granularity_datamart_format)
except ValueError:
self._logger.error("Can't continue because unable to get the time granularity on column No.{} {}".
format(str(i), str(self.supplied_dataframe.columns[i])))
continue
self._logger.info("Get the time granularity of column No.{} {} as {}".
format(str(i), str(self.supplied_dataframe.columns[i]), str(granularity)))
if "datetime" not in time_column.dtype.name:
time_column = pd.to_datetime(time_column)
time_columns_left.append({
"granularity": granularity,
"start_time": min(time_column),
"end_time": max(time_column),
"column_number": i,
})
# get time ranges on search results
time_columns_right = list()
for each_search_result in search_results:
if each_search_result.search_type == "general":
for i in range(each_search_result.d3m_metadata.query((ALL_ELEMENTS,))['dimension']['length']):
each_column_metadata = each_search_result.d3m_metadata.query((ALL_ELEMENTS, i))
                    # TODO: it seems our current system can't handle multiple time columns yet
if TIME_SEMANTIC_TYPE in each_column_metadata['semantic_types']:
time_information_query = self.augmenter.get_dataset_time_information(each_search_result.id())
if len(time_information_query) == 0:
self._logger.warning("Detect timestamp on dataset {} {} but no time information was found!"
.format(each_search_result.id(),
each_search_result.search_result['title']['value']))
continue
time_columns_right.append({
"granularity": int(time_information_query[0]['time_granularity']['value']),
"start_time": pd.Timestamp(time_information_query[0]['start_time']['value']),
"end_time": pd.Timestamp(time_information_query[0]['end_time']['value']),
"column_number": i,
"dataset_id": each_search_result.id()
})
        # only keep the datasets that have an overlapping time range and the same time granularity
can_consider_datasets = defaultdict(list)
for left_time_info in time_columns_left:
for right_time_info in time_columns_right:
left_range = [left_time_info['start_time'], left_time_info['end_time']]
right_range = [right_time_info['start_time'], right_time_info['end_time']]
                # ensure the formats are correct
for i in range(len(left_range)):
if isinstance(left_range[i], pd.Timestamp):
left_range[i] = left_range[i].tz_localize('UTC')
elif isinstance(left_range[i], str):
left_range[i] = pd.Timestamp(left_range[i])
                # TODO: if the time granularities differ but the time ranges overlap, should we consider it or not?
if left_time_info['granularity'] >= right_time_info['granularity'] and Utils.overlap(left_range, right_range):
can_consider_datasets[right_time_info['dataset_id']].append(
{
"left_column_number": left_time_info["column_number"],
"right_dataset_id": right_time_info['dataset_id'],
"right_join_column_number": right_time_info['column_number'],
"right_join_start_time": right_time_info['start_time'],
"right_join_end_time": right_time_info['end_time'],
"right_join_time_granularity": right_time_info['granularity']
})
filtered_search_result = []
for each_search_result in search_results:
if each_search_result.search_type == "general":
if each_search_result.id() in can_consider_datasets:
for each_combine in can_consider_datasets[each_search_result.id()]:
each_search_result_copied = copy.copy(each_search_result)
# update join pairs information
right_index = None
right_join_column_name = each_search_result.search_result['variableName']['value']
for i in range(each_search_result.d3m_metadata.query((ALL_ELEMENTS,))['dimension']['length']):
each_column_metadata = each_search_result.d3m_metadata.query((ALL_ELEMENTS, i))
if each_column_metadata['name'] == right_join_column_name:
right_index = i
break
if len(each_search_result.query_json['variables'].keys()) > 1:
self._logger.warning("Mutiple variables join results update for time related not supported yet!")
left_join_column_name = list(each_search_result.query_json['variables'].keys())[0]
left_index = self.supplied_dataframe.columns.tolist().index(left_join_column_name)
# right_index = right_df.columns.tolist().index(right_join_column_name)
original_left_index_column = DatasetColumn(resource_id=self.res_id, column_index=left_index)
original_right_index_column = DatasetColumn(resource_id=None, column_index=right_index)
left_columns = [
DatasetColumn(resource_id=self.res_id, column_index=each_combine["left_column_number"]),
original_left_index_column
]
right_columns = [
DatasetColumn(resource_id=None, column_index=each_combine["right_join_column_number"]),
original_right_index_column
]
updated_join_pairs = [TabularJoinSpec(left_columns=[left_columns], right_columns=[right_columns])]
each_search_result_copied.set_join_pairs(updated_join_pairs)
# update the search result with time information
time_search_keyword = TIME_COLUMN_MARK + "____" + right_join_column_name
each_search_result_copied.query_json['keywords'].append(time_search_keyword)
each_search_result_copied.search_result['start_time'] = str(each_combine["right_join_start_time"])
each_search_result_copied.search_result['end_time'] = str(each_combine["right_join_end_time"])
each_search_result_copied.search_result['time_granularity'] = str(
each_combine["right_join_time_granularity"])
filtered_search_result.append(each_search_result_copied)
return filtered_search_result
@singleton
class Datamart(object):
"""
    ISI implementation of datamart
"""
def __init__(self, connection_url: str = None) -> None:
self._logger = logging.getLogger(__name__)
if connection_url:
self._logger.info("Using user-defined connection url as " + connection_url)
self.connection_url = connection_url
else:
connection_url = os.getenv('DATAMART_URL_ISI', DEFAULT_DATAMART_URL)
self.connection_url = connection_url
self._logger.debug("Current datamart connection url is: " + self.connection_url)
self.augmenter = Augment()
self.supplied_dataframe = None
def search(self, query: 'DatamartQuery') -> DatamartQueryCursor:
"""This entry point supports search using a query specification.
The query specification supports querying datasets by keywords, named entities, temporal ranges, and geospatial ranges.
Datamart implementations should return a DatamartQueryCursor immediately.
Parameters
----------
query : DatamartQuery
Query specification.
Returns
-------
DatamartQueryCursor
A cursor pointing to search results.
"""
return DatamartQueryCursor(augmenter=self.augmenter, search_query=[query], supplied_data=None,
connection_url=self.connection_url, need_run_wikifier=False)
def search_with_data(self, query: 'DatamartQuery', supplied_data: container.Dataset, **kwargs) \
-> DatamartQueryCursor:
"""
Search using on a query and a supplied dataset.
This method is a "smart" search, which leaves the Datamart to determine how to evaluate the relevance of search
result with regard to the supplied data. For example, a Datamart may try to identify named entities and date
ranges in the supplied data and search for companion datasets which overlap.
To manually specify query constraints using columns of the supplied data, use the `search_with_data_columns()`
method and `TabularVariable` constraints.
Datamart implementations should return a DatamartQueryCursor immediately.
Parameters
----------
query : DatamartQuery
Query specification
supplied_data : container.Dataset
The data you are trying to augment.
kwargs : dict
Some extra control parameters. For example:
            need_wikidata: (Default is True) If set to True, the program will run the wikifier on the supplied data to find
            possible Q nodes, then search for possible attributes with those Q nodes and search for vectors
            augment_with_time: (Default is False) If set to True, a pair of two columns will be searched; only data with
            both join columns, like [time, key], will be considered
            consider_time: (Default is True) If set to False, time columns on datamart will not be considered as candidates.
            This control parameter is ignored if augment_with_time is True
            consider_wikifier_columns_only: (Default is False) If set to True, only columns with Q nodes will be considered
            as join candidates
Returns
-------
DatamartQueryCursor
A cursor pointing to search results containing possible companion datasets for the supplied data.
"""
# update v2019.10.24, add keywords search in search queries
if query.keywords:
query_keywords = []
for each in query.keywords:
translator = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
words_processed = str(each).lower().translate(translator).split()
query_keywords.extend(words_processed)
else:
query_keywords = None
need_wikidata = kwargs.get("need_wikidata", True)
consider_wikifier_columns_only = kwargs.get("consider_wikifier_columns_only", False)
augment_with_time = kwargs.get("augment_with_time", False)
consider_time = kwargs.get("consider_time", True)
if consider_time is False and augment_with_time is True:
self._logger.warning("Augment with time is set to be true! consider_time parameter will be useless.")
# add some special search query in the first search queries
if not need_wikidata:
search_queries = [DatamartQuery(search_type="geospatial")]
need_run_wikifier = False
else:
need_run_wikifier = None
search_queries = [DatamartQuery(search_type="wikidata"),
DatamartQuery(search_type="vector"),
DatamartQuery(search_type="geospatial")]
# try to update with more correct metadata if possible
updated_result = MetadataCache.check_and_get_dataset_real_metadata(supplied_data)
        if updated_result[0]:  # [0] stores whether the metadata was successfully found
supplied_data = updated_result[1]
if type(supplied_data) is d3m_Dataset:
res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
else:
raise ValueError("Incorrect supplied data type as " + str(type(supplied_data)))
# if query is None:
# if not query given, try to find the Text columns from given dataframe and use it to find some candidates
can_query_columns = []
for each in range(len(self.supplied_dataframe.columns)):
if type(supplied_data) is d3m_Dataset:
selector = (res_id, ALL_ELEMENTS, each)
else:
selector = (ALL_ELEMENTS, each)
each_column_meta = supplied_data.metadata.query(selector)
            # try to parse each column to DateTime type. If it succeeds, add the new semantic type; otherwise do nothing
try:
pd.to_datetime(self.supplied_dataframe.iloc[:, each])
new_semantic_type = {"semantic_types": (TIME_SEMANTIC_TYPE, ATTRIBUTE_SEMANTIC_TYPE)}
supplied_data.metadata = supplied_data.metadata.update(selector, new_semantic_type)
except:
pass
if TEXT_SEMANTIC_TYPE in each_column_meta["semantic_types"] \
or TIME_SEMANTIC_TYPE in each_column_meta["semantic_types"]:
can_query_columns.append(each)
if len(can_query_columns) == 0:
self._logger.warning("No column can be used for augment with datamart!")
for each_column_index in can_query_columns:
column_formated = DatasetColumn(res_id, each_column_index)
tabular_variable = TabularVariable(columns=[column_formated], relationship=ColumnRelationship.CONTAINS)
each_search_query = self.generate_datamart_query_from_data(supplied_data=supplied_data,
data_constraints=[tabular_variable])
# if we get keywords from input search query, add it
if query_keywords:
each_search_query.keywords_search = query_keywords
search_queries.append(each_search_query)
return DatamartQueryCursor(augmenter=self.augmenter, search_query=search_queries, supplied_data=supplied_data,
need_run_wikifier=need_run_wikifier, connection_url=self.connection_url,
consider_wikifier_columns_only=consider_wikifier_columns_only,
augment_with_time=augment_with_time,
consider_time=consider_time)
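    # Hedged usage sketch for search_with_data and its kwargs (the query keywords and
    # dataset variable are illustrative assumptions):
    #   dm = Datamart()
    #   cursor = dm.search_with_data(query=DatamartQuery(keywords=["poverty"], variables=[]),
    #                                supplied_data=my_d3m_dataset,
    #                                augment_with_time=False,
    #                                consider_wikifier_columns_only=True)
    #   results = cursor.get_next_page(limit=10)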
def search_with_data_columns(self, query: 'DatamartQuery', supplied_data: container.Dataset,
data_constraints: typing.List['TabularVariable']) -> DatamartQueryCursor:
"""
Search using a query which can include constraints on supplied data columns (TabularVariable).
This search is similar to the "smart" search provided by `search_with_data()`, but caller must manually specify
constraints using columns from the supplied data; Datamart will not automatically analyze it to determine
relevance or joinability.
Use of the query spec enables callers to compose their own "smart search" implementations.
Datamart implementations should return a DatamartQueryCursor immediately.
Parameters
        ----------
query : DatamartQuery
Query specification
supplied_data : container.Dataset
The data you are trying to augment.
data_constraints : list
List of `TabularVariable` constraints referencing the supplied data.
Returns
-------
DatamartQueryCursor
A cursor pointing to search results containing possible companion datasets for the supplied data.
"""
# put entities of all given columns from "data_constraints" into the query's variable part and run the query
# try to update with more correct metadata if possible
updated_result = MetadataCache.check_and_get_dataset_real_metadata(supplied_data)
        if updated_result[0]:  # [0] stores whether the metadata was successfully found
supplied_data = updated_result[1]
search_query = self.generate_datamart_query_from_data(supplied_data=supplied_data,
data_constraints=data_constraints)
return DatamartQueryCursor(augmenter=self.augmenter, search_query=[search_query], supplied_data=supplied_data,
connection_url=self.connection_url)
def generate_datamart_query_from_data(self, supplied_data: container.Dataset,
data_constraints: typing.List['TabularVariable']) -> "DatamartQuery":
"""
        Inner function used to generate the ISI-implemented datamart query from the given dataset
        :param supplied_data: a Dataset-format supplied data
        :param data_constraints: list of TabularVariable constraints referencing the supplied data
        :return: a DatamartQuery that can be used in the ISI datamart
"""
all_query_variables = []
keywords = []
translator = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
for each_constraint in data_constraints:
for each_column in each_constraint.columns:
each_column_index = each_column.column_index
each_column_res_id = each_column.resource_id
all_value_str_set = set()
each_column_meta = supplied_data.metadata.query((each_column_res_id, ALL_ELEMENTS, each_column_index))
treat_as_a_text_column = False
if TIME_SEMANTIC_TYPE in each_column_meta["semantic_types"]:
try:
column_data = supplied_data[each_column_res_id].iloc[:, each_column_index]
column_data_datetime_format = pd.to_datetime(column_data)
start_date = min(column_data_datetime_format)
end_date = max(column_data_datetime_format)
time_granularity = Utils.get_time_granularity(column_data_datetime_format)
# for time type, we create a special type of keyword and variables
# so that we can detect it later in general search part
each_keyword = TIME_COLUMN_MARK + "____" + supplied_data[each_column_res_id].columns[each_column_index]
keywords.append(each_keyword)
all_value_str = str(start_date) + "____" + str(end_date) + "____" + time_granularity
all_query_variables.append(VariableConstraint(key=each_keyword, values=all_value_str))
except Exception as e:
self._logger.debug(e, exc_info=True)
self._logger.error("Can't parse current datetime for column No." + str(each_column_index)
+ " with column name " + supplied_data[each_column_res_id].columns[each_column_index])
treat_as_a_text_column = True
                # for some special conditions (DA_medical_malpractice), a column could have a DateTime tag but be unable to be parsed;
                # in such a condition, we should treat it as a Text column and search with it
if 'http://schema.org/Text' in each_column_meta["semantic_types"] or treat_as_a_text_column:
column_values = supplied_data[each_column_res_id].iloc[:, each_column_index].astype(str)
query_column_entities = list(set(column_values.tolist()))
                    random.seed(42)  # ensure we always get the same random sample
if len(query_column_entities) > MAX_ENTITIES_LENGTH:
query_column_entities = random.sample(query_column_entities, MAX_ENTITIES_LENGTH)
for each in query_column_entities:
words_processed = str(each).lower().translate(translator).split()
for word in words_processed:
all_value_str_set.add(word)
all_value_str_list = list(all_value_str_set)
                    # ensure the order we get is always the same
all_value_str_list.sort()
all_value_str = " ".join(all_value_str_list)
each_keyword = supplied_data[each_column_res_id].columns[each_column_index]
keywords.append(each_keyword)
all_query_variables.append(VariableConstraint(key=each_keyword, values=all_value_str))
search_query = DatamartQuery(keywords=keywords, variables=all_query_variables)
return search_query
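# Illustrative shape of the generated query (values are made up for clarity):
#   DatamartQuery(keywords=["state"],
#                 variables=[VariableConstraint(key="state", values="ohio texas utah")])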
class DatasetColumn:
"""
Specify a column of a dataframe in a D3MDataset
"""
def __init__(self, resource_id: typing.Optional[str], column_index: int) -> None:
self.resource_id = resource_id
self.column_index = column_index
class DatamartSearchResult:
"""
This class represents the search results of a datamart search.
Different datamarts will provide different implementations of this class.
"""
def __init__(self, search_result: dict,
supplied_data: typing.Union[d3m_DataFrame, d3m_Dataset, None],
query_json: dict, search_type: str, connection_url: str = None):
self._logger = logging.getLogger(__name__)
self.search_result = search_result
self.supplied_data = supplied_data
if type(supplied_data) is d3m_Dataset:
self.res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data,
resource_id=None)
self.selector_base_type = "ds"
elif type(supplied_data) is d3m_DataFrame:
self.res_id = None
self.supplied_dataframe = supplied_data
self.selector_base_type = "df"
else:
self.res_id = None
self.supplied_dataframe = None
if connection_url:
self._logger.info("Using user-defined connection url as " + connection_url)
self.connection_url = connection_url
else:
connection_url = os.getenv('DATAMART_URL_ISI', DEFAULT_DATAMART_URL)
self.connection_url = connection_url
self.wikidata_cache_manager = QueryCache()
self.general_search_cache_manager = GeneralSearchCache()
self.query_json = query_json
self.search_type = search_type
self.pairs = None
self.join_pairs = None
self.right_df = None
extra_information = self.search_result.get('extra_information')
if extra_information is not None:
extra_information = json.loads(extra_information['value'])
self.special_requirement = extra_information.get("special_requirement")
else:
self.special_requirement = None
self.metadata_manager = MetadataGenerator(supplied_data=self.supplied_data, search_result=self.search_result,
search_type=self.search_type, connection_url=self.connection_url,
wikidata_cache_manager=self.wikidata_cache_manager)
self.d3m_metadata = self.metadata_manager.generate_d3m_metadata_for_search_result()
def _get_first_ten_rows(self) -> pd.DataFrame:
"""
Inner function used to get first 10 rows of the search results
:return:
"""
return_res = ""
try:
if self.search_type == "general":
return_res = json.loads(self.search_result['extra_information']['value'])['first_10_rows']
elif self.search_type == "wikidata":
materialize_info = self.search_result
return_df = MaterializerCache.materialize(materialize_info, run_wikifier=False)
return_df = return_df[:10]
return_res = return_df.to_csv()
elif self.search_type == "vector":
sample_q_nodes = self.search_result["q_nodes_list"][:10]
return_df = DownloadManager.fetch_fb_embeddings(sample_q_nodes, self.search_result["target_q_node_column_name"])
return_res = return_df.to_csv(index=False)
else:
self._logger.error("unknown format of search result as {}!".format(str(self.search_type)))
except Exception as e:
self._logger.error("failed on getting first ten rows of search results")
self._logger.debug(e, exc_info=True)
finally:
return return_res
def display(self) -> pd.DataFrame:
"""
        function used to show, in a human-readable form, what was found inside this search result;
        contains information about the search result's title, columns and join hints
:return: a pandas DataFrame
"""
return self.metadata_manager.get_simple_view()
def download(self, supplied_data: typing.Union[d3m_Dataset, d3m_DataFrame] = None,
connection_url: str = None, generate_metadata=True, return_format="ds", run_wikifier=True) \
-> typing.Union[container.Dataset, container.DataFrame]:
"""
Produces a D3M dataset (data plus metadata) corresponding to the search result.
Every time the download method is called on a search result, it will produce the exact same columns
(as specified in the metadata -- get_metadata), but the set of rows may depend on the supplied_data.
Datamart is encouraged to return a dataset that joins well with the supplied data, e.g., has rows that match
the entities in the supplied data. Datamarts may ignore the supplied_data and return the same data regardless.
If the supplied_data is None, Datamarts may return None or a default dataset, based on the search query.
Parameters
---------
:param supplied_data : container.Dataset
A D3M dataset containing the dataset that is the target for augmentation. Datamart will try to download data
that augments the supplied data well.
:param connection_url : str
A connection string used to connect to a specific Datamart deployment. If not provided, the one provided to
the `Datamart` constructor is used.
        :param generate_metadata: bool
            Whether or not to generate the metadata automatically; only valid in ISI datamart
        :param return_format: str
            A control parameter that sets which type of output to return; the default value is "ds" (dataset).
            The other option is "df" for dataframe-type output. Only valid in ISI datamart
        :param run_wikifier: bool
            A control parameter that sets whether to run the wikifier on this search result
"""
if connection_url:
# if a new connection url given
if self.connection_url != connection_url:
self.connection_url = connection_url
self.wikidata_cache_manager = QueryCache()
self.general_search_cache_manager = GeneralSearchCache()
self.metadata_manager = MetadataGenerator(supplied_data=supplied_data, search_result=self.search_result,
search_type=self.search_type, connection_url=connection_url,
wikidata_cache_manager=self.wikidata_cache_manager)
self._logger.info("New connection url given from download part as " + self.connection_url)
if type(supplied_data) is d3m_Dataset:
self.res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
elif type(supplied_data) is d3m_DataFrame:
self.supplied_dataframe = supplied_data
else:
self._logger.warning("No supplied data given, will try to use the exist one")
if self.supplied_dataframe is None and self.supplied_data is None:
raise ValueError("No supplied data found!")
# get the results without metadata
if self.search_type == "general":
res = self._download_general(run_wikifier=run_wikifier)
elif self.search_type == "wikidata":
res = self._download_wikidata()
elif self.search_type == "vector":
res = self._download_vector()
else:
raise ValueError("Unknown search type with " + self.search_type)
        # sometimes the index will not be continuous after augmenting, so reset it to ensure the index is continuous
        res = res.reset_index(drop=True)
if return_format == "ds":
return_df = d3m_DataFrame(res, generate_metadata=False)
resources = {AUGMENT_RESOURCE_ID: return_df}
return_result = d3m_Dataset(resources=resources, generate_metadata=False)
elif return_format == "df":
return_result = d3m_DataFrame(res, generate_metadata=False)
else:
raise ValueError("Invalid return format was given as " + str(return_format))
if generate_metadata:
return_result = self.metadata_manager.generate_metadata_for_download_result(return_result, supplied_data)
return return_result
def _download_general(self, run_wikifier) -> pd.DataFrame:
"""
Specified download function for general datamart Datasets
:return: a dataset or a dataframe depending on the input
"""
self._logger.debug("Start downloading for datamart...")
join_pairs_result = []
candidate_join_column_scores = []
# start finding pairs
left_df = copy.deepcopy(self.supplied_dataframe)
if self.right_df is None:
self.right_df = MaterializerCache.materialize(metadata=self.search_result, run_wikifier=run_wikifier)
right_df = self.right_df
else:
self._logger.info("Find downloaded data from previous time, will use that.")
right_df = self.right_df
self._logger.debug("Download finished, start finding pairs to join...")
        # left_metadata = Utils.generate_metadata_from_dataframe(data=left_df, original_meta=None)
# right_metadata = Utils.generate_metadata_from_dataframe(data=right_df, original_meta=None)
if self.join_pairs is None:
candidate_join_column_pairs = self.get_join_hints(left_df=left_df, right_df=right_df, left_df_src_id=self.res_id)
else:
candidate_join_column_pairs = self.join_pairs
if len(candidate_join_column_pairs) > 1:
logging.warning("multiple joining column pairs found! Will only check first one.")
elif len(candidate_join_column_pairs) < 1:
logging.error("Getting joining pairs failed")
is_time_query = False
if "start_time" in self.search_result and "end_time" in self.search_result:
for each in self.query_json['keywords']:
if TIME_COLUMN_MARK in each:
is_time_query = True
break
if is_time_query:
            # if it is a dataset found with a time query, we should transform the time column to the same format and granularity
            # then we can run RLTK with an exact join, the same as a string join
time_granularity = self.search_result.get("time_granularity")
if isinstance(time_granularity, str) or isinstance(time_granularity, int):
time_granularity = int(time_granularity)
elif isinstance(time_granularity, dict) and "value" in time_granularity:
time_granularity = int(time_granularity["value"])
elif time_granularity is None:
# if not get time granularity, set as unknown, then try to get the real value
self._logger.info("Unable to get time granularity! Will try to guess.")
time_granularity = 8
else:
raise ValueError("Can't parse time granularity from {}".format(str(time_granularity)))
if self.join_pairs is None:
right_join_column_name = self.search_result['variableName']['value']
right_df[right_join_column_name] =
|
pd.to_datetime(right_df[right_join_column_name])
|
pandas.to_datetime
|
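# A minimal sketch of the pandas.to_datetime step shown above: before an exact
# (string-style) join on a time column, the column is parsed to datetimes and
# floored to a shared granularity. The frame and column names below are
# illustrative only, not taken from the datamart code.
import pandas as pd
right_df = pd.DataFrame({"report_date": ["2015-06-01 13:45", "2015-06-02 09:10"],
                         "value": [1.0, 2.0]})
right_df["report_date"] = pd.to_datetime(right_df["report_date"])
# align to daily granularity so both sides of the join share the same keys
right_df["report_date"] = right_df["report_date"].dt.floor("D")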
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import copy
import numpy as np
import random
import math
from creature_ability_list import creature_ability_dict
from creature_ability_conditions import creature_ability_condition_dict
from spell_ability_list import spell_ability_dict
from amulet_ability_list import amulet_ability_dict
from cost_change_ability_list import cost_change_ability_dict
from battle_ability_list import battle_ability_dict
from trigger_ability_list import trigger_ability_dict
#from numba import jit
from collections import deque
from my_moduler import get_module_logger
mylogger = get_module_logger(__name__)
from my_enum import *
import csv
import pandas as pd
import warnings
# warnings.simplefilter('ignore', NumbaWarning)
def tsv_to_card_list(tsv_name):
card_list = {}
card_category = tuple(tsv_name.split("_"))[1]
with open("Card_List_TSV/" + tsv_name) as f:
reader = csv.reader(f, delimiter='\t', lineterminator='\n')
for row in reader:
#mylogger.info("row:{}".format(row))
card_id = int(row[0])
# card_cost=int(row[1])
card_cost = int(row[2])
# assert card_category in ["Creature","Spell","Amulet"]
if card_id not in card_list: card_list[card_id] = []
card_name = row[1]
card_class = None
card_list[card_id].append(card_cost)
card_traits = None
has_count = None
if card_category == "Creature":
card_class = LeaderClass[row[-2]].value
card_traits = Trait[row[-1]].value
power = int(row[3])
toughness = int(row[4])
ability = []
if row[5] != "":
txt = list(row[5].split(","))
ability = [int(ele) for ele in txt]
card_list[card_id].extend([power, toughness, ability])
elif card_category == "Amulet":
# mylogger.info("row_contents:{}".format(row))
card_traits = Trait[row[-2]].value
card_class = LeaderClass[row[-3]].value
has_count = False
if row[-1] != "False":
has_count = int(row[-1])
ability = []
if row[3] != "":
txt = tuple(row[3].split(","))
ability = [int(ele) for ele in txt]
card_list[card_id].append(ability)
elif card_category == "Spell":
card_traits = Trait[row[-1]].value
card_class = LeaderClass[row[-2]].value
else:
assert False, "{}".format(card_category)
if card_class == LeaderClass["RUNE"].value:
spell_boost = tuple(row[-3 - int(card_category == "Amulet")].split(","))
check_spellboost = [bool(int(cell)) for cell in spell_boost]
card_list[card_id].append([card_class, check_spellboost, card_traits])
else:
card_list[card_id].append([card_class, card_traits])
            if has_count is not None:
card_list[card_id].append(has_count)
card_list[card_id].append(card_name)
return card_list
def tsv_to_dataframe(tsv_name):
card_category = tuple(tsv_name.split("_"))[1]
my_columns = []
sample = []
assert card_category in ["Creature", "Spell", "Amulet"]
if card_category == "Creature":
my_columns = ["Card_id", "Card_name", "Cost", "Power", "Toughness", "Ability", "Class", "Trait", "Spell_boost"]
sample = [0, "Sample", 0, 0, 0, [], "NEUTRAL", "NONE", "None"]
elif card_category == "Spell":
my_columns = ["Card_id", "Card_name", "Cost", "Class", "Trait", "Spell_boost"]
sample = [0, "Sample", 0, "NEUTRAL", "NONE", "None"]
elif card_category == "Amulet":
my_columns = ["Card_id", "Card_name", "Cost", "Ability", "Class", "Trait", "Spell_boost", "Count_down"]
sample = [0, "Sample", 0, [], "NEUTRAL", "NONE", "None", "None"]
df =
|
pd.DataFrame([sample], columns=my_columns)
|
pandas.DataFrame
|
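# A small sketch of the pattern used in tsv_to_dataframe above: a single
# placeholder row is wrapped in a list and turned into a DataFrame with named
# columns. The column names here are shortened stand-ins for the real card fields.
import pandas as pd
my_columns = ["Card_id", "Card_name", "Cost"]
sample = [0, "Sample", 0]
df = pd.DataFrame([sample], columns=my_columns)
print(df.dtypes)  # each column's dtype is inferred from the sample row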
import numpy as np
import operator
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import pandas as pd
import sys
#Function to calculate PCA
def CalculatePCA(pdata):
cv_mat = np.cov(pdata.T)
eig_val,eig_vec = np.linalg.eigh(cv_mat)
eig_vec = eig_vec.transpose()
d = dict()
for i in range(eig_vec.shape[1]):
d[eig_val[i]] = eig_vec[i]
eig_mat = sorted(d.items(), key=operator.itemgetter(0),reverse=True)
eig_mat = eig_mat[:2]
dataPCA = np.concatenate((eig_mat[0][1][:,None],eig_mat[1][1][:,None]), axis = 1)
Y = pdata.dot(dataPCA)
return Y
#Function to calculate SVD
def CalculateSVD(sdata):
u,s,v = np.linalg.svd(sdata.T)
u = u.transpose()
u = u[:2]
dataSVD = np.concatenate((u[0][:,None],u[1][:,None]), axis = 1)
W_SVD = sdata.dot(dataSVD)
return W_SVD
#Function to calculate TSNE
def CalculateTNSE(tdata):
u_tnse = TSNE(n_components=2).fit_transform(tdata.T)
u_tnse = u_tnse.transpose()
u_tnse = u_tnse[:2]
dataTNSE = np.concatenate((u_tnse[0][:,None],u_tnse[1][:,None]), axis = 1)
W_TNSE = tdata.dot(dataTNSE)
return W_TNSE
def main():
#Getting command line input data from user
fname = sys.argv[1]
pddata = pd.read_csv(fname,sep='\t',header=None)
fname = fname.split("/")[-1]
ncols = len(pddata.columns)
data = pddata.iloc[:,:-1]
data = data.values
origdata = data.copy()
data -= data.mean(axis=0)
#Running for file pca_a.txt - The PCA Matrix is stored in the variable data.
Y = CalculatePCA(data)
#Plotting Scatter Plot for the returned data
xval = pd.DataFrame(Y)[0]
yval = pd.DataFrame(Y)[1]
lbls = set(pddata[ncols-1])
fig1 = plt.figure(1)
for lbl in lbls:
cond = pddata[ncols-1] == lbl
plt.plot(xval[cond], yval[cond], linestyle='none', marker='o', label=lbl)
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(numpoints=1)
plt.subplots_adjust(bottom=.20, left=.20)
fig1.suptitle("Algorithm - PCA, Text File - "+fname[:-4],fontsize=20)
fig1.savefig("PCA_"+fname+".png")
#plt.show()
#Calling SVD
SVDData = CalculateSVD(origdata)
#Plotting SVD
X_SVD = pd.DataFrame(SVDData)[0]
Y_SVD =
|
pd.DataFrame(SVDData)
|
pandas.DataFrame
|
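# A brief sketch of how the projected 2-D data above is wrapped in a DataFrame
# so the two components can be pulled out as plottable Series. The array here
# is random, standing in for the output of CalculatePCA or CalculateSVD.
import numpy as np
import pandas as pd
projected = np.random.rand(10, 2)      # stand-in for Y or SVDData
xval = pd.DataFrame(projected)[0]      # first component
yval = pd.DataFrame(projected)[1]      # second component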
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import math
import random
import operator
import sys
sys.setrecursionlimit(10000)
xl=
|
pd.ExcelFile("mpd2018.xlsx")
|
pandas.ExcelFile
|
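# A minimal sketch of the pd.ExcelFile pattern used above: the workbook is
# opened once and individual sheets are parsed on demand. The filename and
# sheet handling below are placeholders, not the actual Maddison Project layout.
import pandas as pd
xl_demo = pd.ExcelFile("example_workbook.xlsx")
print(xl_demo.sheet_names)                   # list the available sheets
df_demo = xl_demo.parse(xl_demo.sheet_names[0])  # parse one sheet into a DataFrame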
import pandas as pd
import os, glob
def get_negative_cols(pais,hh_df):
try: negative_dict = pd.read_csv('output/hh_survey_negative_values.csv').set_index('pais')
except: negative_dict = pd.DataFrame(columns=['negative_values'])
negative_cols = [_c for _c in hh_df.columns if ((hh_df[_c].dtype == 'float32' or hh_df[_c].dtype == 'float64')
and ('ict' not in _c) and ('ing' not in _c or 'ct' in _c or 'trsgob' in _c)
and (hh_df[_c].min() < 0))]
out_str = ''
if len(negative_cols) == 0: out_str = '--, '
else:
for i in negative_cols: out_str += i+', '
negative_dict.loc[pais,'negative_values'] = out_str[:-2]
negative_dict.index.name = 'pais'
negative_dict.sort_index().to_csv('output/hh_survey_negative_values.csv')
if len(negative_cols)==0: return None
return negative_cols
def get_hh_survey(pais):
hh_survey = None
if pais == 'chl': pais = 'chi'
try:
file_name = 'consumption_and_household_surveys/2017-10-13/Household_survey_with_new_file_name/'+pais+'_household_expenditure_survey.dta'
hh_survey = pd.read_stata(file_name).set_index('cod_hogar')
except:
file_name = 'consumption_and_household_surveys/Expansion_Countries/'
for f in glob.glob(file_name+pais.upper()+'*'):
if 'PERSONA' not in f:
hh_survey = pd.read_stata(f)
try: hh_survey['cod_hogar'] = hh_survey['cod_hogar'].astype('int')
except: pass
hh_survey = hh_survey.reset_index().set_index('cod_hogar')
hh_survey = hh_survey.drop([i for i in ['index'] if i in hh_survey.columns],axis=1)
break
if 'miembros_hogar' not in hh_survey.columns:
hh_survey['miembros_hogar'] = get_miembros_hogar(pais)
if hh_survey['miembros_hogar'].shape[0] != hh_survey['miembros_hogar'].dropna().shape[0]:
n_fail = hh_survey['miembros_hogar'].shape[0] - hh_survey['miembros_hogar'].dropna().shape[0]
print('Finding',n_fail,'hh with no info on miembros hogar! ('+str(int(100.*n_fail/hh_survey['miembros_hogar'].shape[0]))+'% of hh)')
assert(False)
print('\nLOADED miembros hogar')
if pais == 'ury':
hh_survey['gasto_vca'] = hh_survey['gasto_vca'].fillna(0)
hh_survey['gasto_viv'] -= hh_survey['gasto_vca']
hh_survey['gasto_vleca'] -= hh_survey['gasto_vca']
hh_survey['gasto_vca'] = 0
hh_survey = hh_survey.loc[hh_survey['gct']>0]
# Check whether there are any hh that don't return CT info, but do show a difference between total receipts & other transfers
#print(hh_survey.loc[(hh_survey.ing_tpub!=0)&(hh_survey.ing_tpub!=hh_survey.ing_trsgob),['ing_tpub','ing_ct','ing_trsgob']].head())
if (pais == 'col'
or pais == 'gtm'
or pais == 'pan'
or pais == 'nic'
or pais == 'pry'
or pais == 'hnd'): hh_survey['gasto_vgn'] = hh_survey['gasto_vgn'].fillna(1E-6)
if pais == 'nic': hh_survey['gasto_vag'] = hh_survey['gasto_vag'].fillna(1E-6)
if pais == 'pry': hh_survey['gasto_vele'] = hh_survey['gasto_vele'].fillna(1E-6)
hh_survey = hh_survey.rename(columns={'factor_expansion_1':'factor_expansion'}).fillna(0)
n_hh = hh_survey.shape[0]
negative_cols = get_negative_cols(pais, hh_survey)
if negative_cols is not None:
for _n in negative_cols:
#if pais == 'arg':
# -> This code would reduce % of hh dropped from 4.9% to 0.1%
# hh_survey.loc[hh_survey['gasto_totros']<0,'gct'] -= hh_survey.loc[hh_survey['gasto_totros']<0,'gasto_totros']
# hh_survey.loc[hh_survey['gasto_totros']<0,'gasto_trans'] -= hh_survey.loc[hh_survey['gasto_totros']<0,'gasto_totros']
# hh_survey.loc[hh_survey['gasto_totros']<0,'gasto_totros'] -= hh_survey.loc[hh_survey['gasto_totros']<0,'gasto_totros']
if 'ing' in _n: hh_survey[_n] = hh_survey[_n].clip(lower=0.)
else:
hh_survey.loc[(hh_survey[_n]>=-1E-2)&(hh_survey[_n]<0),_n] = 0.
hh_survey = hh_survey.loc[hh_survey[_n]>=0]
percent_dropped = str(round(100.*(1-hh_survey.shape[0]/n_hh),1))
print('Dropping '+percent_dropped+'% of surveyed hh in',pais)
try: dropped_record =
|
pd.read_csv('./output/percent_of_survey_dropped_negative_values.csv')
|
pandas.read_csv
|
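# A short sketch of the read-or-create pattern used in get_negative_cols above:
# try to load a running log from CSV and index it by country code, falling back
# to an empty frame on the first run. The path below is a placeholder.
import pandas as pd
try:
    negative_dict = pd.read_csv("output/example_log.csv").set_index("pais")
except FileNotFoundError:
    negative_dict = pd.DataFrame(columns=["negative_values"])
    negative_dict.index.name = "pais"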
from google.cloud import bigquery, firestore
import json
import pandas as pd
import time
import requests
import geojson
import numpy as np
from matplotlib.path import Path
from time import sleep
def get_all_region_info():
if not hasattr(get_all_region_info, "updateTime"):
get_all_region_info.updateTime = 0
get_all_region_info.collection = firestore.Client().collection('region_info')
get_all_region_info.params = None
if time.time() - get_all_region_info.updateTime > (3600):
params = {doc_ref.id: doc_ref.to_dict() for doc_ref in get_all_region_info.collection.stream()}
get_all_region_info.updateTime = time.time()
get_all_region_info.params = params
return get_all_region_info.params
def bbox_to_vertices(bbox):
if bbox is None:
return None
vertices = [
(bbox['north'], bbox['west']),
(bbox['north'], bbox['east']),
(bbox['south'], bbox['east']),
(bbox['south'], bbox['west'])
]
return vertices
def get_region_bbox(region):
return get_all_region_info()[region]['bbox']
def get_region_vertices(region):
return bbox_to_vertices(get_region_bbox(region))
def isQueryInBoundingBox(bounding_box_vertices, query_lat, query_lon):
verts = [(0, 0)] * len(bounding_box_vertices)
for elem in bounding_box_vertices:
verts[elem[0]] = (elem[2], elem[1])
# Add first vertex to end of verts so that the path closes properly
verts.append(verts[0])
codes = [Path.MOVETO]
codes += [Path.LINETO] * (len(verts) - 2)
codes += [Path.CLOSEPOLY]
boundingBox = Path(verts, codes)
return boundingBox.contains_point((query_lon, query_lat))
def getAreaModelByLocation(lat, lon, string=None):
area_models = get_all_region_info()
if string is None:
for key in area_models:
if isQueryInBoundingBox(area_models[key]['bbox'], lat, lon):
return area_models[key]
else:
try:
return area_models[string]
except:
return None
def applyRegionalLabelsToDataFrame(df, null_value=np.nan, trim=False):
df['Label'] = null_value
for region_name, region_info in get_all_region_info().items():
bbox = get_region_bbox(region_name)
if bbox is None:
continue
df.loc[
(df['Lat'] >= bbox['south']) &
(df['Lat'] <= bbox['north']) &
(df['Lon'] >= bbox['west']) &
(df['Lon'] <= bbox['east']),
'Label'
] = region_info['name']
if trim:
x = len(df)
df = df.dropna(subset=['Label'])
print(f"Dropped {x - len(df)} unlabeled rows.")
return df
def chunk_list(ls, chunk_size=10000):
'''
BigQuery only allows inserts <=10,000 rows
'''
for i in range(0, len(ls), chunk_size):
yield ls[i: i + chunk_size]
def setPMSModels(df, col_name):
pms_models = ['PMS1003', 'PMS3003', 'PMS5003', 'PMS7003']
for model in pms_models:
df.loc[df['Type'].str.contains(model), col_name] = model
return df
def setChildFromParent(df, pairings, col_name):
df.loc[pairings.index, col_name] = df.loc[pairings, col_name].values
return df
def getParentChildPairing(df):
'''
    Purple Air devices have two PM sensors inside. Data is reported for both sensors separately,
but one sensor is considered the "parent" and one is the "child". The child has
lots of missing information, like DEVICE_LOCATIONTYPE, Flag, Type. So we create
this Series to link parents and children, then later use this Series to fill in
missing data for the children with data from their parents.
Beware: sometimes we find orphans - rows with a non-null ParentID, but no corresponding
row with an ID equal to the value of that ParentID.
'''
# Get the rows where ParentID is not Null (ParentID values are the IDs of the parent sensors)
pairings = df['ParentID'].loc[~df['ParentID'].isnull()].astype(int)
# Eliminate orphans (sorry orphans)
pairings = pairings[pairings.isin(df.index)]
return pairings
def main(data, context):
response = None
try:
response = json.loads(requests.get('https://www.purpleair.com/json?a').text)
results = response['results']
except Exception as e:
print('Could not download data. Exception: ', str(e), response)
try:
print('trying again after 15 seconds')
sleep(20)
response = json.loads(requests.get('https://www.purpleair.com/json?a').text)
results = response['results']
except:
print('Could not download data (take 2). Exception: ', str(e), response)
return
# Convert JSON response to a Pandas DataFrame
df =
|
pd.DataFrame(results)
|
pandas.DataFrame
|
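# A compact sketch of turning the PurpleAir JSON payload into a DataFrame: a
# list of dicts becomes one row per sensor, indexed by its ID. The two records
# below are invented, not real sensor data.
import pandas as pd
results_demo = [{"ID": 1, "ParentID": None, "Type": "PMS5003"},
                {"ID": 2, "ParentID": 1, "Type": None}]
df_demo = pd.DataFrame(results_demo).set_index("ID")
# children can then be filled from their parents via label-based indexing (df.loc)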
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for USEquityPricingLoader and related classes.
"""
from parameterized import parameterized
import sys
import numpy as np
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
import pandas as pd
from pandas.testing import assert_frame_equal
from toolz.curried.operator import getitem
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.domain import US_EQUITIES
from zipline.pipeline.loaders.synthetic import (
NullAdjustmentReader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.pipeline.data import USEquityPricing
from zipline.testing import (
seconds_to_timestamp,
str_to_seconds,
MockDailyBarReader,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
ZiplineTestCase,
)
import pytest
# Test calendar ranges over the month of June 2015
# June 2015
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
TEST_CALENDAR_START = pd.Timestamp("2015-06-01", tz="UTC")
TEST_CALENDAR_STOP = pd.Timestamp("2015-06-30", tz="UTC")
TEST_QUERY_START = pd.Timestamp("2015-06-10", tz="UTC")
TEST_QUERY_STOP = pd.Timestamp("2015-06-19", tz="UTC")
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = pd.DataFrame(
[
# 1) The equity's trades start and end before query.
{"start_date": "2015-06-01", "end_date": "2015-06-05"},
# 2) The equity's trades start and end after query.
{"start_date": "2015-06-22", "end_date": "2015-06-30"},
# 3) The equity's data covers all dates in range.
{"start_date": "2015-06-02", "end_date": "2015-06-30"},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{"start_date": "2015-06-01", "end_date": "2015-06-15"},
# 5) The equity's trades start and end during the query.
{"start_date": "2015-06-12", "end_date": "2015-06-18"},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{"start_date": "2015-06-15", "end_date": "2015-06-25"},
],
index=np.arange(1, 7),
columns=["start_date", "end_date"],
).astype(np.datetime64)
EQUITY_INFO["symbol"] = [chr(ord("A") + n) for n in range(len(EQUITY_INFO))]
EQUITY_INFO["exchange"] = "TEST"
TEST_QUERY_SIDS = EQUITY_INFO.index
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
SPLITS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.103,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.110,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.112,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-21"),
"ratio": 6.121,
"sid": 6,
},
# Another action in query range, should have last_row of 1
{
"effective_date": str_to_seconds("2015-06-11"),
"ratio": 3.111,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.119,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
MERGERS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.203,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.210,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.212,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-25"),
"ratio": 6.225,
"sid": 6,
},
# Another action in query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 4.212,
"sid": 4,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.219,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
DIVIDENDS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-05-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-03", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-05", tz="UTC").to_datetime64(),
"amount": 90.0,
"sid": 1,
},
# First day of query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-10", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-17", tz="UTC").to_datetime64(),
"amount": 80.0,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-12", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-17", tz="UTC").to_datetime64(),
"amount": 70.0,
"sid": 3,
},
# After query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-25", tz="UTC").to_datetime64(),
"record_date":
|
pd.Timestamp("2015-06-28", tz="UTC")
|
pandas.Timestamp
|
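# A minimal sketch of the timezone-aware pd.Timestamp usage in the DIVIDENDS
# frame above: timestamps are built with tz="UTC" and converted to numpy
# datetime64 values for storage in the adjustments table. The dates are examples.
import pandas as pd
record_date_demo = pd.Timestamp("2015-06-28", tz="UTC").to_datetime64()
pay_date_demo = pd.Timestamp("2015-06-30", tz="UTC").to_datetime64()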
# Copyright (c) Facebook, Inc. and its affiliates.
from factor_learning.utils import utils
from factor_learning.dataio.DigitImageTfDataset import DigitImageTfDataset
from factor_learning.dataio.DigitImageTfPairsDataset import DigitImageTfPairsDataset
from subprocess import call
import os
from scipy import linalg
import numpy as np
import cv2
from PIL import Image
import math
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
import torch
import seaborn as sns
from pandas.plotting import scatter_matrix
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.patches import Rectangle, Circle
plt.rcParams.update({'font.size': 14})
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
def visualize_correlation(feat_ij, pose_ij):
data_tensor = torch.cat([feat_ij, pose_ij], 1)
data = data_tensor.data.numpy()
data_frame = pd.DataFrame(data)
data_frame.columns = ['$f_{ij}[0]$: cx', '$f_{ij}[1]$: cy', '$f_{ij}[2]$: szx',
'$f_{ij}$[3]: szy', '$f_{ij}[4]$: c$\\alpha$', '$f_{ij}[5]$: s$\\alpha$',
'$T_{ij}[0]$: tx', '$T_{ij}[1]$: ty', '$T_{ij}[2]$: c$\\theta$', '$T_{ij}[3]$: s$\\theta$']
corr_matrix = data_frame.corr()
# plot correlation scatter plot
fig1 = plt.figure()
|
scatter_matrix(data_frame)
|
pandas.plotting.scatter_matrix
|
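# A small sketch of pandas.plotting.scatter_matrix as used above: pairwise
# scatter plots of every column against every other, useful for eyeballing the
# feature/pose correlations. The random frame below stands in for data_frame.
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
demo = pd.DataFrame(np.random.randn(50, 3), columns=["cx", "cy", "tx"])
scatter_matrix(demo, diagonal="kde")
plt.show()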
"""
Test the _dummy module.
"""
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import ParameterGrid
import pytest
from sportsbet.datasets import DummySoccerDataLoader
def test_get_all_params():
"""Test all parameters."""
dataloader = DummySoccerDataLoader()
all_params = dataloader.get_all_params()
assert all_params == [
{'division': 1, 'year': 1998},
{'division': 1, 'league': 'France', 'year': 2000},
{'division': 1, 'league': 'France', 'year': 2001},
{'division': 1, 'league': 'Greece', 'year': 2017},
{'division': 1, 'league': 'Greece', 'year': 2019},
{'division': 1, 'league': 'Spain', 'year': 1997},
{'division': 2, 'league': 'England', 'year': 1997},
{'division': 2, 'league': 'Spain', 'year': 1999},
{'division': 3, 'league': 'England', 'year': 1998},
]
def test_get_odds_types():
    """Test the available odds types."""
dataloader = DummySoccerDataLoader()
assert dataloader.get_odds_types() == ['interwetten', 'williamhill']
def test_param_grid_default():
"""Test the default parameters grid."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data()
params = pd.DataFrame(dataloader.param_grid_)
expected_params = pd.DataFrame(
ParameterGrid(
[
{param: [val] for param, val in params.items()}
for params in dataloader.get_all_params()
]
)
)
cols = list(params.columns)
pd.testing.assert_frame_equal(
params[cols].sort_values(cols, ignore_index=True),
expected_params[cols].sort_values(cols, ignore_index=True),
)
def test_param_grid():
"""Test the parameters grid."""
dataloader = DummySoccerDataLoader(param_grid={'division': [1]})
dataloader.extract_train_data()
params = pd.DataFrame(dataloader.param_grid_)
expected_params = pd.DataFrame(
ParameterGrid(
[
{param: [val] for param, val in params.items()}
for params in dataloader.get_all_params()
]
)
)
expected_params = expected_params[expected_params["division"] == 1]
cols = list(params.columns)
pd.testing.assert_frame_equal(
params[cols].sort_values(cols, ignore_index=True),
expected_params[cols].sort_values(cols, ignore_index=True),
)
def test_param_grid_false_names():
"""Test the raise of value error for parameters grid for false names."""
false_param_grid = {'Division': [4], 'league': ['Greece']}
dataloader = DummySoccerDataLoader(false_param_grid)
with pytest.raises(
ValueError,
match=re.escape(
"Parameter grid includes the parameters name(s) ['Division'] that "
"are not not allowed by available data."
),
):
dataloader.extract_train_data()
def test_param_grid_false_values():
"""Test the raise of value error for parameters grid for false values."""
false_param_grid = {'division': [4], 'league': ['Greece']}
dataloader = DummySoccerDataLoader(false_param_grid)
with pytest.raises(
ValueError,
match=re.escape(
"Parameter grid includes the parameters value(s) "
"{'division': 4, 'league': 'Greece'} that are not allowed by "
"available data."
),
):
dataloader.extract_train_data()
def test_drop_na_thres_default():
"""Test default value for drop na threshold."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data()
assert dataloader.drop_na_thres_ == 0.0
@pytest.mark.parametrize('drop_na_thres', [1, 0])
def test_drop_na_thres_raise_type_error(drop_na_thres):
"""Test the raise of type error for check of drop na threshold."""
dataloader = DummySoccerDataLoader()
with pytest.raises(TypeError):
dataloader.extract_train_data(drop_na_thres)
@pytest.mark.parametrize('drop_na_thres', [1.5, -0.4])
def test_drop_na_thres_raise_value_error(drop_na_thres):
"""Test the raise of value error for check of drop na threshold."""
dataloader = DummySoccerDataLoader()
with pytest.raises(ValueError):
dataloader.extract_train_data(drop_na_thres)
def test_odds_type_default():
"""Test default value for odds type."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data()
assert dataloader.odds_type_ is None
def test_odds_type():
"""Test value of odds type."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data(odds_type='interwetten')
assert dataloader.odds_type_ == 'interwetten'
def test_odds_type_raise_type_error():
"""Test the raise of type error for check of odds type."""
dataloader = DummySoccerDataLoader()
with pytest.raises(
ValueError,
match='Parameter `odds_type` should be a prefix of available odds columns. '
'Got `5` instead.',
):
dataloader.extract_train_data(odds_type=5)
def test_odds_type_raise_value_error():
"""Test the raise of value error for check of odds type."""
dataloader = DummySoccerDataLoader()
with pytest.raises(
ValueError,
match="Parameter `odds_type` should be a prefix of available odds columns. "
"Got `pinnacle` instead.",
):
dataloader.extract_train_data(odds_type='pinnacle')
def test_drop_na_cols_default():
"""Test the dropped columns of data loader for the default value."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data()
pd.testing.assert_index_equal(
dataloader.dropped_na_cols_,
pd.Index(['odds__pinnacle__over_2.5__full_time_goals'], dtype=object),
)
def test_drop_na_cols():
"""Test the dropped columns of data loader."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data(drop_na_thres=1.0)
pd.testing.assert_index_equal(
dataloader.dropped_na_cols_,
pd.Index(
[
'league',
'odds__interwetten__home_win__full_time_goals',
'odds__williamhill__draw__full_time_goals',
'odds__williamhill__away_win__full_time_goals',
'odds__pinnacle__over_2.5__full_time_goals',
],
dtype='object',
),
)
def test_input_cols_default():
"""Test the input columns for default values."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data()
pd.testing.assert_index_equal(
dataloader.input_cols_,
pd.Index(
[
col
for col in DummySoccerDataLoader.DATA.columns
if col
not in (
'target__home_team__full_time_goals',
'target__away_team__full_time_goals',
'fixtures',
'date',
'odds__pinnacle__over_2.5__full_time_goals',
)
],
dtype=object,
),
)
def test_input_cols():
"""Test the input columns."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data(drop_na_thres=1.0)
pd.testing.assert_index_equal(
dataloader.input_cols_,
pd.Index(
[
col
for col in DummySoccerDataLoader.DATA.columns
if col
not in (
'target__home_team__full_time_goals',
'target__away_team__full_time_goals',
'fixtures',
'odds__williamhill__draw__full_time_goals',
'odds__williamhill__away_win__full_time_goals',
'odds__pinnacle__over_2.5__full_time_goals',
'date',
'league',
'odds__interwetten__home_win__full_time_goals',
)
],
dtype=object,
),
)
def test_output_cols_default():
"""Test the output columns for default parameters."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data()
pd.testing.assert_index_equal(
dataloader.output_cols_,
pd.Index(
[
'output__home_win__full_time_goals',
'output__away_win__full_time_goals',
'output__draw__full_time_goals',
'output__over_2.5__full_time_goals',
'output__under_2.5__full_time_goals',
],
dtype=object,
),
)
def test_output_cols():
"""Test the output columns."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data(odds_type='interwetten')
pd.testing.assert_index_equal(
dataloader.output_cols_,
pd.Index(
[
'output__home_win__full_time_goals',
'output__draw__full_time_goals',
'output__away_win__full_time_goals',
],
dtype=object,
),
)
def test_odds_cols_default():
"""Test the odds columns for default parameters."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data()
pd.testing.assert_index_equal(
dataloader.odds_cols_,
pd.Index([], dtype=object),
)
def test_odds_cols():
"""Test the odds columns."""
dataloader = DummySoccerDataLoader()
dataloader.extract_train_data(odds_type='williamhill')
pd.testing.assert_index_equal(
dataloader.odds_cols_,
pd.Index(
[
'odds__williamhill__home_win__full_time_goals',
'odds__williamhill__draw__full_time_goals',
'odds__williamhill__away_win__full_time_goals',
]
),
)
def test_extract_train_data_default():
    """Test the train data columns for default parameters."""
dataloader = DummySoccerDataLoader()
X, Y, O = dataloader.extract_train_data()
pd.testing.assert_frame_equal(
X,
pd.DataFrame(
{
'division': [1, 3, 1, 2, 1, 1, 1, 1],
'league': [
'Spain',
'England',
np.nan,
'Spain',
'France',
'France',
'Greece',
'Greece',
],
'year': [1997, 1998, 1998, 1999, 2000, 2001, 2017, 2019],
'home_team': [
'Real Madrid',
'Liverpool',
'Liverpool',
'Barcelona',
'Lens',
'PSG',
'Olympiakos',
'Panathinaikos',
],
'away_team': [
'Barcelona',
'Arsenal',
'Arsenal',
'Real Madrid',
'Monaco',
'Lens',
'Panathinaikos',
'AEK',
],
'odds__interwetten__home_win__full_time_goals': [
1.5,
2.0,
np.nan,
2.5,
2.0,
3.0,
2.0,
2.0,
],
'odds__interwetten__draw__full_time_goals': [
3.5,
4.5,
2.5,
4.5,
2.5,
2.5,
2.0,
2.0,
],
'odds__interwetten__away_win__full_time_goals': [
2.5,
3.5,
3.5,
2.0,
3.0,
2.0,
2.0,
3.0,
],
'odds__williamhill__home_win__full_time_goals': [
2.5,
2.0,
4.0,
2.0,
2.5,
2.5,
2.0,
3.5,
],
'odds__williamhill__draw__full_time_goals': [
2.5,
np.nan,
np.nan,
np.nan,
2.5,
3.0,
2.0,
1.5,
],
'odds__williamhill__away_win__full_time_goals': [
np.nan,
np.nan,
np.nan,
np.nan,
3.0,
2.5,
2.0,
np.nan,
],
}
).set_index(
pd.Index(
[
|
pd.Timestamp('5/4/1997')
|
pandas.Timestamp
|
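# A brief sketch of the comparison pattern used in these tests: an expected
# frame is indexed by pd.Timestamp dates and checked with
# pd.testing.assert_frame_equal. The dates and values are illustrative only.
import pandas as pd
expected_demo = pd.DataFrame({"division": [1, 2]},
                             index=pd.Index([pd.Timestamp("5/4/1997"),
                                             pd.Timestamp("3/4/1998")]))
actual_demo = expected_demo.copy()
pd.testing.assert_frame_equal(actual_demo, expected_demo)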
import os
import shutil
#import re
import sys
import platform
import subprocess
import numpy as np
import json
import pickle
import pandas as pd
from pandas import Series
import xml.etree.ElementTree as ET
import glob
import argparse
try:
import lvdb
except:
import pdb as lvdb
print('using pdb instead of lvdb')
pass
def ensure_dir_exists (datadir):
if not os.path.exists(datadir):
os.makedirs(datadir)
if not os.path.exists(datadir):
themessage = 'Directory {} could not be created.'.format(datadir)
if (int(platform.python_version()[0]) > 2):
raise NotADirectoryError(themessage)
else:
# python 2 doesn't have the impressive exception vocabulary 3 does
# so just raising a generic exception with a useful description
raise BaseException(themessage)
def rsync_the_file (from_location, to_location):
# Assuming that the responses for how platform.system() responds to
# different OSes given here are correct (though not assuming case):
# https://stackoverflow.com/questions/1854/python-what-os-am-i-running-on
    if platform.system().lower() == 'windows':
print('Windows detected. The rsync command that is about to be', \
'executed assumes a Linux or Mac OS; no guarantee that it', \
'will work with Windows. Please be ready to transfer files', \
'via alternate means if necessary.')
subprocess.call(['rsync', '-vaPhz', from_location, to_location])
def df_to_pickle(thedf, thefilename):
thedf.to_pickle(thefilename);
def df_to_csv(thedf, thefilename):
thedf.to_csv(thefilename, index_label='index');
def df_to_json(thedf, thefilename):
thedf.to_json(thefilename, orient='records', double_precision = 10, force_ascii = True);
def glob2df(datadir, linecount, jobnum_list):
print(datadir)
thepaths = glob.iglob(datadir + '/*/')
results_dirs_used = []
df_list = []
progress_counter = 1000;
counter = 0;
for dirname in sorted(thepaths):
dirstructure = dirname.split('/')
lastdir = dirstructure[-1]
if '_job_' not in lastdir:
# handle trailing slash if present
lastdir = dirstructure[-2];
if '_job_' not in lastdir:
# something's wrong; skip this case
continue;
if '_task_' not in lastdir:
# something's wrong; skip this case
continue;
if 'latest' in lastdir:
continue;
filename = dirname + 'summary.csv'
if not os.path.isfile(filename):
print('No summary file at ', filename);
# no summary file means no results, unless results saved using a
# different mechanism, which is out of scope of this script
continue;
missionname = dirname + 'mission.xml'
if not os.path.isfile(missionname):
print('No mission file at ', missionname);
continue;
split_on_task = lastdir.split('_task_')
tasknum = int(split_on_task[-1])
jobnum = int(split_on_task[0].split('_job_',1)[1])
if jobnum_list and jobnum not in jobnum_list:
# lvdb.set_trace()
# print('Job {} not in list of jobs; skipping'.format(jobnum))
continue;
counter += 1;
if counter > progress_counter:
print('j ', jobnum, ', t ', tasknum)
counter = 0;
# thisjob_df = pd.DataFrame(index=range(1))
thisjob_df = pd.read_csv(filename)
if thisjob_df.empty:
# no actual content in df; maybe only header rows
continue;
# Add column to df for job number
thisjob_df['job_num']=jobnum
# and task number
thisjob_df['task_num']=tasknum
# and results directory
thisjob_df['results_dir']=lastdir
# add how many rows there are in the df so plot scripts know what to
# expect
thisjob_df['num_rows']=len(thisjob_df.index)
df_to_append = pd.DataFrame()
thisjob_params_df = xml_param_df_cols(missionname);
num_lines = len(thisjob_df.index)
if linecount > 0:
if num_lines < linecount:
continue;
df_to_append = pd.concat([thisjob_params_df]*num_lines, ignore_index=True);
if df_to_append.empty:
continue;
this_job_df = thisjob_df
if not df_to_append.empty:
this_job_df = pd.concat([thisjob_df, df_to_append], axis=1);
results_dirs_used.append(dirname);
# indexed_by_team_df = this_job_df.set_index(['team_id'])
# df_list.append(indexed_by_team_df)
df_list.append(this_job_df)
df = pd.concat(df_list)
print('df created for job ', jobnum)
return df, results_dirs_used;
def append_block(theblock, blk_name, nonetype_var):
thedf =
|
pd.DataFrame()
|
pandas.DataFrame
|
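# A minimal sketch of the accumulate-and-concatenate pattern in glob2df above:
# per-job frames are collected in a list, tagged with job metadata, and merged
# once at the end with pd.concat. The per-job frames here are invented.
import pandas as pd
df_list_demo = []
for jobnum in (1, 2):
    thisjob_df_demo = pd.DataFrame({"score": [0.1 * jobnum, 0.2 * jobnum]})
    thisjob_df_demo["job_num"] = jobnum
    df_list_demo.append(thisjob_df_demo)
df_demo = pd.concat(df_list_demo)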
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of the zip file containing the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
f'annotations groups found')
anngrpid_from_beats = anngrpid
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
for subsubannsnode in tmpnodes:
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotation values
if n != "":
rp = roi_base + "/value/" + n
else:
rp = roi_base + "/value"
valrow3 = validate_xpath(subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT"
"_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used by
# value and supporting ROI
for rel_path4 in ["../support/supportingROI/component/"
"boundary",
"../component/annotation/support/"
"supportingROI/component/boundary"]:
roinodes = subsubannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
"./code",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
anngrpid = anngrpid + 1
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
f' without an associated beat found')
return aecgannset, valpd
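# --- Hedged usage sketch (illustrative only, not part of the original code) ---
# The annotation parser above returns the populated annotation set together
# with a pandas DataFrame of validation rows (columns given by VALICOLS, e.g.
# VALIOUT, VALUE, XPATH, VALIMSG). The helper below assumes `aecgannset.anns`
# is a list of plain dicts, as built above; it is a sketch, not library API.
def _example_annotations_overview(aecgannset, valpd):
    """Tabulate parsed annotations and count non-passing validation rows (sketch)."""
    anns_df = pd.DataFrame(aecgannset.anns)
    n_not_passed = int((valpd["VALIOUT"] != "PASSED").sum()) if valpd.shape[0] > 0 else 0
    return anns_df, n_not_passed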
def parse_generalinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts general information
This function parses the `aecg_doc` xml document searching for general
information that includes in the returned `Aecg`: unique identifier (UUID),
ECG date and time of collection (EGDTC), and device information.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# UUID
# =======================================
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"root",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID found: {valrow["VALUE"]}')
aecg.UUID = valrow["VALUE"]
else:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID not found')
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"extension",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension found: {valrow["VALUE"]}')
aecg.UUID += valrow["VALUE"]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID updated to: {aecg.UUID}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension not found')
# =======================================
# EGDTC
# =======================================
valpd = pd.DataFrame()
egdtc_found = False
for n in ["low", "center", "high"]:
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"effectiveTime\"]/"
"*[local-name() = \"" + n + "\"]",
"",
"value",
new_validation_row(aecg.filename, "GENERAL",
"EGDTC_" + n),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
egdtc_found = True
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC {n} found: {valrow["VALUE"]}')
aecg.EGDTC[n] = valrow["VALUE"]
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if not egdtc_found:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# DEVICE
# =======================================
# DEVICE = {"manufacturer": "", "model": "", "software": ""}
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_manufacturer"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer found: {tmp}')
aecg.DEVICE["manufacturer"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_model"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model found: {tmp}')
aecg.DEVICE["model"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"softwareName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_software"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software found: {tmp}')
aecg.DEVICE["software"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
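# --- Hedged usage sketch (illustrative only) ---
# Shows how parse_generalinfo() could be driven straight from an aECG XML
# file. The no-argument Aecg() constructor and the writable `filename` /
# `zipContainer` attributes are assumptions inferred from the log messages
# above; adjust to the real constructor if it differs.
def _example_parse_generalinfo(xml_path: str):
    """Return (UUID, EGDTC, DEVICE) parsed from one aECG file (sketch)."""
    aecg_doc = etree.parse(xml_path)   # lxml ElementTree of the aECG document
    aecg = Aecg()                      # hypothetical no-argument constructor
    aecg.filename = xml_path
    aecg.zipContainer = ""
    aecg = parse_generalinfo(aecg_doc, aecg, log_validation=True)
    return aecg.UUID, aecg.EGDTC, aecg.DEVICE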
def parse_subjectinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
This function parses the `aecg_doc` xml document searching for subject
information that includes in the returned `Aecg`: subject unique identifier
(USUBJID), gender, birthtime, and race.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# USUBJID
# =======================================
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"SUBJECTINFO",
"USUBJID_" + n))
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
aecg.USUBJID[n] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID cannot be established.')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# SEX / GENDER
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/"
"administrativeGenderCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"SEX"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX found: {valrow["VALUE"]}')
aecg.SEX = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# BIRTHTIME
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/birthTime",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "SUBJECTINFO",
"BIRTHTIME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME found.')
aecg.BIRTHTIME = valrow["VALUE"]
# age_in_years = aecg.subject_age_in_years()
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# RACE
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/raceCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"RACE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE found: {valrow["VALUE"]}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE not found')
aecg.RACE = valrow["VALUE"]
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
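# --- Hedged usage sketch (illustrative only) ---
# parse_subjectinfo() stores demographics in aecg.USUBJID (dict with
# "root"/"extension"), aecg.SEX, aecg.BIRTHTIME and aecg.RACE. The joined
# "root:extension" display format below is an assumption, not a convention
# defined by this module.
def _example_subject_summary(aecg):
    """Flatten the subject demographics parsed above into one dict (sketch)."""
    usubjid = ":".join(p for p in (aecg.USUBJID["root"],
                                   aecg.USUBJID["extension"]) if p)
    return {"USUBJID": usubjid, "SEX": aecg.SEX,
            "BIRTHTIME": aecg.BIRTHTIME, "RACE": aecg.RACE}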
def parse_trtainfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts treatment information
This function parses the `aecg_doc` xml document searching for treatment
information that includes in the returned `Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/definition/"
"treatmentGroupAssignment/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TRTA"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information found: {valrow["VALUE"]}')
aecg.TRTA = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_studyinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts study information
This function parses the `aecg_doc` xml document searching for study
information that includes in the returned `Aecg`: study unique identifier
(STUDYID), and study title.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"STUDYID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} found: {valrow["VALUE"]}')
aecg.STUDYID[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/title",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "STUDYINFO",
"STUDYTITLE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE found: {tmp}')
aecg.STUDYTITLE = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
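# --- Hedged usage sketch (illustrative only) ---
# parse_trtainfo() and parse_studyinfo() fill aecg.TRTA, aecg.STUDYID
# (dict with "root"/"extension") and aecg.STUDYTITLE. The sketch simply
# chains the two parsers on an already-loaded document.
def _example_study_context(aecg_doc, aecg):
    """Collect study and treatment context from an aECG document (sketch)."""
    aecg = parse_trtainfo(aecg_doc, aecg)
    aecg = parse_studyinfo(aecg_doc, aecg)
    return {"STUDYID": aecg.STUDYID,
            "STUDYTITLE": aecg.STUDYTITLE,
            "TRTA": aecg.TRTA}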
def parse_timepoints(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts timepoints information
This function parses the `aecg_doc` xml document searching for timepoints
information that includes in the returned `Aecg`: absolute timepoint or
study event information (TPT), relative timepoint or study event relative
to a reference event (RTPT), and protocol timepoint information (PTPT).
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# TPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/reasonCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TPT_reasonCode"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode found: {valrow["VALUE"]}')
aecg.TPT["reasonCode"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/"
"effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
# =======================================
# RTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"RTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} found: {valrow["VALUE"]}')
aecg.RTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"unit",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity_unit"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity_unit"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
# =======================================
# PTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/"
"componentOf/protocolTimepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"PTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} found: {valrow["VALUE"]}')
aecg.PTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"protocolTimepointEvent/component/"
"referenceEvent/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"PTPT_referenceEvent"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent code found: {valrow["VALUE"]}')
aecg.PTPT["referenceEvent"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent code not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"protocolTimepointEvent/component/"
"referenceEvent/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "STUDYINFO",
"PTPT_referenceEvent_"
"displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent displayName found: '
f'{valrow["VALUE"]}')
aecg.PTPT["referenceEvent_displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent displayName not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
return aecg
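# --- Hedged usage sketch (illustrative only) ---
# parse_timepoints() fills three dict-like attributes: TPT (absolute
# timepoint: code/displayName/reasonCode/low/high), RTPT (relative
# timepoint, including pauseQuantity and its unit) and PTPT (protocol
# timepoint, including referenceEvent). Plain-dict access via .get() is an
# assumption about those attributes.
def _example_timepoint_label(aecg):
    """Build a short, human-readable timepoint label from TPT/RTPT/PTPT (sketch)."""
    parts = [aecg.TPT.get("displayName") or aecg.TPT.get("code", ""),
             aecg.RTPT.get("displayName") or aecg.RTPT.get("code", ""),
             aecg.PTPT.get("displayName") or aecg.PTPT.get("code", "")]
    return " / ".join(p for p in parts if p)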
def parse_rhythm_waveform_info(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts rhythm waveform information
This function parses the `aecg_doc` xml document searching for rhythm
waveform information that includes in the returned `Aecg`: waveform
identifier, code, display name, and date and time of collection.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./component/series/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "RHYTHM",
"ID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} found: {valrow["VALUE"]}')
aecg.RHYTHMID[n] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "RHYTHM",
"CODE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM code found: {valrow["VALUE"]}')
aecg.RHYTHMCODE["code"] = valrow["VALUE"]
if aecg.RHYTHMCODE["code"] != "RHYTHM":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM unexpected code found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected value found"
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "RHYTHM",
"CODE_displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM displayName found: {valrow["VALUE"]}')
aecg.RHYTHMCODE["displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM displayName not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./component/series/effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "RHYTHM",
"EGDTC_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHMEGDTC {n} found: {valrow["VALUE"]}')
aecg.RHYTHMEGDTC[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHMEGDTC {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
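# --- Hedged usage sketch (illustrative only) ---
# parse_rhythm_waveform_info() records the rhythm collection window in
# aecg.RHYTHMEGDTC["low"] / ["high"]. aECG timestamps are usually HL7 TS
# strings like "YYYYMMDDHHMMSS[.ffff]"; that format is an assumption here,
# so the helper parses defensively and returns None when it does not match.
def _example_rhythm_duration_seconds(aecg):
    """Approximate the rhythm strip duration from RHYTHMEGDTC (sketch)."""
    from datetime import datetime

    def _parse_ts(ts):
        try:
            return datetime.strptime(str(ts)[:14], "%Y%m%d%H%M%S")
        except ValueError:
            return None

    low = _parse_ts(aecg.RHYTHMEGDTC.get("low", ""))
    high = _parse_ts(aecg.RHYTHMEGDTC.get("high", ""))
    return (high - low).total_seconds() if low and high else None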
def parse_derived_waveform_info(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts derived waveform information
This function parses the `aecg_doc` xml document searching for derived
waveform information that includes in the returned `Aecg`: waveform
identifier, code, display name, and date and time of collection.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"ID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} found: {valrow["VALUE"]}')
aecg.DERIVEDID[n] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "DERIVED",
"CODE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED code found: {valrow["VALUE"]}')
aecg.DERIVEDCODE["code"] = valrow["VALUE"]
if aecg.DERIVEDCODE["code"] != "REPRESENTATIVE_BEAT":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED unexpected code found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected value found"
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "DERIVED",
"CODE_displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED displayName found: {valrow["VALUE"]}')
aecg.DERIVEDCODE["displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED displayName not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "DERIVED",
"EGDTC_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVEDEGDTC {n} found: {valrow["VALUE"]}')
aecg.DERIVEDEGDTC[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVEDEGDTC {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
def parse_rhythm_waveform_timeseries(aecg_doc: etree._ElementTree,
aecg: Aecg,
include_digits: bool = False,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts rhythm's timeseries
This function parses the `aecg_doc` xml document searching for rhythm
waveform timeseries (sequences) information that includes in the returned
:any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
:any:`Aecg.RHYTHMLEADS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
include_digits (bool, optional): Indicates whether to include the
digits information in the returned `Aecg`.
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
path_prefix = './component/series/component/sequenceSet/' \
'component/sequence'
seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(seqnodes) > 0:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet(s) found: '
f'{len(seqnodes)} sequenceSet nodes')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet not found')
for xmlnode in seqnodes:
xmlnode_path = aecg_doc.getpath(xmlnode)
valrow = validate_xpath(aecg_doc,
xmlnode_path,
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "RHYTHM",
"SEQUENCE_CODE"),
failcat="WARNING")
valpd = pd.DataFrame()
if valrow["VALIOUT"] == "PASSED":
if not valrow["VALUE"] in SEQUENCE_CODES:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM unexpected sequenceSet code '
f'found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected sequence code found"
if valrow["VALUE"] in TIME_CODES:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code found: {valrow["VALUE"]}')
aecg.RHYTHMTIME["code"] = valrow["VALUE"]
# Retrieve time head info from value node
rel_path = "../value/head"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"value",
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_TIME_HEAD"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_HEAD found: {valrow2["VALUE"]}')
aecg.RHYTHMTIME["head"] = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_HEAD not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Retrieve time increment info from value node
rel_path = "../value/increment"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_TIME_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_{n} found: '
f'{valrow2["VALUE"]}')
if n == "value":
aecg.RHYTHMTIME["increment"] = float(
valrow2["VALUE"])
else:
aecg.RHYTHMTIME[n] = valrow2["VALUE"]
if log_validation:
valpd = \
valpd.append(pd.DataFrame([valrow2],
columns=VALICOLS),
ignore_index=True)
else:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code found: '
f'{valrow["VALUE"]}')
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'LEADNAME from RHYTHM sequenceSet code: '
f'{valrow["VALUE"]}')
# Assume it is a lead
aecglead = AecgLead()
aecglead.leadname = valrow["VALUE"]
# Inherit last parsed RHYTHMTIME
aecglead.LEADTIME = copy.deepcopy(aecg.RHYTHMTIME)
# Retrieve lead origin info
rel_path = "../value/origin"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM",
"SEQUENCE_LEAD_ORIGIN_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.origin = float(valrow2["VALUE"])
except Exception as ex:
valrow2["VALIOUT"] = "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
"ORIGIN is not a "\
"number"
else:
aecglead.origin_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Retrieve lead scale info
rel_path = "../value/scale"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM",
"SEQUENCE_LEAD_SCALE_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.scale = float(valrow2["VALUE"])
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE '
f'value is not a valid number: \"{ex}\"')
valrow2["VALIOUT"] = "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
"SCALE is not a "\
"number"
else:
aecglead.scale_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Include digits if requested
if include_digits:
rel_path = "../value/digits"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"",
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_LEAD_DIGITS"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
try:
# Convert string of digits to list of integers
# remove new lines
sdigits = valrow2["VALUE"].replace("\n", " ")
# remove carriage returns
sdigits = sdigits.replace("\r", " ")
# remove tabs
sdigits = sdigits.replace("\t", " ")
# collapse 2 or more spaces into 1 space char
# and remove leading/trailing white spaces
sdigits = re.sub("\\s+", " ", sdigits).strip()
# Convert string into list of integers
aecglead.digits = [int(s) for s in
sdigits.split(' ')]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS added to lead'
f' {aecglead.leadname} (n: '
f'{len(aecglead.digits)})')
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'Error parsing DIGITS from '
f'string to list of integers: \"{ex}\"')
valrow2["VALIOUT"] = "ERROR"
valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
"LEAD_DIGITS from string"\
" to list of integers"
else:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS not found for lead {aecglead.leadname}')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
else:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS were not requested by the user')
aecg.RHYTHMLEADS.append(copy.deepcopy(aecglead))
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if valpd.shape[0] > 0:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
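# --- Hedged usage sketch (illustrative only) ---
# Each AecgLead appended above carries raw integer samples in `digits`,
# calibration in `origin`/`scale` (with their units) and the sampling step
# in LEADTIME["increment"]. Mapping samples to physical values as
# origin + digit * scale follows the usual HL7 aECG SLIST convention; that
# mapping, and origin/scale being floats, are assumptions of this sketch.
def _example_lead_to_series(aecglead):
    """Return (times, amplitudes) for one parsed lead, in increment units (sketch)."""
    amplitudes = [aecglead.origin + d * aecglead.scale for d in aecglead.digits]
    increment = aecglead.LEADTIME.get("increment", 1.0)
    times = [i * increment for i in range(len(amplitudes))]
    return times, amplitudes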
def parse_derived_waveform_timeseries(aecg_doc: etree._ElementTree,
aecg: Aecg,
include_digits: bool = False,
log_validation: bool = False):
"""Parses `aecg_doc` XML document and extracts derived's timeseries
This function parses the `aecg_doc` xml document searching for derived
waveform timeseries (sequences) information that includes in the returned
:any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
:any:`Aecg.DERIVEDLEADS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
include_digits (bool, optional): Indicates whether to include the
digits information in the returned `Aecg`.
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
path_prefix = './component/series/derivation/derivedSeries/component'\
'/sequenceSet/component/sequence'
seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(seqnodes) > 0:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet(s) found: '
f'{len(seqnodes)} sequenceSet nodes')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet not found')
for xmlnode in seqnodes:
xmlnode_path = aecg_doc.getpath(xmlnode)
valrow = validate_xpath(aecg_doc,
xmlnode_path,
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_CODE"),
failcat="WARNING")
valpd = pd.DataFrame()
if valrow["VALIOUT"] == "PASSED":
if not valrow["VALUE"] in SEQUENCE_CODES:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED unexpected sequenceSet code '
f'found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected sequence code found"
if valrow["VALUE"] in TIME_CODES:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
aecg.DERIVEDTIME["code"] = valrow["VALUE"]
# Retrieve time head info from value node
rel_path = "../value/head"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_TIME_HEAD"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_TIME_HEAD found: '
f'{valrow2["VALUE"]}')
aecg.DERIVEDTIME["head"] = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_TIME_HEAD not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Retrieve time increment info from value node
rel_path = "../value/increment"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_TIME_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_TIME_{n} found: '
f'{valrow2["VALUE"]}')
if n == "value":
aecg.DERIVEDTIME["increment"] =\
float(valrow2["VALUE"])
else:
aecg.DERIVEDTIME[n] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'LEADNAME from DERIVED sequenceSet code: '
f'{valrow["VALUE"]}')
# Assume it is a lead
aecglead = AecgLead()
aecglead.leadname = valrow["VALUE"]
# Inherit last parsed DERIVEDTIME
aecglead.LEADTIME = copy.deepcopy(aecg.DERIVEDTIME)
# Retrieve lead origin info
rel_path = "../value/origin"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_LEAD_ORIGIN_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.origin = float(valrow2["VALUE"])
except Exception as ex:
valrow2["VALIOUT"] = "ERROR"
valrow2["VALIMSG"] = \
"SEQUENCE_LEAD_ORIGIN is not a number"
else:
aecglead.origin_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Retrieve lead scale info
rel_path = "../value/scale"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_LEAD_SCALE_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_SCALE_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.scale = float(valrow2["VALUE"])
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_SCALE'
f' value is not a valid number: \"{ex}\"')
valrow2["VALIOUT"] = "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_SCALE"\
" is not a number"
else:
aecglead.scale_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_SCALE_{n} not found')
if log_validation:
valpd = valpd.append(
|
pd.DataFrame([valrow2], columns=VALICOLS)
|
pandas.DataFrame
|
from selenium import webdriver
import pandas
from flask import Flask, render_template
driver = webdriver.Chrome()
quotesList = []
author = []
tags = []
for i in range(1, 11):
url = 'http://quotes.toscrape.com/js/page/{}'.format(i)
driver.get(url)
quotes = driver.find_elements_by_class_name('quote')
for quote in quotes:
quoteText = quote.find_element_by_class_name('text').text # Individual quote: 'find_element', not 'find_elements'
author = quote.find_element_by_class_name('author').text # same use of 'find_element_by_class_name'
tags = quote.find_element_by_class_name('tags').text
# To remove special characters
quoteText = quoteText.replace("“", "")
quoteText = quoteText.replace("”", "")
# Another method of converting to a .csv file that also uses less memory
OneQuote = (quoteText, author, tags)
quotesList.append(OneQuote)
df =
|
pandas.DataFrame(quotesList, columns=['Quote', 'Author', 'Tags'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 21 11:44:20 2019
@author: tanma
"""
import pandas as pd, numpy as np
from sklearn.preprocessing import StandardScaler
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, SpatialDropout1D, GRU, LSTM,Conv1D, concatenate, Dense
from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, Bidirectional
from keras.layers import CuDNNLSTM, CuDNNGRU
from bearing_cal import calculate_initial_compass_bearing as cal
from geographiclib.geodesic import Geodesic
import matplotlib.pyplot as plt
id_ = 30
pnew = pd.read_csv('no.csv')
pnew = pnew[pnew.track_id == id_]
copy = pnew.drop(['time','track_id','Unnamed: 0'],axis = 1)
pnew = pnew.drop(['time','track_id','Unnamed: 0','longitude','latitude'],axis = 1)
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
agg =
|
pd.concat(cols, axis=1)
|
pandas.concat
|
#%%
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
import seaborn as sns
import phd.viz
import phd.stats
import pickle
colors, palette = phd.viz.phd_style()
constants = phd.thermo.load_constants()
# Load the data set
data = pd.read_csv('../../data/ch2_induction/RazoMejia_2018.csv', comment='#')
# Load the flatchains for the prediction measurements.
with open('../../data/ch2_induction/mcmc/SI_I_O2_R260.pkl', 'rb') as file:
unpickler = pickle.Unpickler(file)
gauss_flatchain = unpickler.load()
gauss_flatlnprobability = unpickler.load()
ka_fc = np.exp(-gauss_flatchain[:, 0])[::100]
ki_fc = np.exp(-gauss_flatchain[:, 1])[::100]
#%%
# Compute the theoretical property curves.
rep_range = np.logspace(0, 4, 200)
prop_df =
|
pd.DataFrame([])
|
pandas.DataFrame
|
from src.prime_system import PrimeSystem
import pytest
import pandas as pd
import numpy as np
import numpy.testing
L = 100
rho = 1025
@pytest.fixture
def ps():
yield PrimeSystem(L=L,rho=rho)
def test_dict_prime(ps):
length = 10
values = {
'length' : length,
}
units = {
'length' : 'length',
}
values_prime = ps.prime(values=values, units=units)
assert values_prime['length'] == length/L
def test_dict_unprime(ps):
length = 10
values_prime = {
'length' : length/L,
}
units = {
'length' : 'length',
}
values = ps.unprime(values=values_prime, units=units)
assert values['length'] == length
def test_df_prime(ps):
length = np.ones(10)*10
values =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import unicodecsv as csv
import itertools
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the tables gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:type field_dict: dict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
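# --- Hedged usage sketch (illustrative only, not part of the hook) ---
# load_df() above writes the DataFrame to a temporary delimited file and
# delegates to load_file(), inferring Hive column types from df.dtypes when
# no field_dict is given. The connection id and table name below are made up.
def _example_load_df_to_hive():
    """Stage a small pandas DataFrame into a Hive text table (sketch)."""
    import pandas as pd
    hh = HiveCliHook(hive_cli_conn_id='hive_cli_default')
    df = pd.DataFrame({'state': ['NY', 'CA'], 'num': [10, 20]})
    hh.load_df(df, table='staging.example_names', recreate=True)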
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
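# Note (added comment): the client above is configured entirely from the
# metastore connection: host and port, plus the optional extras read in the
# code above, 'authMechanism' (NOSASL by default, or GSSAPI when core.security
# is kerberos) and 'kerberos_service_name' (defaults to 'hive').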
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition_name: Name of the partition to check for (eg `a=b/c=d`)
:type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (the Java short max value).
For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
t = self.get_table(table_name, db)
return True
except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library.
Note that the default authMechanism is PLAIN; to override it you
can specify it in the ``extra`` field of your connection in the UI.
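For example (illustrative value only, matching the key read by ``get_conn``
below): ``{"authMechanism": "GSSAPI"}``.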
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
self.log.warning(
"Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
self.log.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
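# Note (added comment): the dict returned above always has exactly two keys,
#   {'data': [row_tuple, ...], 'header': cur.description}
# where 'header' is the DB-API cursor description of the last statement that
# produced rows; get_records() and get_pandas_df() below rely on these keys.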
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
self.log.info("Running query: %s", hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
self.log.info("Written %s rows so far.", i)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df =
|
pd.DataFrame(res['data'])
|
pandas.DataFrame
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import partial
from typing import Any, Callable, Iterator, List, Optional, Tuple, Union, cast, no_type_check
import warnings
import pandas as pd
import numpy as np
from pandas.api.types import (
is_list_like,
is_interval_dtype,
is_bool_dtype,
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
is_numeric_dtype,
is_object_dtype,
)
from pandas.core.accessor import CachedAccessor
from pandas.io.formats.printing import pprint_thing
from pandas.api.types import CategoricalDtype, is_hashable
from pandas._libs import lib
from pyspark.sql import functions as F, Column
from pyspark.sql.types import FractionalType, IntegralType, TimestampType, TimestampNTZType
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Dtype, Label, Name, Scalar
from pyspark.pandas.config import get_option, option_context
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.missing.indexes import MissingPandasLikeIndex
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkIndexMethods
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
verify_temp_column_name,
validate_bool_kwarg,
ERROR_MESSAGE_CANNOT_COMBINE,
log_advice,
)
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
DEFAULT_SERIES_NAME,
SPARK_DEFAULT_INDEX_NAME,
SPARK_INDEX_NAME_FORMAT,
)
class Index(IndexOpsMixin):
"""
pandas-on-Spark Index that corresponds to pandas Index logically. This might hold Spark Column
internally.
Parameters
----------
data : array-like (1-dimensional)
dtype : dtype, default None
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
See Also
--------
MultiIndex : A multi-level, or hierarchical, Index.
DatetimeIndex : Index of datetime64 data.
Int64Index : A special case of :class:`Index` with purely integer labels.
Float64Index : A special case of :class:`Index` with purely float labels.
Examples
--------
>>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, 3]).index
Int64Index([1, 2, 3], dtype='int64')
>>> ps.DataFrame({'a': [1, 2, 3]}, index=list('abc')).index
Index(['a', 'b', 'c'], dtype='object')
>>> ps.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> ps.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
From a Series:
>>> s = ps.Series([1, 2, 3], index=[10, 20, 30])
>>> ps.Index(s)
Int64Index([1, 2, 3], dtype='int64')
From an Index:
>>> idx = ps.Index([1, 2, 3])
>>> ps.Index(idx)
Int64Index([1, 2, 3], dtype='int64')
"""
def __new__(
cls,
data: Optional[Any] = None,
dtype: Optional[Union[str, Dtype]] = None,
copy: bool = False,
name: Optional[Name] = None,
tupleize_cols: bool = True,
**kwargs: Any
) -> "Index":
if not is_hashable(name):
raise TypeError("Index.name must be a hashable type")
if isinstance(data, Series):
if dtype is not None:
data = data.astype(dtype)
if name is not None:
data = data.rename(name)
internal = InternalFrame(
spark_frame=data._internal.spark_frame,
index_spark_columns=data._internal.data_spark_columns,
index_names=data._internal.column_labels,
index_fields=data._internal.data_fields,
column_labels=[],
data_spark_columns=[],
data_fields=[],
)
return DataFrame(internal).index
elif isinstance(data, Index):
if copy:
data = data.copy()
if dtype is not None:
data = data.astype(dtype)
if name is not None:
data = data.rename(name)
return data
return cast(
Index,
ps.from_pandas(
pd.Index(
data=data,
dtype=dtype,
copy=copy,
name=name,
tupleize_cols=tupleize_cols,
**kwargs
)
),
)
@staticmethod
def _new_instance(anchor: DataFrame) -> "Index":
from pyspark.pandas.indexes.category import CategoricalIndex
from pyspark.pandas.indexes.datetimes import DatetimeIndex
from pyspark.pandas.indexes.multi import MultiIndex
from pyspark.pandas.indexes.numeric import Float64Index, Int64Index
instance: Index
if anchor._internal.index_level > 1:
instance = object.__new__(MultiIndex)
elif isinstance(anchor._internal.index_fields[0].dtype, CategoricalDtype):
instance = object.__new__(CategoricalIndex)
elif isinstance(
anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]), IntegralType
):
instance = object.__new__(Int64Index)
elif isinstance(
anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]), FractionalType
):
instance = object.__new__(Float64Index)
elif isinstance(
anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]),
(TimestampType, TimestampNTZType),
):
instance = object.__new__(DatetimeIndex)
else:
instance = object.__new__(Index)
instance._anchor = anchor # type: ignore[attr-defined]
return instance
@property
def _psdf(self) -> DataFrame:
return self._anchor
@property
def _internal(self) -> InternalFrame:
internal = self._psdf._internal
return internal.copy(
column_labels=internal.index_names,
data_spark_columns=internal.index_spark_columns,
data_fields=internal.index_fields,
column_label_names=None,
)
@property
def _column_label(self) -> Optional[Label]:
return self._psdf._internal.index_names[0]
def _with_new_scol(self, scol: Column, *, field: Optional[InternalField] = None) -> "Index":
"""
Copy pandas-on-Spark Index with the new Spark Column.
:param scol: the new Spark Column
:return: the copied Index
"""
internal = self._internal.copy(
index_spark_columns=[scol.alias(SPARK_DEFAULT_INDEX_NAME)],
index_fields=[
field
if field is None or field.struct_field is None
else field.copy(name=SPARK_DEFAULT_INDEX_NAME)
],
column_labels=[],
data_spark_columns=[],
data_fields=[],
)
return DataFrame(internal).index
spark = CachedAccessor("spark", SparkIndexMethods)
# This method is used via `DataFrame.info` API internally.
def _summary(self, name: Optional[str] = None) -> str:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
head, tail, total_count = tuple(
cast(
pd.DataFrame,
self._internal.spark_frame.select(
F.first(self.spark.column), F.last(self.spark.column), F.count(F.expr("*"))
).toPandas(),
).iloc[0]
)
if total_count > 0:
index_summary = ", %s to %s" % (pprint_thing(head), pprint_thing(tail))
else:
index_summary = ""
if name is None:
name = type(self).__name__
return "%s: %s entries%s" % (name, total_count, index_summary)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=list('abcd'))
>>> df.index.size
4
>>> df.set_index('dogs', append=True).index.size
4
"""
return len(self)
@property
def shape(self) -> tuple:
"""
Return a tuple of the shape of the underlying data.
Examples
--------
>>> idx = ps.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.shape
(3,)
>>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> midx.shape
(3,)
"""
return (len(self._psdf),)
def identical(self, other: "Index") -> bool:
"""
Similar to equals, but checks that other comparable attributes are
also equal.
Returns
-------
bool
If two Index objects have equal elements and same type True,
otherwise False.
Examples
--------
>>> from pyspark.pandas.config import option_context
>>> idx = ps.Index(['a', 'b', 'c'])
>>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
For Index
>>> idx.identical(idx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.identical(ps.Index(['a', 'b', 'c']))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.identical(ps.Index(['b', 'b', 'a']))
False
>>> idx.identical(midx)
False
For MultiIndex
>>> midx.identical(midx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.identical(ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.identical(ps.MultiIndex.from_tuples([('c', 'z'), ('b', 'y'), ('a', 'x')]))
False
>>> midx.identical(idx)
False
"""
from pyspark.pandas.indexes.multi import MultiIndex
self_name = self.names if isinstance(self, MultiIndex) else self.name
other_name = other.names if isinstance(other, MultiIndex) else other.name
return (
self_name == other_name # to support non-index comparison by short-circuiting.
and self.equals(other)
)
def equals(self, other: "Index") -> bool:
"""
Determine if two Index objects contain the same elements.
Returns
-------
bool
True if "other" is an Index and it has the same elements as calling
index; False otherwise.
Examples
--------
>>> from pyspark.pandas.config import option_context
>>> idx = ps.Index(['a', 'b', 'c'])
>>> idx.name = "name"
>>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx.names = ("nameA", "nameB")
For Index
>>> idx.equals(idx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.equals(ps.Index(['a', 'b', 'c']))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.equals(ps.Index(['b', 'b', 'a']))
False
>>> idx.equals(midx)
False
For MultiIndex
>>> midx.equals(midx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.equals(ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.equals(ps.MultiIndex.from_tuples([('c', 'z'), ('b', 'y'), ('a', 'x')]))
False
>>> midx.equals(idx)
False
"""
if same_anchor(self, other):
return True
elif type(self) == type(other):
if get_option("compute.ops_on_diff_frames"):
# TODO: avoid using default index?
with option_context("compute.default_index_type", "distributed-sequence"):
# Directly using Series from both self and other seems causing
# some exceptions when 'compute.ops_on_diff_frames' is enabled.
# Working around for now via using frame.
return (
cast(Series, self.to_series("self").reset_index(drop=True))
== cast(Series, other.to_series("other").reset_index(drop=True))
).all()
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
else:
return False
def transpose(self) -> "Index":
"""
Return the transpose. For Index, it will be the index itself.
Examples
--------
>>> idx = ps.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.transpose()
Index(['a', 'b', 'c'], dtype='object')
For MultiIndex
>>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> midx.transpose() # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
"""
return self
T = property(transpose)
def _to_internal_pandas(self) -> pd.Index:
"""
Return a pandas Index directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._psdf._internal.to_pandas_frame.index
def to_pandas(self) -> pd.Index:
"""
Return a pandas Index.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=list('abcd'))
>>> df['dogs'].index.to_pandas()
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
log_advice(
"`to_pandas` loads all data into the driver's memory. "
"It should only be used if the resulting pandas Index is expected to be small."
)
return self._to_internal_pandas().copy()
def to_numpy(self, dtype: Optional[Union[str, Dtype]] = None, copy: bool = False) -> np.ndarray:
"""
A NumPy ndarray representing the values in this Index or MultiIndex.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.Series([1, 2, 3, 4]).index.to_numpy()
array([0, 1, 2, 3])
>>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index.to_numpy()
array([(1, 4), (2, 5), (3, 6)], dtype=object)
"""
log_advice(
"`to_numpy` loads all data into the driver's memory. "
"It should only be used if the resulting NumPy ndarray is expected to be small."
)
result = np.asarray(self._to_internal_pandas()._values, dtype=dtype)
if copy:
result = result.copy()
return result
def map(
self, mapper: Union[dict, Callable[[Any], Any], pd.Series], na_action: Optional[str] = None
) -> "Index":
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or pd.Series
Mapping correspondence.
na_action : {None, 'ignore'}
If ‘ignore’, propagate NA values, without passing them to the mapping correspondence.
Returns
-------
applied : Index, inferred
The output of the mapping function applied to the index.
Examples
--------
>>> psidx = ps.Index([1, 2, 3])
>>> psidx.map({1: "one", 2: "two", 3: "three"})
Index(['one', 'two', 'three'], dtype='object')
>>> psidx.map(lambda id: "{id} + 1".format(id=id))
Index(['1 + 1', '2 + 1', '3 + 1'], dtype='object')
>>> pser = pd.Series(["one", "two", "three"], index=[1, 2, 3])
>>> psidx.map(pser)
Index(['one', 'two', 'three'], dtype='object')
"""
if isinstance(mapper, dict):
if len(set(type(k) for k in mapper.values())) > 1:
raise TypeError(
"If the mapper is a dictionary, its values must be of the same type"
)
return Index(
self.to_series().pandas_on_spark.transform_batch(
lambda pser: pser.map(mapper, na_action)
)
).rename(self.name)
@property
def values(self) -> np.ndarray:
"""
Return an array representing the data in the Index.
.. warning:: We recommend using `Index.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.Series([1, 2, 3, 4]).index.values
array([0, 1, 2, 3])
>>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index.values
array([(1, 4), (2, 5), (3, 6)], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
.. warning:: We recommend using `Index.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
An ndarray with int64 dtype.
Examples
--------
>>> ps.Index([1, 2, 3]).asi8
array([1, 2, 3])
Returns None for non-int64 dtype
>>> ps.Index(['a', 'b', 'c']).asi8 is None
True
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
if isinstance(self.spark.data_type, IntegralType):
return self.to_numpy()
elif isinstance(self.spark.data_type, (TimestampType, TimestampNTZType)):
return np.array(list(map(lambda x: x.astype(np.int64), self.to_numpy())))
else:
return None
@property
def has_duplicates(self) -> bool:
"""
If index has duplicates, return True, otherwise False.
Examples
--------
>>> idx = ps.Index([1, 5, 7, 7])
>>> idx.has_duplicates
True
>>> idx = ps.Index([1, 5, 7])
>>> idx.has_duplicates
False
>>> idx = ps.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"])
>>> idx.has_duplicates
True
>>> idx = ps.Index(["Orange", "Apple",
... "Watermelon"])
>>> idx.has_duplicates
False
"""
sdf = self._internal.spark_frame.select(self.spark.column)
scol = scol_for(sdf, sdf.columns[0])
return sdf.select(F.count(scol) != F.countDistinct(scol)).first()[0]
@property
def is_unique(self) -> bool:
"""
Return if the index has unique values.
Examples
--------
>>> idx = ps.Index([1, 5, 7, 7])
>>> idx.is_unique
False
>>> idx = ps.Index([1, 5, 7])
>>> idx.is_unique
True
>>> idx = ps.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"])
>>> idx.is_unique
False
>>> idx = ps.Index(["Orange", "Apple",
... "Watermelon"])
>>> idx.is_unique
True
"""
return not self.has_duplicates
@property
def name(self) -> Name:
"""Return name of the Index."""
return self.names[0]
@name.setter
def name(self, name: Name) -> None:
self.names = [name]
@property
def names(self) -> List[Name]:
"""Return names of the Index."""
return [
name if name is None or len(name) > 1 else name[0]
for name in self._internal.index_names
]
@names.setter
def names(self, names: List[Name]) -> None:
if not is_list_like(names):
raise ValueError("Names must be a list-like")
if self._internal.index_level != len(names):
raise ValueError(
"Length of new names must be {}, got {}".format(
self._internal.index_level, len(names)
)
)
if self._internal.index_level == 1:
self.rename(names[0], inplace=True)
else:
self.rename(names, inplace=True)
@property
def nlevels(self) -> int:
"""
Number of levels in Index & MultiIndex.
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name="idx"))
>>> psdf.index.nlevels
1
>>> psdf = ps.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')])
>>> psdf.index.nlevels
2
"""
return self._internal.index_level
def rename(self, name: Union[Name, List[Name]], inplace: bool = False) -> Optional["Index"]:
"""
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Parameters
----------
name : label or list of labels
Name(s) to set.
inplace : boolean, default False
Modifies the object directly, instead of creating a new Index or MultiIndex.
Returns
-------
Index or MultiIndex
The same type as the caller or None if inplace is True.
Examples
--------
>>> df = ps.DataFrame({'a': ['A', 'C'], 'b': ['A', 'B']}, columns=['a', 'b'])
>>> df.index.rename("c")
Int64Index([0, 1], dtype='int64', name='c')
>>> df.set_index("a", inplace=True)
>>> df.index.rename("d")
Index(['A', 'C'], dtype='object', name='d')
You can also change the index name in place.
>>> df.index.rename("e", inplace=True)
>>> df.index
Index(['A', 'C'], dtype='object', name='e')
>>> df # doctest: +NORMALIZE_WHITESPACE
b
e
A A
C B
Support for MultiIndex
>>> psidx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])
>>> psidx.names = ['hello', 'pandas-on-Spark']
>>> psidx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
names=['hello', 'pandas-on-Spark'])
>>> psidx.rename(['aloha', 'databricks']) # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
names=['aloha', 'databricks'])
"""
names = self._verify_for_rename(name)
internal = self._psdf._internal.copy(index_names=names)
if inplace:
self._psdf._update_internal_frame(internal)
return None
else:
return DataFrame(internal).index
def _verify_for_rename(self, name: Name) -> List[Label]:
if is_hashable(name):
if is_name_like_tuple(name):
return [name]
elif is_name_like_value(name):
return [(name,)]
raise TypeError("Index.name must be a hashable type")
# TODO: add downcast parameter for fillna function
def fillna(self, value: Scalar) -> "Index":
"""
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill holes (example: 0). This value cannot be a list-likes.
Returns
-------
Index :
filled with value
Examples
--------
>>> idx = ps.Index([1, 2, None])
>>> idx
Float64Index([1.0, 2.0, nan], dtype='float64')
>>> idx.fillna(0)
Float64Index([1.0, 2.0, 0.0], dtype='float64')
"""
if not isinstance(value, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(value).__name__)
sdf = self._internal.spark_frame.fillna(value)
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
)
return DataFrame(internal).index
# TODO: ADD keep parameter
def drop_duplicates(self) -> "Index":
"""
Return Index with duplicate values removed.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : Equivalent method on Series.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = ps.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
>>> idx.drop_duplicates().sort_values()
Index(['beetle', 'cow', 'hippo', 'lama'], dtype='object')
"""
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns
).drop_duplicates()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=self._internal.index_fields,
)
return DataFrame(internal).index
def to_series(self, name: Optional[Name] = None) -> Series:
"""
Create a Series with both index and values equal to the index keys,
useful with map for returning an indexer based on an index.
Parameters
----------
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=list('abcd'))
>>> df['dogs'].index.to_series()
a a
b b
c c
d d
dtype: object
"""
if not is_hashable(name):
raise TypeError("Series.name must be a hashable type")
scol = self.spark.column
field = self._internal.data_fields[0]
if name is not None:
scol = scol.alias(name_like_string(name))
field = field.copy(name=name_like_string(name))
elif self._internal.index_level == 1:
name = self.name
column_labels: List[Optional[Label]] = [name if is_name_like_tuple(name) else (name,)]
internal = self._internal.copy(
column_labels=column_labels,
data_spark_columns=[scol],
data_fields=[field],
column_label_names=None,
)
return first_series(DataFrame(internal))
def to_frame(self, index: bool = True, name: Optional[Name] = None) -> DataFrame:
"""
Create a DataFrame with a column containing the Index.
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
name : object, default None
The passed name should substitute for the index name (if it has
one).
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = ps.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame() # doctest: +NORMALIZE_WHITESPACE
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(name='zoo') # doctest: +NORMALIZE_WHITESPACE
zoo
animal
Ant Ant
Bear Bear
Cow Cow
"""
if name is None:
if self._internal.index_names[0] is None:
name = (DEFAULT_SERIES_NAME,)
else:
name = self._internal.index_names[0]
elif not is_name_like_tuple(name):
if is_name_like_value(name):
name = (name,)
else:
raise TypeError("unhashable type: '{}'".format(type(name).__name__))
return self._to_frame(index=index, names=[name])
def _to_frame(self, index: bool, names: List[Label]) -> DataFrame:
if index:
index_spark_columns = self._internal.index_spark_columns
index_names = self._internal.index_names
index_fields = self._internal.index_fields
else:
index_spark_columns = []
index_names = []
index_fields = []
internal = InternalFrame(
spark_frame=self._internal.spark_frame,
index_spark_columns=index_spark_columns,
index_names=index_names,
index_fields=index_fields,
column_labels=names,
data_spark_columns=self._internal.index_spark_columns,
data_fields=self._internal.index_fields,
)
return DataFrame(internal)
def is_boolean(self) -> bool:
"""
Return if the current index type is a boolean type.
Examples
--------
>>> ps.DataFrame({'a': [1]}, index=[True]).index.is_boolean()
True
"""
return is_bool_dtype(self.dtype)
def is_categorical(self) -> bool:
"""
Return if the current index type is a categorical type.
Examples
--------
>>> ps.DataFrame({'a': [1]}, index=[1]).index.is_categorical()
False
"""
return is_categorical_dtype(self.dtype)
def is_floating(self) -> bool:
"""
Return if the current index type is a floating type.
Examples
--------
>>> ps.DataFrame({'a': [1]}, index=[1]).index.is_floating()
False
"""
return is_float_dtype(self.dtype)
def is_integer(self) -> bool:
"""
Return if the current index type is an integer type.
Examples
--------
>>> ps.DataFrame({'a': [1]}, index=[1]).index.is_integer()
True
"""
return is_integer_dtype(self.dtype)
def is_interval(self) -> bool:
"""
Return if the current index type is an interval type.
Examples
--------
>>> ps.DataFrame({'a': [1]}, index=[1]).index.is_interval()
False
"""
return is_interval_dtype(self.dtype)
def is_numeric(self) -> bool:
"""
Return if the current index type is a numeric type.
Examples
--------
>>> ps.DataFrame({'a': [1]}, index=[1]).index.is_numeric()
True
"""
return is_numeric_dtype(self.dtype)
def is_object(self) -> bool:
"""
Return if the current index type is an object type.
Examples
--------
>>> ps.DataFrame({'a': [1]}, index=["a"]).index.is_object()
True
"""
return
|
is_object_dtype(self.dtype)
|
pandas.api.types.is_object_dtype
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 15:00:00 2018
@author: <NAME>
"""
import numpy as np
import pandas as pd
from scipy.spatial import Voronoi, ConvexHull
import signature.calculations as calc
from functools import partial
class MixedCrystalSignature:
"""Class for calculation of the Mixed Crystal Signature
Description in https://doi.org/10.1103/PhysRevE.96.011301"""
L_VEC = np.array([4, 5, 6], dtype=np.int32)  # Choose which l to use for calculation of qlm
MAX_L = np.max(L_VEC)
def __init__(self, solid_thresh=0.55, pool=None):
"""solid_thresh is a threshold between 0 (very disordered) and 1 (very crystalline)
pool is a pool from the multiprocessing module.
If no pool is provided, the calculation will be single-core."""
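# Note (added comment): the attributes initialised below are placeholders that
# later calculation steps of this class are expected to fill in (Voronoi
# tessellation, neighbour lists, convex hulls, Voronoi volumes, qlm arrays and
# finally the signature DataFrame); only the constructor is shown in this
# excerpt.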
self.solid_thresh = solid_thresh
self.inner_bool = None
self.indices = None
self.outsider_indices = None
self.insider_indices = None
self.voro = None
self.neighborlist = None
self.conv_hulls = None
self.voro_vols = None
self.qlm_arrays = None
self.signature =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!python3
"""
Download gene expression data from the GDC (TCGA) database.
"""
import os
import errno
import logging
import re
import glob
import gzip
import shutil
import requests
import pandas as pd
logging.basicConfig(filename='./annotation/download.log', level=logging.INFO)
try:
os.chdir("/home/yizhou/dockers/RStudio/data/expression_count")
except BaseException:
os.chdir("C:/users/jzhou/Desktop/expression_count")
def downloadData(df, directory='./sep'):
"""Use manifest file to download data using GDC data API.
Arguments
df: [pandas dataframe] of the manifest file downloaded from the GDC website.
directory: a [str] showing the directory to store the downloaded data
"""
homeDir = os.getcwd()
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
os.chdir(directory)
fileNum = df.filename.count()
logging.info(f"Manifest file contains {fileNum} files.")
# exclude existing files
# change counts to FPKM if downloading FPKM data
existFile = glob.glob("./**/*.counts.*", recursive=True)
existFile = [
re.sub(r".*\/(.*\.txt)(\.gz)?$", r"\1.gz", x) for x in existFile
] # include unzipped files
fileNum = len(existFile)
logging.info(f"{fileNum} files already exist, downloading the rest...")
url = 'https://api.gdc.cancer.gov/data/'
df = df[~df.filename.isin(existFile)]
# download files
uuid = df.id.tolist()
uuid = [url + x for x in uuid]
fileNum = len(uuid)
for id in uuid:
os.system(f"curl --remote-name --remote-header-name {id}")
logging.info(f"Downloaded {fileNum} files to {directory}")
os.chdir(homeDir)
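# Illustrative usage sketch (added comment; mirrors run() at the bottom of this
# module, which reads the manifest with pd.read_csv(manifest, sep="\t")):
#   manifest_df = pd.read_csv("manifest.txt", sep="\t")
#   downloadData(manifest_df, directory="./sep")
# The manifest is expected to provide at least the 'id' and 'filename' columns
# used above; the file name "manifest.txt" is just an example.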
def uuidToBarcode(df, directory='./annotation'):
"""Use manifest file to retrieve barcode information using GDC API.
Arguments
df: a [pandas dataframe] of the manifest file used to download TCGA files.
directory: a [str] showing the directory to store annotation.tsv and annot.tsv
Return
annot: a [pandas dataframe] of more information, and
annotDict: a dict of {filename: barcode}.
"""
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
annotFile = glob.glob(f"{directory}/annotation.tsv", recursive=True)
if not annotFile:
uuid = df.id.tolist()
params = {
"filters": {
"op": "in",
"content": {
"field": "files.file_id",
"value": uuid
}
},
"format": "TSV",
# There must be no space after comma
"fields": "file_id,file_name,cases.samples.submitter_id,cases.samples.sample_type,cases.project.project_id,cases.diagnoses.tumor_stage,cases.case_id",
"size": len(uuid)
}
url = "https://api.gdc.cancer.gov/files"
r = requests.post(url, json=params) # API requires using POST method
with open(f"{directory}/annotation.tsv", "w") as f:
f.write(r.text) # save raw annotation file
annot = pd.read_table(f"{directory}/annotation.tsv")
annot = annot[[
'file_name', 'cases.0.project.project_id',
'cases.0.samples.0.submitter_id', 'cases.0.samples.0.sample_type',
'cases.0.diagnoses.0.tumor_stage'
]]
annot = annot.rename(columns={
'cases.0.project.project_id': 'project',
'cases.0.samples.0.submitter_id': 'barcode',
'cases.0.samples.0.sample_type': 'sample_type',
'cases.0.diagnoses.0.tumor_stage': 'tumor_stage'
})
annot.file_name = annot.file_name.str.replace(
'.gz', '') # regex in pandas dataframe
annot.project = annot.project.str.replace('TCGA.', '')
# get specific digit in barcode
annot.sample_type = pd.Series([int(x[-3]) for x in annot.barcode])
annot.loc[annot.sample_type == 0, 'sample_type'] = 'tumor'
annot.loc[annot.sample_type == 1, 'sample_type'] = 'normal'
annot.to_csv(f"{directory}/annot.tsv", index=False)
# efficiently transform to dict
annotDict = dict(zip(annot.file_name, annot.barcode))
return (annot, annotDict)
def unzipAll():
"""Unzip all txt.gz files downloaded by the GDC file transfer tool.
WARNING: will remove all zipfiles!
"""
for zipfile in glob.iglob('./**/*.gz', recursive=True):
newfile = re.sub('.gz$', '', zipfile)
with gzip.open(zipfile, 'rb') as f_in, open(newfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipfile)
def mergeData(annot, annotDict, directory="./results", filedir='./sep'):
"""Merge all the downloaded data by column
Arguement
annot: [pandas dataframe] annot from function `uuidToBarcode`
annotDict: [dict] from function `uuidToBarcode`
"""
# db = sqlite3.connect('./results/results.sql')
projects = annot.project.unique().tolist()
for project in projects:
# Normal
annotation = annot[(annot.project == project) &
(annot.sample_type == "normal")]
cases = annotation.file_name.tolist()
if len(cases) != 0:
df = pd.read_csv(
f'{filedir}/{cases[0]}',
sep='\t',
names=['ensembl', annotDict[cases[0]]])
cases.pop(0) # Get first case (ensembls) and remove it from list
for case in cases:
try:
dfSingle = pd.read_csv(
f'{filedir}/{case}',
sep="\t",
names=['ensembl', annotDict[case]])
df = pd.merge(df, dfSingle, how='outer', on='ensembl')
except FileNotFoundError as e:
logging.warning(e)
# df.to_sql(name=project + '_normal', con=db, if_exists='replace')
df.to_csv(
f"{directory}/{project}_normal.csv", sep='\t', index=False)
logging.info(f'{project} normal finished!')
# Tumor
annotation = annot[(annot.project == project)
& (annot.sample_type == "tumor")]
cases = annotation.file_name.tolist()
if len(cases) != 0:
df = pd.read_csv(
f'{filedir}/{cases[0]}',
sep='\t',
names=['ensembl', annotDict[cases[0]]])
cases.pop(0)
for case in cases:
try:
dfSingle = pd.read_csv(
f'{filedir}/{case}',
sep="\t",
names=['ensembl', annotDict[case]])
df = pd.merge(df, dfSingle, how='outer', on='ensembl')
except FileNotFoundError as e:
logging.warning(e)
# df.to_sql(name=project + '_tumor', con=db, if_exists='replace')
df.to_csv(
f"{directory}/{project}_tumor.csv", sep='\t', index=False)
logging.info(f'{project} tumor finished!')
# db.close()
def run(manifest="manifest.txt"):
df =
|
pd.read_csv(manifest, sep="\t")
|
pandas.read_csv
|
from builtins import range
import pandas as pd
import numpy as np
from functools import partial
from multiprocessing import cpu_count, Pool
from tensorflow.keras.utils import Progbar
from chemml.chem import Molecule
from chemml.utils import padaxis
class CoulombMatrix(object):
"""
The implementation of coulomb matrix descriptors by <NAME> et al., 2012, PRL (all 3 different variations).
Parameters
----------
cm_type : str, optional (default='SC')
The coulomb matrix type, one of the following types:
* 'Unsorted_Matrix' or 'UM'
* 'Unsorted_Triangular' or 'UT'
* 'Eigenspectrum' or 'E'
* 'Sorted_Coulomb' or 'SC'
* 'Random_Coulomb' or 'RC'
max_n_atoms : int or 'auto', optional (default = 'auto')
Set the maximum number of atoms per molecule (to which all representations will be padded).
If 'auto', we find it based on all input molecules.
nPerm : int, optional (default = 3)
Number of permutation of coulomb matrix per molecule for Random_Coulomb (RC)
type of representation.
const : float, optional (default = 1)
The constant value for coordinates unit conversion to atomic unit
example: atomic unit -> const=1, Angstrom -> const=0.529
const/|Ri-Rj|, whose denominator is the Euclidean distance
between atoms i and j
n_jobs : int, optional(default=-1)
The number of parallel processes. If -1, uses all the available processes.
verbose : bool, optional(default=True)
The verbosity of messages.
Attributes
----------
n_molecules_ : int
Total number of molecules.
max_n_atoms_ : int
Maximum number of atoms in all molecules.
Examples
--------
>>> from chemml.chem import CoulombMatrix, Molecule
>>> m1 = Molecule('c1ccc1', 'smiles')
>>> m2 = Molecule('CNC', 'smiles')
>>> m3 = Molecule('CC', 'smiles')
>>> m4 = Molecule('CCC', 'smiles')
>>> molecules = [m1, m2, m3, m4]
>>> for mol in molecules: mol.to_xyz(optimizer='UFF')
>>> cm = CoulombMatrix(cm_type='SC', n_jobs=-1)
>>> features = cm.represent(molecules)
"""
def __init__(self, cm_type='SC', max_n_atoms = 'auto', nPerm=3, const=1,
n_jobs=-1, verbose=True):
self.CMtype = cm_type
self.max_n_atoms_ = max_n_atoms
self.nPerm = nPerm
self.const = const
self.n_jobs = n_jobs
self.verbose = verbose
def __cal_coul_mat(self, mol):
"""
Parameters
----------
mol : chemml.chem.Molecule
A molecule object with xyz information.
Returns
-------
numpy.ndarray
The Coulomb matrix of shape (max_n_atoms_, max_n_atoms_).
"""
if isinstance(mol, Molecule):
if mol.xyz is None:
msg = "The molecule must be a chemml.chem.Molecule object with xyz information."
raise ValueError(msg)
else:
msg = "The molecule must be a chemml.chem.Molecule object."
raise ValueError(msg)
mol = np.append(mol.xyz.atomic_numbers,mol.xyz.geometry, axis=1)
cm = []
for i in range(len(mol)):
vect = []
for k in range(0,i):
vect.append(cm[k][i])
for j in range(i,len(mol)):
if i==j:
vect.append(0.5*mol[i,0]**2.4)
else:
vect.append((mol[i,0]*mol[j,0]*self.const)/np.linalg.norm(mol[i,1:]-mol[j,1:]))
for m in range(len(mol), self.max_n_atoms_):
vect.append(0.0)
cm.append(vect)
# pad with zero values
if self.max_n_atoms_ > len(mol):
cm = padaxis(np.array(cm), self.max_n_atoms_, 0, 0)
return np.array(cm)[:self.max_n_atoms_, :self.max_n_atoms_] #shape nAtoms*nAtoms
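# Note (added comment): the loops above implement the standard Coulomb matrix,
# i.e. M_ii = 0.5 * Z_i ** 2.4 on the diagonal and
# M_ij = const * Z_i * Z_j / |R_i - R_j| off the diagonal, zero-padded up to
# max_n_atoms_ rows and columns.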
def represent(self, molecules):
"""
provides coulomb matrix representation for input molecules.
Parameters
----------
molecules : chemml.chem.Molecule object or array
If list, it must be a list of chemml.chem.Molecule objects, otherwise we raise a ValueError.
In addition, all the molecule objects must provide the XYZ information. Please make sure the XYZ geometry has been
stored or optimized in advance.
Returns
-------
features : Pandas DataFrame
A data frame with same number of rows as number of molecules will be returned.
The exact shape of the dataframe depends on the type of CM as follows:
- shape of Unsorted_Matrix (UM): (n_molecules, max_n_atoms**2)
- shape of Unsorted_Triangular (UT): (n_molecules, max_n_atoms*(max_n_atoms+1)/2)
- shape of eigenspectrums (E): (n_molecules, max_n_atoms)
- shape of Sorted_Coulomb (SC): (n_molecules, max_n_atoms*(max_n_atoms+1)/2)
- shape of Random_Coulomb (RC): (n_molecules, nPerm * max_n_atoms * (max_n_atoms+1)/2)
"""
# check input molecules
if isinstance(molecules, (list,np.ndarray)):
molecules = np.array(molecules)
elif isinstance(molecules, Molecule):
molecules = np.array([molecules])
else:
msg = "The molecule must be a chemml.chem.Molecule object or a list of objects."
raise ValueError(msg)
if molecules.ndim >1:
msg = "The molecule must be a chemml.chem.Molecule object or a list of objects."
raise ValueError(msg)
self.n_molecules_ = molecules.shape[0]
# max number of atoms based on the list of molecules
if self.max_n_atoms_ == 'auto':
try:
self.max_n_atoms_ = max([m.xyz.atomic_numbers.shape[0] for m in molecules])
except:
msg = "The xyz representation of molecules is not available."
raise ValueError(msg)
# pool of processes
if self.n_jobs == -1:
self.n_jobs = cpu_count()
pool = Pool(processes=self.n_jobs)
# Create an iterator
# http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
# find size of each batch
batch_size = int(len(molecules) / self.n_jobs)
if batch_size == 0:
batch_size = 1
molecule_chunks = chunks(molecules, batch_size)
# MAP: CM in parallel
map_function = partial(self._represent)
if self.verbose:
print('featurizing molecules in batches of %i ...' % batch_size)
pbar = Progbar(len(molecules), width=50)
tensor_list = []
for tensors in pool.imap(map_function, molecule_chunks):
pbar.add(len(tensors[0]))
tensor_list.append(tensors)
print('Merging batch features ... ', end='')
else:
tensor_list = pool.map(map_function, molecule_chunks)
if self.verbose:
print('[DONE]')
# REDUCE: Concatenate the obtained tensors
pool.close()
pool.join()
return pd.concat(tensor_list, axis=0, ignore_index=True)
def _represent(self, molecules):
# in parallel run the number of molecules is different from self.n_molecules_
n_molecules_ = len(molecules)
if self.CMtype == "Unsorted_Matrix" or self.CMtype == 'UM':
cms = np.array([])
for mol in molecules:
cm = self.__cal_coul_mat(mol)
cms = np.append(cms, cm.ravel())
cms = cms.reshape(n_molecules_, self.max_n_atoms_ ** 2)
cms = pd.DataFrame(cms)
return cms
elif self.CMtype == "Unsorted_Triangular" or self.CMtype == 'UT':
cms = np.array([])
for mol in molecules:
cm = self.__cal_coul_mat(mol)
cms = np.append(cms, cm[np.tril_indices(self.max_n_atoms_)])
cms = cms.reshape(n_molecules_, int(self.max_n_atoms_ * (self.max_n_atoms_ + 1) / 2))
cms = pd.DataFrame(cms)
return cms
elif self.CMtype == 'Eigenspectrum' or self.CMtype == 'E':
eigenspectrums = np.array([])
for mol in molecules:
cm = self.__cal_coul_mat(mol) # Check the constant value for unit conversion; atomic unit -> 1 , Angstrom -> 0.529
eig = np.linalg.eigvals(cm)
eig[::-1].sort()
eigenspectrums = np.append(eigenspectrums,eig)
eigenspectrums = eigenspectrums.reshape(n_molecules_, self.max_n_atoms_)
eigenspectrums = pd.DataFrame(eigenspectrums)
return eigenspectrums
elif self.CMtype == 'Sorted_Coulomb' or self.CMtype == 'SC':
sorted_cm = np.array([])
for mol in molecules:
cm = self.__cal_coul_mat(mol)
lambdas = np.linalg.norm(cm,2,1)
sort_indices = np.argsort(lambdas)[::-1]
cm = cm[:,sort_indices][sort_indices,:]
# sorted_cm.append(cm)
sorted_cm = np.append(sorted_cm, cm[np.tril_indices(self.max_n_atoms_)]) # lower-triangular
sorted_cm = sorted_cm.reshape(n_molecules_, int(self.max_n_atoms_ * (self.max_n_atoms_ + 1) / 2))
sorted_cm =
|
pd.DataFrame(sorted_cm)
|
pandas.DataFrame
|
import pandas as pd
import glob
import csv
files = [
"a100-results.csv",
"clx-1S-results.csv",
"clx-results.csv",
"gen9-results.csv",
"mi100-results.csv",
# "rome-results-aocc.csv",
"rome-results-cce.csv"]
csv_frames = []
for f in files:
csv_frames.append(
|
pd.read_csv(f, skipinitialspace=True)
|
pandas.read_csv
|
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS 2 data model."""
import pandas as pd
from tracetools_analysis.data_model import DataModel, DataModelIntermediateStorage
from trace_analysis.record.record_factory import RecordFactory, RecordsFactory
class Ros2DataModel(DataModel):
"""
Container to model pre-processed ROS 2 data for analysis.
This aims to represent the data in a ROS 2-aware way.
"""
def __init__(self) -> None:
"""Create a Ros2DataModel."""
super().__init__()
# Objects (one-time events, usually when something is created)
self._contexts: DataModelIntermediateStorage = []
self._nodes: DataModelIntermediateStorage = []
self._publishers: DataModelIntermediateStorage = []
self._subscriptions: DataModelIntermediateStorage = []
self._subscription_objects: DataModelIntermediateStorage = []
self._services: DataModelIntermediateStorage = []
self._clients: DataModelIntermediateStorage = []
self._timers: DataModelIntermediateStorage = []
self._timer_node_links: DataModelIntermediateStorage = []
self._callback_objects: DataModelIntermediateStorage = []
self._callback_symbols: DataModelIntermediateStorage = []
self._lifecycle_state_machines: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
self.lifecycle_transitions = RecordsFactory.create_instance()
self.callback_start_instances = RecordsFactory.create_instance()
self.callback_end_instances = RecordsFactory.create_instance()
self.dds_write_instances = RecordsFactory.create_instance()
self.dds_bind_addr_to_stamp = RecordsFactory.create_instance()
self.dds_bind_addr_to_addr = RecordsFactory.create_instance()
self.on_data_available_instances = RecordsFactory.create_instance()
self.rclcpp_intra_publish_instances = RecordsFactory.create_instance()
self.rclcpp_publish_instances = RecordsFactory.create_instance()
self.rcl_publish_instances = RecordsFactory.create_instance()
self.dispatch_subscription_callback_instances = RecordsFactory.create_instance()
self.dispatch_intra_process_subscription_callback_instances = (
RecordsFactory.create_instance()
)
self.message_construct_instances = RecordsFactory.create_instance()
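# Note (added comment): the underscore-prefixed lists above are intermediate
# storage; they accumulate plain dict records via the add_* methods below and
# are only turned into indexed pandas DataFrames in _finalize(). The Records
# instances, by contrast, are appended to directly as trace events arrive.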
def add_context(self, context_handle, timestamp, pid, version) -> None:
record = {
"context_handle": context_handle,
"timestamp": timestamp,
"pid": pid,
"version": version, # Comment out to align with Dict[str: int64_t]
}
self._contexts.append(record)
def add_node(self, node_handle, timestamp, tid, rmw_handle, name, namespace) -> None:
record = {
"node_handle": node_handle,
"timestamp": timestamp,
"tid": tid,
"rmw_handle": rmw_handle,
"namespace": namespace,
"name": name,
}
self._nodes.append(record)
def add_publisher(self, handle, timestamp, node_handle, rmw_handle, topic_name, depth) -> None:
record = {
"publisher_handle": handle,
"timestamp": timestamp,
"node_handle": node_handle,
"rmw_handle": rmw_handle,
"topic_name": topic_name,
"depth": depth,
}
self._publishers.append(record)
def add_rcl_subscription(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
record = {
"subscription_handle": handle,
"timestamp": timestamp,
"node_handle": node_handle,
"rmw_handle": rmw_handle,
"topic_name": topic_name,
"depth": depth,
}
self._subscriptions.append(record)
def add_rclcpp_subscription(
self, subscription_pointer, timestamp, subscription_handle
) -> None:
record = {
"subscription": subscription_pointer,
"timestamp": timestamp,
"subscription_handle": subscription_handle,
}
self._subscription_objects.append(record)
def add_service(self, handle, timestamp, node_handle, rmw_handle, service_name) -> None:
record = {
"service_handle": timestamp,
"timestamp": timestamp,
"node_handle": node_handle,
"rmw_handle": rmw_handle,
"service_name": service_name,
}
self._services.append(record)
def add_client(self, handle, timestamp, node_handle, rmw_handle, service_name) -> None:
record = {
"client_handle": handle,
"timestamp": timestamp,
"node_handle": node_handle,
"rmw_handle": rmw_handle,
"service_name": service_name,
}
self._clients.append(record)
def add_timer(self, handle, timestamp, period, tid) -> None:
record = {
"timer_handle": handle,
"timestamp": timestamp,
"period": period,
"tid": tid,
}
self._timers.append(record)
def add_timer_node_link(self, handle, timestamp, node_handle) -> None:
record = {
"timer_handle": handle,
"timestamp": timestamp,
"node_handle": node_handle,
}
self._timer_node_links.append(record)
def add_callback_object(self, reference, timestamp, callback_object) -> None:
record = {
"reference": reference,
"timestamp": timestamp,
"callback_object": callback_object,
}
self._callback_objects.append(record)
def add_callback_symbol(self, callback_object, timestamp, symbol) -> None:
record = {
"callback_object": callback_object,
"timestamp": timestamp,
"symbol": symbol,
}
self._callback_symbols.append(record)
def add_lifecycle_state_machine(self, handle, node_handle) -> None:
record = {
"state_machine_handle": handle,
"node_handle": node_handle,
}
self._lifecycle_state_machines.append(record)
def add_lifecycle_state_transition(
self, state_machine_handle, start_label, goal_label, timestamp
) -> None:
record = RecordFactory.create_instance(
{
"state_machine_handle": state_machine_handle,
"start_label": start_label,
"goal_label": goal_label,
"timestamp": timestamp,
}
)
self.lifecycle_transitions.append(record)
def add_callback_start_instance(
self, timestamp: int, callback: int, is_intra_process: bool
) -> None:
record = RecordFactory.create_instance(
{
"callback_start_timestamp": timestamp,
"callback_object": callback,
"is_intra_process": is_intra_process,
}
)
self.callback_start_instances.append(record)
def add_callback_end_instance(self, timestamp: int, callback: int) -> None:
record = RecordFactory.create_instance(
{"callback_end_timestamp": timestamp, "callback_object": callback}
)
self.callback_end_instances.append(record)
def add_rclcpp_intra_publish_instance(
self,
timestamp: int,
publisher_handle: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
"rclcpp_intra_publish_timestamp": timestamp,
"publisher_handle": publisher_handle,
"message": message,
}
)
self.rclcpp_intra_publish_instances.append(record)
def add_rclcpp_publish_instance(
self,
timestamp: int,
publisher_handle: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
"rclcpp_publish_timestamp": timestamp,
"publisher_handle": publisher_handle,
"message": message,
}
)
self.rclcpp_publish_instances.append(record)
def add_rcl_publish_instance(
self,
timestamp: int,
publisher_handle: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
"rcl_publish_timestamp": timestamp,
"publisher_handle": publisher_handle,
"message": message,
}
)
self.rcl_publish_instances.append(record)
def add_dds_write_instance(
self,
timestamp: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
"dds_write_timestamp": timestamp,
"message": message,
}
)
self.dds_write_instances.append(record)
def add_dds_bind_addr_to_addr(
self,
timestamp: int,
addr_from: int,
addr_to: int,
) -> None:
record = RecordFactory.create_instance(
{
"dds_bind_addr_to_addr_timestamp": timestamp,
"addr_from": addr_from,
"addr_to": addr_to,
}
)
self.dds_bind_addr_to_addr.append(record)
def add_dds_bind_addr_to_stamp(
self,
timestamp: int,
addr: int,
source_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
"dds_bind_addr_to_stamp_timestamp": timestamp,
"addr": addr,
"source_timestamp": source_timestamp,
}
)
self.dds_bind_addr_to_stamp.append(record)
def add_on_data_available_instance(
self,
timestamp: int,
source_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
"on_data_available_timestamp": timestamp,
"source_timestamp": source_timestamp,
}
)
self.on_data_available_instances.append(record)
def add_message_construct_instance(
self, timestamp: int, original_message: int, constructed_message: int
) -> None:
record = RecordFactory.create_instance(
{
"message_construct_timestamp": timestamp,
"original_message": original_message,
"constructed_message": constructed_message,
}
)
self.message_construct_instances.append(record)
def add_dispatch_subscription_callback_instance(
self,
timestamp: int,
callback_object: int,
message: int,
source_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
"dispatch_subscription_callback_timestamp": timestamp,
"callback_object": callback_object,
"message": message,
"source_timestamp": source_timestamp,
}
)
self.dispatch_subscription_callback_instances.append(record)
def add_dispatch_intra_process_subscription_callback_instance(
self,
timestamp: int,
callback_object: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
"dispatch_intra_process_subscription_callback_timestamp": timestamp,
"callback_object": callback_object,
"message": message,
}
)
self.dispatch_intra_process_subscription_callback_instances.append(record)
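# _finalize below converts each accumulated list of dicts into a pandas DataFrame
# and, when the list is non-empty, sets the corresponding handle/reference column
# as the index.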
def _finalize(self) -> None:
self.contexts = pd.DataFrame.from_dict(self._contexts)
if self._contexts:
self.contexts.set_index("context_handle", inplace=True, drop=True)
self.nodes = pd.DataFrame.from_dict(self._nodes)
if self._nodes:
self.nodes.set_index("node_handle", inplace=True, drop=True)
self.publishers = pd.DataFrame.from_dict(self._publishers)
if self._publishers:
self.publishers.set_index("publisher_handle", inplace=True, drop=True)
self.subscriptions = pd.DataFrame.from_dict(self._subscriptions)
if self._subscriptions:
self.subscriptions.set_index("subscription_handle", inplace=True, drop=True)
self.subscription_objects = pd.DataFrame.from_dict(self._subscription_objects)
if self._subscription_objects:
self.subscription_objects.set_index("subscription", inplace=True, drop=True)
self.services = pd.DataFrame.from_dict(self._services)
if self._services:
self.services.set_index("service_handle", inplace=True, drop=True)
self.clients = pd.DataFrame.from_dict(self._clients)
if self._clients:
self.clients.set_index("client_handle", inplace=True, drop=True)
self.timers = pd.DataFrame.from_dict(self._timers)
if self._timers:
self.timers.set_index("timer_handle", inplace=True, drop=True)
self.timer_node_links = pd.DataFrame.from_dict(self._timer_node_links)
if self._timer_node_links:
self.timer_node_links.set_index("timer_handle", inplace=True, drop=True)
self.callback_objects = pd.DataFrame.from_dict(self._callback_objects)
if self._callback_objects:
self.callback_objects.set_index("reference", inplace=True, drop=True)
self.callback_symbols = pd.DataFrame.from_dict(self._callback_symbols)
if self._callback_symbols:
self.callback_symbols.set_index("callback_object", inplace=True, drop=True)
self.lifecycle_state_machines =
|
pd.DataFrame.from_dict(self._lifecycle_state_machines)
|
pandas.DataFrame.from_dict
|
import os
import pathlib
import pickle
import random
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from S2S_load_sensor_data import read_data_datefolder_hourfile
from S2S_settings import settings
FPS = settings["FPS"]
FRAME_INTERVAL = settings["FRAME_INTERVAL"]
sample_counts = settings["sample_counts"]
def load_start_time(start_time_file, vid):
"""
load start time
Args:
start_time_file: str
vid: str, video
Returns:
int, start time
"""
df_start_time = pd.read_csv(start_time_file).set_index("video_name")
if vid not in df_start_time.index:
print("Error: ", vid, " not in ", start_time_file)
exit()
start_time = df_start_time.loc[vid]["start_time"]
return int(start_time)
def reliability_df_to_consecutive_seconds(
df_sensor_rel, window_size_sec, stride_sec, threshold=sample_counts
):
"""
Convert from reliability df to consecutive seconds represented with start and end time.
Args:
df_sensor_rel: dataframe, sensor reliability
window_size_sec: int, window size in seconds
stride_sec: int, stride in seconds
threshold: float, minimum SampleCounts for a second to count as reliable
Returns:
win_start_end: a list of all the possible [window_start, window_end] pairs.
"""
# use the threshold criterion to select 'good' seconds
rel_seconds = (
df_sensor_rel[df_sensor_rel["SampleCounts"] > threshold]
.sort_values(by="Time")["Time"]
.values
)
win_start_end = consecutive_seconds(rel_seconds, window_size_sec, stride_sec)
return win_start_end
def consecutive_seconds(rel_seconds, window_size_sec, stride_sec=1):
"""
Return a list of all the possible [window_start, window_end] pairs
containing consecutive seconds of length window_size_sec inside.
Args:
rel_seconds: a list of qualified seconds
window_size_sec: int
stride_sec: int
Returns:
win_start_end: a list of all the possible [window_start, window_end] pairs.
Test:
>>> rel_seconds = [2,3,4,5,6,7,9,10,11,12,16,17,18]; window_size_sec = 3; stride_sec = 1
>>> print(consecutive_seconds(rel_seconds, window_size_sec))
[[2, 4], [3, 5], [4, 6], [5, 7], [9, 11], [10, 12], [16, 18]]
"""
win_start_end = []
for i in range(0, len(rel_seconds) - window_size_sec + 1, stride_sec):
if rel_seconds[i + window_size_sec - 1] - rel_seconds[i] == window_size_sec - 1:
win_start_end.append([rel_seconds[i], rel_seconds[i + window_size_sec - 1]])
return win_start_end
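# load_vid_feat below assumes an .npz written roughly as
# numpy.savez(vid_file, feature=feat[None, ...]) with feat shaped (n_frames, n_features);
# frame timestamps are then spaced 1000/fps ms apart starting at start_time.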
def load_vid_feat(vid_file, fps, start_time):
feat = np.load(vid_file)["feature"][0]
print("video feature shape:", feat.shape)
frame_len = 1000.0 / fps # duration of a frame in ms
frames = feat.shape[0] # number of frames
len_ms = frames * frame_len # duration of all frames in ms
timestamps_int = np.arange(
start_time,
start_time + len_ms,
frame_len
).astype(int)
n_avail = min(len(timestamps_int), feat.shape[0])
timestamps_int = timestamps_int[:n_avail]
feat = feat[:n_avail, :]
df_flow = pd.DataFrame(
data=np.hstack((timestamps_int[:,None], feat)),
index=[i for i in range(feat.shape[0])],
columns=["time"]+['f'+str(i) for i in range(feat.shape[1])]
)
df_flow["second"] = (df_flow["time"] / 1000).astype(int)
df_flow = df_flow.reset_index()
return df_flow, len_ms
def load_sensors_cubic(
sensor_path, sub, device, sensors, sensor_col_headers, start_time, end_time, fps
):
"""
load sensor data with cubic spline resampling
Args:
sensor_path: str,
sub: str, subject
device: str
sensors: list, sensors
sensor_col_headers: list of sensor column headers
start_time: int
end_time: int
fps: float
Returns:
dataframe, sensor data
"""
df_list = []
for s, col in zip(sensors, sensor_col_headers):
df_sensor = read_data_datefolder_hourfile(
sensor_path, sub, device, s, start_time, end_time
)
df_sensor = df_sensor[["time", col]]
df_sensor["time"] = pd.to_datetime(df_sensor["time"], unit="ms")
df_sensor = df_sensor.set_index("time")
df_resample = df_sensor.resample(FRAME_INTERVAL).mean()
# FRAME_INTERVAL of 0.03336707S is the closest value to 1/29.969664 s that pandas accepts
df_resample = df_resample.interpolate(method="spline", order=3) # cubic spline interpolation
df_list.append(df_resample)
df_sensors = pd.concat(df_list, axis=1)
return df_sensors
def merge_sensor_flow(
df_sensor,
df_flow,
vid_name,
win_start_end,
start_time,
end_time,
window_size_sec,
window_criterion,
fps
):
"""
merge sensor flow
Args:
df_sensor: dataframe, sensor data
df_flow: dataframe, flow data
vid_name: str, video name
win_start_end: list
start_time: int
end_time: int
window_size_sec: int
window_criterion: float
fps: float
Returns:
int, count of windows
list, a list of all dataframes of videos
list, a list of all video data information
"""
df_dataset_vid = []
info_dataset_vid = []
cnt_windows = 0
# build the [start, end) millisecond range for each qualifying sensor-video window pair
for pair in win_start_end:
start = pair[0] * 1000
end = pair[1] * 1000 + 1000
df_window_sensor = df_sensor[
(df_sensor["time"] >= pd.to_datetime(start, unit="ms"))
& (df_sensor["time"] < pd.to_datetime(end, unit="ms"))
]
# match video dataframe
df_window_flow = df_flow[
(df_flow["time"] >= pd.to_datetime(start, unit="ms"))
& (df_flow["time"] < pd.to_datetime(end, unit="ms"))
]
# take an explicit copy of the slice to avoid pandas SettingWithCopyWarning
df_window_flow = df_window_flow.copy()
df_window = pd.merge_asof(
df_window_sensor,
df_window_flow,
on="time",
tolerance=pd.Timedelta("29.969664ms"),
direction="nearest",
).set_index("time")
df_window = df_window.dropna(how="any")
if len(df_window) > fps * window_size_sec * window_criterion:
cnt_windows += 1
df_dataset_vid.append(df_window)
info_dataset_vid.append(
[vid_name, start, end]
) # respectively: video name, sensor start time, sensor end time
return cnt_windows, df_dataset_vid, info_dataset_vid
def segment_video(
subject,
video,
window_size_sec,
stride_sec,
window_criterion,
starttime_file,
fps,
):
"""
Segment one smoking video.
Args:
subject: str
video: str
window_size_sec: int
stride_sec: int
window_criterion: float
starttime_file: str
fps: float
Returns:
list, a list of (video name, count of windows) pairs
list, a list of all dataframes of videos
list, a list of all video data information
"""
# ==========================================================================================
reliability_resample_path = settings['reliability_resample_path']
sensor_path = settings['sensor_path']
vid_feat_path = settings["vid_feat_path"]
# ==========================================================================================
vid_qual_win_cnt = []
df_dataset = []
info_dataset = []
device = "CHEST"
sensor = "ACCELEROMETER"
sensors = ["ACCELEROMETER_X", "ACCELEROMETER_Y", "ACCELEROMETER_Z"]
sensor_col_headers = ["accx", "accy", "accz"]
vid_file = os.path.join(
vid_feat_path, subject, "{}-flow.npz".format(video)
)
# load start end time
vid_name = subject + " " + video
start_time = load_start_time(starttime_file, vid_name)
# load optical flow data and assign unixtime to each frame
df_flow, len_ms = load_vid_feat(vid_file, fps, start_time)
end_time = int(start_time) + int(len_ms)
# load sensor reliability data
df_sensor_rel = read_data_datefolder_hourfile(
reliability_resample_path,
subject,
device,
sensor + "_reliability",
start_time,
end_time,
)
# record consecutive seconds of the length the same as window_size
win_start_end = reliability_df_to_consecutive_seconds(
df_sensor_rel, window_size_sec, stride_sec, threshold=7
)
## extract the optical flow frames of the good seconds according to sensor data
df_flow["time"] = pd.to_datetime(df_flow["time"], unit="ms")
df_flow = df_flow.set_index(
"time"
)
# Extract the raw 'ACCELEROMETER_X'/'ACCELEROMETER_Y'/'ACCELEROMETER_Z' data for the
# consecutive chunk and resample it onto the video frame timestamps.
# The sensor data must be resampled against the video time base, so the resampling
# input should be the raw data rather than already-resampled data (to avoid resampling twice).
df_sensors = load_sensors_cubic(
sensor_path,
subject,
device,
sensors,
sensor_col_headers,
start_time,
end_time,
fps,
)
# concatenate df_sensors and df_flow
df_resample = pd.merge_asof(
df_flow,
df_sensors,
on="time",
tolerance=
|
pd.Timedelta("30ms")
|
pandas.Timedelta
|
# -*- coding: utf-8 -*-
"""
Created 23 April 2019
mean_traces.py
Version 1
The purpose of this script is to pull all of the mean trace files that were
saved from the initial analysis. These traces are mean subtracted and filtered
and comprise the entire 6 s of recording. The idea here is to open the files
individually, extract the data, save it to a dataframe and compile all of the
files of the same genotype into a dataframe. Then take the mean. Then plot the
means vs. all traces for both OMP and Gg8.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import platform
''' ################## Define file structure on server #################### '''
# home_dir will depend on the OS, but the rest will not
# query machine identity and set home_dir from there
machine = platform.uname()[0]
if machine == 'Darwin':
home_dir = '/Volumes/Urban'
elif machine == 'Linux':
home_dir = '/run/user/1000/gvfs/smb-share:server=192.168.3.11,share=urban'
elif machine == 'Windows':
home_dir = os.path.join('N:', os.sep, 'urban')
else:
print("OS not recognized. \nPlease see Nate for correction.")
project_dir = os.path.join(home_dir, 'Huang', 'OSN_OMPvGg8_MTC')
figure_dir = os.path.join(project_dir, 'figures')
table_dir = os.path.join(project_dir, 'tables')
data_dir = os.path.join(project_dir, 'data')
''' ##########################################################################
This is all the analysis, figures, saving
Read in file metadata, open file from igor, convert to pandas
##############################################################################
'''
# grab all files in table_dir
file_list = os.listdir(table_dir)
trace_files = []
cell_ids = []
for file in file_list:
if 'timeseries' in file:
trace_files.append(file)
cell_id = file.split('_')[1] + '_' + file.split('_')[2]
cell_ids.append(cell_id)
else:
continue
traces_df = pd.DataFrame({'file name': trace_files, 'cell id': cell_ids})
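# Each cell id is the 2nd and 3rd underscore-separated fields of the file name,
# e.g. a (hypothetical) 'timeseries_JH190401_c1_....csv' yields 'JH190401_c1'.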
# grab data_notes to select out by cell type
analyzed_data_notes = pd.read_csv(os.path.join(table_dir, 'analyzed_data_notes.csv'), index_col=0)
mc_df = analyzed_data_notes[analyzed_data_notes['Cell type'] == 'MC']
# pull out gg8 cells
mc_gg8_df = mc_df[mc_df['Genotype'] == 'Gg8']
mc_gg8_list = mc_gg8_df['Cell name'].to_list()
mc_gg8_list = [name.split('_')[0] + '_' + name.split('_')[1] for name in mc_gg8_list]
mc_gg8_df = pd.DataFrame(mc_gg8_list, columns=['cell id'])
# pull out omp cells
mc_omp_df = mc_df[mc_df['Genotype'] == 'OMP']
mc_omp_list = mc_omp_df['Cell name'].to_list()
mc_omp_list = [name.split('_')[0] + '_' + name.split('_')[1] for name in mc_omp_list]
mc_omp_df = pd.DataFrame(mc_omp_list, columns=['cell id'])
# make list of Gg8 MCs
gg8_mcs = pd.merge(traces_df, mc_gg8_df)
gg8_mc_list = gg8_mcs['file name'].to_list()
# make list of OMP MCs
omp_mcs = pd.merge(traces_df, mc_omp_df)
omp_mc_list = omp_mcs['file name'].to_list()
# create empty dataframes for gg8 and omp cells
gg8_cells = pd.DataFrame()
omp_cells = pd.DataFrame()
# loop through all files, extract data and add to appropriate dataframes
for file in gg8_mc_list:
# open file and extract data into a new dataframe
mean_trace = pd.read_csv(os.path.join(table_dir, file), header=None)
gg8_cells = pd.concat([gg8_cells, mean_trace], axis=1, ignore_index=True)
for file in omp_mc_list:
# open file and extract data into a new dataframe
mean_trace = pd.read_csv(os.path.join(table_dir, file), header=None)
omp_cells = pd.concat([omp_cells, mean_trace], axis=1, ignore_index=True)
# Make separate time series for Gg8 example MC cell control and drug traces
gg8_example_ctrl = pd.DataFrame()
gg8_example_drug =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from copy import deepcopy
import numpy as np
import pandas as pd
import networkx as nx
import statsmodels.formula.api as smf
import statsmodels.api as sm
from scipy.cluster.vq import kmeans, whiten, vq
from gmeterpy.core.readings import Readings
from gmeterpy.core.adjustment import AdjustmentResults
from gmeterpy.core.dmatrices import (dmatrix_ties,
dmatrix_relative_gravity_readings)
def _closures(df, root=None):
"""Closures analysis in the network.
"""
network = nx.from_pandas_edgelist(df, 'from', 'to',
edge_attr='delta_g',
create_using=nx.DiGraph())
basis = nx.cycle_basis(network.to_undirected(), root=root)
out = []
for closure in basis:
closure_sum = 0
for node1, node2 in zip(closure, closure[1:] + closure[:1]):
if network.has_edge(node1, node2):
dg = network[node1][node2]['delta_g']
else:
dg = -network[node2][node1]['delta_g']
closure_sum += dg
out.append((closure, round(closure_sum, 4)))
return out
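# Hypothetical usage sketch (column names match the Ties frame defined further below):
#   df = pd.DataFrame({'from': ['A', 'B', 'C'], 'to': ['B', 'C', 'A'],
#                      'delta_g': [0.123, -0.045, -0.078]})
#   _closures(df)  # one basis cycle over A-B-C; its closure sum is ~0.0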
class RelativeReadings(Readings):
def __init__(self, *args, **kwargs):
auto_sid = kwargs.pop('auto_sid', False)
self.auto_setup_id = kwargs.pop('auto_setup_id', False)
nos = kwargs.pop('number_of_stations', None)
super().__init__(*args, **kwargs)
if auto_sid and nos is not None:
self.auto_sid(nos)
self.setup_id()
# TODO: auto_loop
if 'loop' not in self._data.columns:
self._data['loop'] = 1
def stations(self):
return self.data.name.unique()
def rgmeters(self):
return self.data.meter_sn.unique()
def auto_sid(self, number_of_stations):
whitened = whiten(np.asarray(self.data['g_result']))
codebook, _ = kmeans(whitened, number_of_stations, iter=100)
code, _ = vq(whitened, np.sort(codebook[::-1]))
self._data['sid'] = code
self.setup_id()
return self
def setup_id(self):
#TODO: by loop
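# A new setup starts wherever the station id (sid) changes between consecutive
# readings; idx collects those boundaries plus the first and last positions.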
idx = np.concatenate(([0], np.where(self.data['sid'][:-1].values !=
self.data['sid'][1:].values)[0] + 1,
[len(self.data)]))
rng = [(a, b) for a, b in zip(idx, idx[1:])]
setup = []
for i in range(len(rng)):
l, r = rng[i]
app = np.ones(r - l) * i
setup = np.append(setup, app)
self._data['setup'] = setup.astype('int') + 1
return self
def auto_loop(self):
raise NotImplementedError
@classmethod
def from_file(self, fname, **kwargs):
def parser(x): return datetime.datetime.strptime(
x, '%Y-%m-%d %H:%M:%S')
df = pd.read_csv(fname, delim_whitespace=True, parse_dates=[
['date', 'time']], index_col=0, date_parser=parser)
df.index.name = 'time'
return RelativeReadings(data=df)
def to_file(self, *args, **kwargs):
kwargs['before'] = ['sid', 'meter_sn']
kwargs['after'] = ['stdev']
super().to_file(*args, **kwargs)
def get_repeated_mask(self):
#TODO: return not only mask, but RelativeReadings
#TODO: by loop
data = self._data.copy()
rep = data.groupby('name').setup.unique().apply(len) > 1
rep = rep.reset_index()
rep.columns = ['name', 'in_repeated']
data = data.reset_index().merge(rep).set_index('time').sort_index()
mask = data.in_repeated.values
return mask
def dmatrices(self, w_col=None, **kwargs):
dm = dmatrix_relative_gravity_readings(self.data.copy(), **kwargs)
if w_col is not None:
wm = np.diag(self.data[w_col])
else:
wm = np.identity(len(dm))
y = np.asmatrix(self.data.g_result.copy()).T
return dm, wm, y
def adjust(self, gravity=True, drift_args={'drift_order':1},
sm_model=sm.RLM, sm_model_args={'M':sm.robust.norms.HuberT()},
**kwargs):
"""Least squares adjustment of the relative readings.
"""
# t0 = readings.data.jd.min()
# readings._data['dt0'] = readings.data.jd - t0
# design matrix
dm, _ , y = self.dmatrices(
gravity=gravity,
drift_args=drift_args,
**kwargs)
res = sm_model(y, dm, **sm_model_args).fit()
#readings.meta['proc']['t0'] = t0
#readings._meta.update({'proc': {
# 'drift_args' : drift_args}})
return RelativeReadingsResults(self, res)
class RelativeReadingsResults(AdjustmentResults):
def __init__(self, readings, results):
super().__init__(readings, results)
self.readings = self.model
#self.order = self.readings._meta['proc']['drift_order']
#self.scale = scale
#self.t0 = self.readings.data.jd.min()
#self.readings._data['dt0'] = self.readings.data.jd - self.t0
#self.readings._data['c_drift'] = np.around(
#self.drift(self.readings.data.dt0), 4)
#self.readings._data['resid'] = self.res.resid.values
#self.readings._data['weights'] = self.res.weights.values
def drift(self):
drift_params = self.res.params[
self.res.params.index.str.startswith('drift')]
coefs = np.append(self.res.params[-self.order:][::-1], 0)
return -np.poly1d(coefs, variable='t')
def has_ties(self):
if len(self.readings.stations()) < 2:
return False
else:
return True
def ties(self, ref=None, sort=False):
stations = self.readings.stations()
if not self.has_ties():
print('Warning: You have only one station. Nothing to tie with')
return Ties()
adjg = pd.DataFrame({
'g': self.res.params[stations],
'stdev': self.res.bse[stations]
})
if sort:
if isinstance(sort, bool):
adjg = adjg.sort_index()
elif isinstance(sort, list):
adjg = adjg.loc[sort]
if ref is None:
from_st = adjg.index.values[:-1]
to_st = adjg.index.values[1:]
delta_g = (adjg.g.shift(-1) - adjg.g).values[:-1]
elif isinstance(ref, str):
if ref not in stations:
raise Exception('Station {} does not exist.'.format(ref))
else:
from_st = ref
to_st = adjg[adjg.index != ref].index.values
delta_g = (adjg.loc[to_st].g - adjg.loc[from_st].g).values
elif isinstance(ref, list):
from_st, to_st = [p for p in zip(*ref)]
delta_g = [adjg.loc[p2].g - adjg.loc[p1].g for p1,
p2 in zip(from_st, to_st)]
ties = pd.DataFrame({
'from': from_st,
'to': to_st,
'delta_g': delta_g,
})
ties['date'] = self.readings.data.index.date[0].strftime('%Y-%m-%d')
ties['meter_sn'] = self.readings.data.meter_sn.unique()[0]
ties['operator'] = self.readings.data.operator.unique()[0]
count = self.readings.data.groupby('name').setup.unique()
for index, row in ties.iterrows():
name1 = row['from']
name2 = row['to']
var1 = self.res.bse[name1]**2
var2 = self.res.bse[name2]**2
covar = self.res.cov_params()[name1][name2]
stdev = np.sqrt(var1 + var2 - 2 * covar)
ties.loc[index, 'stdev'] = stdev
ties.loc[index, 'n'] = min(len(count[name2]), len(count[name1]))
return Ties(ties)
def report(self):
out = ''
meter = self.readings.rgmeters()[0]
out += 'Meter: '
out += str(meter) + '\n'
out += '== Parameters ==\n'
out += 'Truncate@start: '
out += str(self.readings._proc['truncate_before'])
out += '\nTruncate@end: '
out += str(self.readings._proc['truncate_after']) + '\n'
out += self.res.summary2().tables[0].to_string(index=False,
header=False)
out += '\n== Results ==\n'
out += self.res.summary2().tables[1].iloc[:, :2].to_string()
out += '\n== Covariance matrix ==\n'
pd.options.display.float_format = '{:.4E}'.format
out += self.res.cov_params().to_string()
return out
class Ties:
def __init__(self, df=None):
self.print_cols = ['from', 'to', 'date',
'meter_sn', 'operator', 'delta_g', 'stdev']
if df is not None:
self._data = df
else:
self._data = pd.DataFrame(columns=self.print_cols)
#df['meter_sn'] = df.meter_sn.astype(str)
# sort from and to
from_to = self._data[['from', 'to']].values
data = self._data[(from_to != np.sort(from_to))[:, 0]]
self._data.drop(data.index, inplace=True)
data = data.rename(index=str, columns={'from': 'to', 'to': 'from'})
data['delta_g'] = -data.delta_g
self._data = self._data.append(data, sort=True)[
self.print_cols].sort_values(['from', 'to'])
def copy(self):
return deepcopy(self)
@property
def data(self):
return self._data
@classmethod
def from_file(self, fname):
df = pd.read_csv(fname, delim_whitespace=True, parse_dates=[2])
return Ties(df=df)
def to_file(self, fname='ties.txt'):
pd.options.display.float_format = '{:.4f}'.format
with open(fname, 'w') as f:
f.write(self.__str__() + '\n')
@classmethod
def load_from_path(self, path, pattern='ties*txt'):
import os
import fnmatch
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import sys
import glob
import os
import re
import numpy as np
import logging
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
#inside pathx (MD)
def time_freq_filter(filex,complexName,per):
pathx = os.getcwd()
file = os.path.basename(filex)
fName = complexName
bondtype = file.split(".csv")[0].split("_merged_")[1]
first = pd.read_csv(filex)
os.chdir(pathx)
if not os.path.exists(f'{complexName}/04_time_freq_filter'):
os.makedirs(f'{complexName}/04_time_freq_filter', exist_ok=True)
pathxx=f'{pathx}/{complexName}/04_time_freq_filter'
os.chdir(pathxx)
pathy=pathxx+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered", exist_ok=True)
os.chdir(pathy)
if first.empty:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=["donor_acceptor","NumSpp","total","percentage"])
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres=pd.DataFrame(columns=['itype', 'donor_chain', 'acceptor_chain', 'donor_resnm', 'acceptor_resnm',
'donor_resid','acceptor_resid', 'donor_atom', 'acceptor_atom','chain_type',
"prot_or_dna",'specificity',"time"])
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
else:
# FIRST: for each donor-acceptor pair, compute the percentage of frames in which the bond is observed
logging.info('Finding percentages: {}'.format(fName))
firstx = []
for adx in first.donor_acceptor.unique () :
bbx = first[first["donor_acceptor"] == adx]
firstx.append([adx,
bbx.time.unique().size/first.time.unique().size*100])
firstxy = pd.DataFrame(firstx)
firstxy.columns = ["donor_acceptor","percentage"]
logging.info('Writing to file percentage: {}'.format(fName))
morefirstxy = firstxy[firstxy.percentage > float(per)]
if len(morefirstxy.donor_acceptor) == 0:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=firstxy.columns)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per) + "_freq_perres"):
os.makedirs(str(per) + "_freq_perres")
pathq = pathy + "/" + str(per) + "_freq_perres"
os.chdir(pathq)
first_perres= pd.DataFrame(columns=first.columns)
first_perres.to_csv(pathq + "/" + fName + "_" + bondtype + "_" + str(per) + "_freq_perres.csv", index=None)
else:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
logging.info('Writing to file list: {}'.format(fName))
first_perres = pd.DataFrame()
for da in morefirstxy.donor_acceptor.unique():
df = first[first.donor_acceptor == da]
first_perres=first_perres.append(df)
first_perres.sort_values(by="time",inplace=True)
first_perres.reset_index(drop=True)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
def make_freq_folders(pathy,per):
"""
Creates folders to write and read common and complex-specific bonds within 05_compare_cx_spp folder
:param pathy: path to 05_compare_cx_spp
:param per: time percentage
"""
import os
os.chdir(pathy)
pathz=pathy+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered",exist_ok=True)
for fold in ["_freq","_freq_perres"]:
os.chdir(pathz)
#to add freq
pathq=pathz+"/"+str(per)+fold
if not os.path.exists(str(per)+fold):
os.makedirs(str(per)+fold,exist_ok=True)
os.chdir(pathq)
pathq_common=pathq+"/common"
if not os.path.exists("common"):
os.makedirs("common",exist_ok=True)
os.chdir(pathq)
pathq_spp=pathq+"/complex_specific"
if not os.path.exists("complex_specific"):
os.makedirs("complex_specific",exist_ok=True)
def get_paths(pathy,per,fold,com_spp):
import os
os.chdir(pathy)
PathToWrite = pathy + "/" + per + "_" + "freq_filtered/" + per + fold + "/" + com_spp
return PathToWrite
def compare_bonds(complexName,per):
pathx = os.getcwd()
fName = complexName[0]
sName = complexName[1]
file_lists_freq_fName = glob.glob(f'{pathx}/{fName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq_sName = glob.glob(f'{pathx}/{sName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq = file_lists_freq_fName + file_lists_freq_sName
ToCompare = {}
for filex in file_lists_freq:
file = os.path.basename(filex)
if fName in filex:
Name = fName
else:
Name = sName
bondtype = file.split(f'{Name}_')[1].split("_")[0]
if bondtype == "ring":
bondtype = "ring_stacking"
first = pd.read_csv(filex)
if bondtype in ToCompare.keys():
ToCompare[bondtype].update({Name: first})
else:
ToCompare.update({bondtype: {Name: first}})
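# ToCompare maps bond type -> {complex name -> frequency-filtered DataFrame},
# so each bond type can be compared across the two complexes below.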
for bondtype in ToCompare.keys():
os.chdir(pathx)
pathy = f'{pathx}/{fName}/05_compare_complex'
if not os.path.exists(f'{pathx}/{fName}/05_compare_complex'):
os.makedirs(f'{pathx}/{fName}/05_compare_complex',exist_ok=True)
os.chdir(pathy)
pathz = f'{pathx}/{sName}/05_compare_complex'
if not os.path.exists(f'{pathx}/{sName}/05_compare_complex'):
os.makedirs(f'{pathx}/{sName}/05_compare_complex',exist_ok=True)
os.chdir(pathz)
make_freq_folders(pathy, per)
fold="_freq"
morefirstxy = ToCompare[bondtype][fName]
fold="_freq_perres"
patha=f'{pathx}/{fName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}{fold}'
first = pd.read_csv(patha+"/"+fName+"_"+bondtype+"_"+str(per)+fold+".csv")
# SECOND: load the frequency-filtered and per-residue data for the second complex
make_freq_folders(pathz, per)
fold="_freq"
moresecxy = ToCompare[bondtype][sName]
logging.info("sName : {}".format(sName))
fold="_freq_perres"
patha=f'{pathx}/{sName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}{fold}'
sec = pd.read_csv(patha+"/"+sName+"_"+bondtype+"_"+str(per)+fold+".csv")
#find bonds specific to first one
logging.info("Specific to {}".format(fName))
i = 0
spp_first= pd.DataFrame(columns=morefirstxy.columns)
common_first= pd.DataFrame(columns=morefirstxy.columns)
for item in morefirstxy.donor_acceptor:
item_swapped = item.split(":")[1]+":"+item.split(":")[0]
if item in moresecxy.donor_acceptor.unique():
common_first = common_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
elif item_swapped in moresecxy.donor_acceptor.unique():
common_first = common_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
else:
spp_first = spp_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
i = i+1
spp_first.sort_values(by="donor_acceptor", ascending=False)
spp_first.reset_index(drop=True,inplace=True)
fold="_freq"
com_spp="complex_specific"
pathq_spp=get_paths(pathy,str(per),fold,com_spp)
spp_first.to_csv (pathq_spp+"/"+fName+"_"+bondtype+"_compared_spec.csv", index=False)
common_first.sort_values(by="donor_acceptor", ascending=False)
common_first.reset_index(drop=True,inplace=True)
com_spp="common"
pathq_common=get_paths(pathy,str(per),fold,com_spp)
common_first.to_csv (pathq_common+"/"+fName+"_"+bondtype+"_compared_common.csv", index=False)
#find bonds specific to second one
logging.info("Specific to {}".format(sName))
i = 0
spp_sec= pd.DataFrame(columns=moresecxy.columns)
common_sec= pd.DataFrame(columns=moresecxy.columns)
for item in moresecxy.donor_acceptor:
item_swapped = item.split(":")[1] + ":" + item.split(":")[0]
if item in morefirstxy.donor_acceptor.unique():
common_sec = common_sec.append(
|
pd.DataFrame(moresecxy.iloc[i,:])
|
pandas.DataFrame
|
import os
import re
import config
import constants
import transform
import numpy as np
import pandas as pd
import matplotlib as mpl
from scipy.spatial import distance_matrix
import plotly as py
files_location = config.data_source_file_location
files = os.listdir(files_location)
def extract_data_ci(years):
pass
def extract_data_pi(years):
pass
def extract_data_gi(years):
pass
###############################################
# Read in the datasets, this would be a good future location
# to abstract away from 2016 and have a general year or subset here.
# Or to create a function which would be given the desired frame
# to be processed.
def extract_all_lazy():
""" This is a temporary file which utilizes the 2016 data.
In the future this should use database connection and call directly from the DB.
Returns a dictionary of data frames that have been moderately transformed. Moderately transformed is subsetting rather than direct manipulation. """
#Construct filepaths: Data COMP_INFO_1
data_ci1_name = "DATA_2016_COMP_INFO_1.csv"
data_ci1_fullname = os.path.join(files_location, data_ci1_name)
#Data COMP_INFO_2
data_ci2_name = "DATA_2016_COMP_INFO_2.csv"
data_ci2_fullname = os.path.join(files_location, data_ci2_name)
#Data PROPERTY INFO
data_pi_name = "DATA_2016_PROPERTY_INFO_ST.csv"
data_pi_fullname = os.path.join(files_location, data_pi_name)
#Data General Info
data_gi_name = "DATA_2016_GENERAL_INFO.csv"
data_gi_fullname = os.path.join(files_location, data_gi_name)
#Read & Process COMP_INFO
data_ci1 =
|
pd.read_csv(data_ci1_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')
|
pandas.read_csv
|