| prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90) |
|---|---|---|
import pandas as pd
import numpy as np
import re
from pandas import DataFrame
NWR = pd.read_excel('NWR_ALDT.xls', sheet_name='ICT')
# print(NWR.columns)
# con=(NWR['(1) ROUTE', '(21) ROUTES LOCKED'])
a: DataFrame = pd.DataFrame(NWR[['(1) ROUTE', '(21) ROUTES LOCKED']])
b = pd.DataFrame(NWR['(1) ROUTE'])
e= pd.DataFrame('ASSIGN ' + b + '.TCS.SI')
c = pd.DataFrame(NWR['(21) ROUTES LOCKED'])
# api: pandas.DataFrame
# -*- coding: utf-8 -*-
import yfinance as yf
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import os
import math
import matplotlib.pylab as plt
import matplotlib
from Machine_Learning_for_Asset_Managers import ch2_fitKDE_find_best_bandwidth as best_bandwidth
from Machine_Learning_for_Asset_Managers import ch2_marcenko_pastur_pdf as mp
from Machine_Learning_for_Asset_Managers import ch2_monte_carlo_experiment as mc
from Machine_Learning_for_Asset_Managers import ch4_optimal_clustering as oc
import onc as onc
from Machine_Learning_for_Asset_Managers import ch5_financial_labels as fl
from Machine_Learning_for_Asset_Managers import ch7_portfolio_construction as pc
#import mlfinlab.trend_scanning as ts
#import mlfinlab.nco as nco
#import mlfinlab as ml # used for testing code
#from mlfinlab.portfolio_optimization.mean_variance import MeanVarianceOptimisation
#from mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimators
#Resources:
#Random matrix theory: https://calculatedcontent.com/2019/12/03/towards-a-new-theory-of-learning-statistical-mechanics-of-deep-neural-networks/
#Review: [Book] Commented summary of Machine Learning for Asset Managers by <NAME>
#https://gmarti.gitlab.io/qfin/2020/04/12/commented-summary-machine-learning-for-asset-managers.html
#Chapter 2: This chapter essentially describes an approach that Bouchaud and his crew from the CFM have
#pioneered and refined for the past 20 years. The latest iteration of this body of work is summarized in
#Joel Bun’s Cleaning large correlation matrices: Tools from Random Matrix Theory.
#https://www.sciencedirect.com/science/article/pii/S0370157316303337
#Condition number: https://dominus.ai/wp-content/uploads/2019/11/ML_WhitePaper_MarcoGruppo.pdf
# Exercise 2.9:
# 2. Using a matrix of stock returns:
# a) Compute the covariance matrix.
#    What is the condition number of the correlation matrix?
# b) Compute one hundred efficient frontiers by drawing one hundred
#    alternative vectors of expected returns from a Normal distribution
#    with mean 10% and std 10%.
# c) Compute the variance of the errors against the mean efficient frontier.
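# A minimal sketch for part (a) of the exercise (an illustration added here, not part of the
# original script): for a (T x N) matrix of stock returns, the condition number of the
# correlation matrix is the ratio of its largest to its smallest eigenvalue.
def condition_number_of_correlation(returns):
    corr = np.corrcoef(returns, rowvar=0)    # N x N correlation matrix
    eigenvalues = np.linalg.eigvalsh(corr)   # eigenvalues of the symmetric matrix, ascending
    return eigenvalues[-1] / eigenvalues[0]  # largest / smallest eigenvalue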
def get_OL_tickers_close(T=936, N=234):
# N - num stocks in portfolio, T lookback time
ol = pd.read_csv('csv/ol_ticker.csv', sep='\t', header=None)
ticker_names = ol[0]
closePrice = np.empty([T, N])
covariance_matrix = np.empty([T, N])
portfolio_name = [ [ None ] for x in range( N ) ]
ticker_adder = 0
for i in range(0, len(ticker_names)): #len(ticker_names)): # 46
ticker = ticker_names[i]
print(ticker)
ol_ticker = ticker + '.ol'
df = yf.Ticker(ol_ticker)
#'shortName' in df.info and
try:
ticker_df = df.history(period="7y")
if ticker=='EMAS': print("****EMAS******")
if ticker=='AVM': print("****AVM*********")
if ticker_df.shape[0] > T and ticker != 'EMAS' and ticker != 'AVM': # only keep tickers with more than T days of history
#1.Stock Data
closePrice[:,ticker_adder] = ticker_df['Close'][-T:].values # inserted from oldest tick to newest tick
portfolio_name[ticker_adder] = ol_ticker
ticker_adder += 1
else:
print("no data for ticker:" + ol_ticker)
except ValueError:
print("no history:"+ol_ticker)
return closePrice, portfolio_name
def denoise_OL(S, do_plot=True):
np.argwhere( np.isnan(S) )
# cor.shape = (1000,1000). If rowvar=1 - row represents a var, with observations in the columns.
cor = np.corrcoef(S, rowvar=0)
eVal0 , eVec0 = mp.getPCA( cor )
print(np.argwhere(np.isnan(np.diag(eVal0))))
# code snippet 2.4
T = float(S.shape[0])
N = S.shape[1]
q = float(S.shape[0])/S.shape[1] #T/N
eMax0, var0 = mp.findMaxEval(np.diag(eVal0), q, bWidth=.01)
nFacts0 = eVal0.shape[0]-np.diag(eVal0)[::-1].searchsorted(eMax0)
if do_plot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(np.diag(eVal0), density = True, bins=100) #, normed = True) #normed = True,
pdf0 = mp.mpPDF(var0, q=S.shape[0]/float(S.shape[1]), pts=N)
pdf1 = mp.fitKDE( np.diag(eVal0), bWidth=.005) #empirical pdf
#plt.plot(pdf1.keys(), pdf1, color='g') #no point in drawing this
plt.plot(pdf0.keys(), pdf0, color='r')
plt.show()
# code snippet 2.5 - denoising by constant residual eigenvalue
corr1 = mp.denoisedCorr(eVal0, eVec0, nFacts0)
eVal1, eVec1 = mp.getPCA(corr1)
return eVal0, eVec0, eVal1, eVec1, corr1, var0
#consider using log-returns (a sketch follows calculate_returns below)
def calculate_returns( S, percentageAsProduct=False ):
ret = np.zeros((S.shape[0]-1, S.shape[1]))
cum_sums = np.zeros(S.shape[1])
for j in range(0, S.shape[1]):
cum_return = 0
S_ret = np.zeros(S.shape[0]-1)
for i in range(0,S.shape[0]-1):
if percentageAsProduct==True:
S_ret[i] = 1+((S[i+1,j]-S[i,j])/S[i,j])
else:
S_ret[i] = ((S[i+1,j]-S[i,j])/S[i,j])
cum_return = np.prod(S_ret)-1
cum_sums[j] = cum_return
ret[:, j] = S_ret
return ret, cum_sums
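# A minimal sketch of the log-return alternative mentioned above (an illustration added here,
# not part of the original script): log returns are differences of log prices and are additive
# over time.
def calculate_log_returns(S):
    log_ret = np.diff(np.log(S), axis=0)  # (T-1) x N matrix of log returns
    cum_log_ret = log_ret.sum(axis=0)     # cumulative log return per instrument
    return log_ret, cum_log_ret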
def getVolatility(S): #std of instruments
return [np.std(S[:,i]) for i in range(0, S.shape[1])]
def test_exception_in_plotting_efficient_frontier(S_value):
# pylint: disable=invalid-name, protected-access
"""
Test raising of exception when plotting the efficient frontier.
"""
mvo = MeanVarianceOptimisation()
pdPrice = pd.DataFrame(S_value)
pdPrice.index = pd.RangeIndex(start=0, stop=6, step=1)
dates = ['2019-01-01','2019-02-01','2019-03-01','2019-04-01','2019-05-01','2019-06-01']
pdPrice['Datetime'] = pd.to_datetime(dates)
pdPrice = pdPrice.set_index('Datetime')
expected_returns = ReturnsEstimators().calculate_mean_historical_returns(asset_prices=pdPrice, resample_by=None) #'W')
covariance = ReturnsEstimators().calculate_returns(asset_prices=pdPrice, resample_by=None).cov()
plot = mvo.plot_efficient_frontier(covariance=covariance, max_return=1.0, expected_asset_returns=expected_returns)
assert len(plot._A) == 41
plot.savefig('books_read.png')
print("read books")
# Chapter 7 - apply the Nested Clustered Optimization (NCO) algorithm
def testNCO():
N = 5
T = 5
S_value = np.array([[1., 2,3, 4,5],
[1.1,3,2, 3,5],
[1.2,4,1.3,4,5],
[1.3,5,1, 3,5],
[1.4,6,1, 4,5.5],
[1.5,7,1, 3,5.5]])
S_value[:,1] =1
S_value[5,1] =1.1
S, _ = calculate_returns(S_value)
_, instrument_returns = calculate_returns(S_value, percentageAsProduct=True)
np.testing.assert_almost_equal(S, pd.DataFrame(S_value).pct_change().dropna(how="all"))
mu1 = None
cov1_d = np.cov(S ,rowvar=0, ddof=1)
#test baseClustering
corr1 = mp.cov2corr(cov1_d)
a,b,c = nco.NCO()._cluster_kmeans_base(pd.DataFrame(corr1))
d,e,f = oc.clusterKMeansBase(pd.DataFrame(corr1))
#b={0: [0, 1, 2], 1: [3, 4]}
#e={0: [0, 3, 4], 1: [1, 2]}
min_var_markowitz = mc.optPort(cov1_d, mu1).flatten()
#compare min_var_markowitz with mlfinlab impl
#ml.
min_var_NCO = pc.optPort_nco(cov1_d, mu1, max(int(cov1_d.shape[0]/2), 2)).flatten()
mlfinlab_NCO= nco.NCO().allocate_nco(cov1_d, mu1, max(int(cov1_d.shape[0]/2), 2)).flatten()
cov1_d = np.cov(S,rowvar=0, ddof=1)
mlfinlab_NCO= nco.NCO().allocate_nco(cov1_d, mu1, int(cov1_d.shape[0]/2)).flatten()
expected_return_markowitz = [min_var_markowitz[i]*instrument_returns[i] for i in range(0,cov1_d.shape[0])]
e_m = sum(expected_return_markowitz)
expected_return_NCO = [min_var_NCO[i]*instrument_returns[i] for i in range(0,cov1_d.shape[0])]
e_NCO = sum(expected_return_NCO)
vol = getVolatility(S_value)
m_minVol = [min_var_markowitz[i]*vol[i] for i in range(0, cov1_d.shape[0])]
NCO_minVol = [mlfinlab_NCO[i]*vol[i] for i in range(0, cov1_d.shape[0])]
if __name__ == '__main__':
testNCO()
N = 333 #3
T = 936
S_value = np.loadtxt('csv/ol184.csv', delimiter=',')
if S_value.shape[0] < 1 or not os.path.exists('csv/portfolio_name.csv'):
S_value, portfolio_name = get_OL_tickers_close(T, N)
np.savetxt('csv/ol184.csv', S_value, delimiter=',')
np.savetxt('csv/portfolio_name.csv', portfolio_name, delimiter=',', fmt='%s')
portfolio_name = pd.read_csv('csv/portfolio_name.csv', sep='\t', header=None).values
lastIndex = 173
S_value = S_value[:,0:lastIndex] # S = S[:,6:9]
portfolio_name = portfolio_name[0:lastIndex] #portfolio_name = portfolio_name[6:9]
# use matrix of returns to calc correlation
S, instrument_returns = calculate_returns(S_value)
_, instrument_returns = calculate_returns(S_value, percentageAsProduct=True)
print(np.asarray(portfolio_name)[np.argsort(instrument_returns)]) #prints performance ascending
#calculate_correlation(S)
eVal0, eVec0, denoised_eVal, denoised_eVec, denoised_corr, var0 = denoise_OL(S)
detoned_corr = mp.detoned_corr(denoised_corr, denoised_eVal, denoised_eVec, market_component=1)
detoned_eVal, detoned_eVec = mp.getPCA(detoned_corr)
denoised_eigenvalue = np.diag(denoised_eVal)
eigenvalue_prior = np.diag(eVal0)
plt.plot(range(0, len(denoised_eigenvalue)), np.log(denoised_eigenvalue), color='r', label="Denoised eigen-function")
plt.plot(range(0, len(eigenvalue_prior)), np.log(eigenvalue_prior), color='g', label="Original eigen-function")
plt.xlabel("Eigenvalue number")
plt.ylabel("Eigenvalue (log-scale)")
plt.legend(loc="upper right")
plt.show()
#from code snippet 2.10
detoned_cov = mc.corr2cov(detoned_corr, var0)
w = mc.optPort(detoned_cov)
print(w)
#min_var_port = 1./nTrials*(np.sum(w, axis=0))
#print(min_var_port)
#expected portfolio variance: W^T.(Cov).W
#https://blog.quantinsti.com/calculating-covariance-matrix-portfolio-variance/
minVarPortfolio_var = np.dot(np.dot(w.T, detoned_cov), w)
#Expected return: w.T . mu
# https://www.mn.uio.no/math/english/research/projects/focustat/publications_2/shatthik_barua_master2017.pdf p8
# or I.T.cov^-1.mu / I.T.cov^-1.I
#inv = np.linalg.inv(cov)
#e_r = np.dot(np.dot(ones.T, inv), mu) / np.dot(np.dot(ones.T, inv), ones)
#Chapter 4 optimal clustering
# recreate fig 4.1 colormap of random block correlation matrix
nCols, minBlockSize = 183, 2
print("minBlockSize"+str(minBlockSize))
corr0 = detoned_corr
corr1, clstrs, silh = oc.clusterKMeansTop(pd.DataFrame(detoned_corr)) #1: [18, 24, 57, 81, 86, 99, 112, 120, 134, 165]
tStatMeanDepth = np.mean([np.mean(silh[clstrs[i]]) / np.std(silh[clstrs[i]]) for i in clstrs.keys()])
print("tstat at depth:")
print(tStatMeanDepth)
corr1, clstrs, silh = oc.clusterKMeansTop(pd.DataFrame(detoned_corr)) #1: [18, 24, 57, 81, 86, 99, 112, 120, 134, 165]
tStatMeanDepth = np.mean([np.mean(silh[clstrs[i]]) / np.std(silh[clstrs[i]]) for i in clstrs.keys()])
print("tstat at depth:")
print(tStatMeanDepth)
raise SystemExit
#corr11, clstrs11, silh11 = onc.get_onc_clusters(pd.DataFrame(detoned_corr)) #test with mlfinlab impl: 1: [18, 24, 57, 81, 86, 99, 112, 120, 134, 165]
matplotlib.pyplot.matshow(corr11) #invert y-axis to get origo at lower left corner
matplotlib.pyplot.gca().xaxis.tick_bottom()
matplotlib.pyplot.gca().invert_yaxis()
matplotlib.pyplot.colorbar()
matplotlib.pyplot.show()
#Chapter 5 Financial labels
#Let's try trend-following on PHO
idxPHO =118
idxBGBIO = 29
idxWWI = 169
pho = S_value[:,idxBGBIO]
df0 = pd.Series(pho[-50:])
df1 = fl.getBinsFromTrend(df0.index, df0, [3, 10, 1]) # [3,10,1] = range(3,10)
tValues = df1['tVal'].values
lastTValue = []
for i in range(0, lastIndex):
pho = S_value[:, i]
df0 = pd.Series(pho[-50:])
df1 = fl.getBinsFromTrend(df0.index, df0, [3,10,1]) #[3,10,1] = range(3,10)
tValues = df1['tVal'].values
lastTValue.append(tValues[41])
np.argmax(lastTValue)
plt.scatter(df1.index, df0.loc[df1.index].values, c=tValues, cmap='viridis') #df1['tVal'].values, cmap='viridis')
plt.colorbar()
plt.show()
bgbio_df = yf.Ticker("BGBIO.ol")
bg_bio_ticker_df = bgbio_df.history(period="7y")
bgbio = bg_bio_ticker_df['Close']
df0 = pd.Series(bgbio[-200:])
df1 = fl.getBinsFromTrend(df0.index, df0, [3,20,1]) #[3,10,1] = range(3,10)
tValues = df1['tVal'].values
plt.scatter(df1.index, df0.loc[df1.index].values, c=tValues, cmap='viridis') #df1['tVal'].values, cmap='viridis')
plt.colorbar()
plt.show()
S, pnames = get_OL_tickers_close()
#get t-statistics from all instruments on OL
S, pnames = get_OL_tickers_close(T=200,N=237)
np.argwhere(np.isnan(S))
S[182, 110] = S[181,110]
#implementing from book
abc = [None for i in range(0,237)]
for i in range(0, 20):#len(pnames)):
instrument = S[:,i]
df0 = pd.Series(instrument)
# api: pandas.Series
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert(a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_is_list_like():
passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert com.is_list_like(p)
for f in fails:
assert not com.is_list_like(f)
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
def test_ensure_platform_int():
# verify that when we create certain types of indices
# they remain the correct type under platform conversions
from pandas.core.index import Int64Index
# int64
x = Int64Index([1, 2, 3], dtype='int64')
assert(x.dtype == np.int64)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# int32
x = Int64Index([1, 2, 3], dtype='int32')
assert(x.dtype == np.int32)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert com.is_re(p)
for f in fails:
assert not com.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
u(r'\u2233\s*'), re.compile(r''))
fails = 1, [], object()
for p in passes:
assert com.is_re_compilable(p)
for f in fails:
assert not com.is_re_compilable(f)
class TestTake(unittest.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
# api: pandas.util.testing.assert_almost_equal
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sn
import matplotlib.patches as mpatches
from matplotlib import rcParams
#from brokenaxes import brokenaxes
from natsort import index_natsorted, order_by_index
#sn.set_context("paper", font_scale = 2)
#AUX FUNC
def Vm_groupby(df, group_by, aggr):
df = df.groupby(group_by)
df = df.agg(aggr)
df = df.reset_index()
df = df.reindex(index=order_by_index(df.index, index_natsorted(df['TripID'], reverse=False)))
return df
####
#Loop VM - Single Plot + Compare Plot (BAR/BOXPLOT)
def prep_n_mig_LoopVM(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID', 'VmID'], {'Mig_ID':'count', 'tripdistance': 'first'})
dfaux.rename(columns={'TripID':'TripID', 'VmID':'VmID', 'Mig_ID':'Number of Migrations', 'tripdistance': 'tripdistance'},inplace=True)
#dfaux = Vm_groupby(dfaux, ['TripID'], {'Number of Migrations':'mean'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
return dfconcat
def n_mig_LoopVM(df):
fig, ax = plt.subplots()
#BOXPLOT
#ax.set_title("Number of Migrations by Trip " + title)
#sn.boxplot(x='TripID', y='Number of Migrations', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
#BAR
ax.set_title("Number of Migrations by Trip")
sn.barplot(x='TripID', y='Number of Migrations', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax)
ax.get_legend().remove()
ax.legend(loc='upper right')
ax.set_ylim(0, 25)
ax.set_xlabel('Trips')
ax.set_ylabel('Number of migrations')
return 1
def normalized_n_mig_LoopVM(df):
# vectorized: number of migrations per km for each row
df["n_mig_km"] = df['Number of Migrations'] / df['tripdistance']
#print(df)
fig, ax = plt.subplots()
#BOXPLOT
#ax.set_title("Number of Migrations by Trip " + title)
#sn.boxplot(x='TripID', y='Number of Migrations', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
#BAR
ax.set_title("Number of Migrations / km - by Trip")
sn.barplot(x='TripID', y='n_mig_km', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax)
ax.get_legend().remove()
ax.legend(loc='upper right')
ax.set_ylim(0, 1.4)
ax.set_xlabel('Trips')
ax.set_ylabel('Number of migrations / KM')
return 1
####
####
#Loop VM - Single Plot + Compare Plot
def prep_migtime_LoopVM(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID', 'VmID'], {'Mt_real':'sum', 'triptime': 'first'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
return dfconcat
def migtime_LoopVM(df):
fig, ax = plt.subplots()
ax.set_title("Time Spent Migrating vs Trip Time")
ax.scatter(df['TripID'], df['triptime'], color='black', label='Total Trip Time')
sn.boxplot(x='TripID', y='Mt_real', hue="Class", palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
ax.set_xlabel('Trips')
ax.set_ylabel('Time in Seconds')
ax.legend()
return 1
def percentage_migtime_LoopVM(df):
# vectorized: time spent migrating as a percentage of total trip time
df["Percentage_migtime"] = df['Mt_real'] * 100 / df['triptime']
#print(df)
fig, ax = plt.subplots()
ax.set_title("Time Spent Migrating vs Trip Time (Percentage)")
sn.boxplot(x='TripID', y='Percentage_migtime', hue="Class", palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
ax.get_legend().remove()
ax.legend(loc='upper right')
ax.set_ylim(0, 70)
ax.set_xlabel('Trips')
ax.set_ylabel('Percentage')
return 1
####
####
#Loop VM - Single Plot + Compare Plot
def prep_downtime_LoopVM(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID', 'VmID'], {'DT_real':'sum', 'triptime': 'first'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
return dfconcat
def downtime_LoopVM(df):
fig, ax = plt.subplots()
ax.set_title("Downtime by Trip")
sn.boxplot(x='TripID', y='DT_real', hue="Class", palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
ax.set_xlabel('Trips')
ax.set_ylabel('Time in milliseconds')
ax.legend()
return 1
def percentage_downtime_LoopVM(df):
# vectorized: downtime (ms) as a percentage of total trip time (s converted to ms)
df["Percentage_downtime"] = df['DT_real'] * 100 / (df['triptime'] * pow(10, 3))
#print(df)
fig, ax = plt.subplots()
ax.set_title("Downtime by Trip (Percentage)")
sn.boxplot(x='TripID', y='Percentage_downtime', hue="Class", palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
ax.get_legend().remove()
ax.legend(loc='upper right')
ax.set_ylim(0, 2)
ax.set_xlabel('Trips')
ax.set_ylabel('Percentage')
return 1
####
####
#Loop VM - Single Plot + Compare Plot
def prep_client_latency_LoopVM(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID', 'VmID'], {'Latency':'mean'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
# api: pandas.concat
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #t-C #source: van Beijma et al. (2018)
initAGB_min = 233-72 #t-C
initAGB_max = 233 + 72 #t-C
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26 #years
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #fraction of carbon content in biomass
c_cont_po_plasma = 0.5454 #fraction of carbon content in biomass
tf = 201 #years
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
# api: pandas.read_excel
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
# function for loading data from disk
def load_data():
"""
This function is responsible for loading training data from disk
and performs some basic operations like
- one-hot encoding
- feature scaling
- reshaping data
Parameters:
(no-parameters)
Returns:
X : numpy array (contains all features of training data)
y : numpy array (contains all targets of training data)
"""
path = "../data/train.csv"
if(not Path(path).is_file()):
print("[util]: train data not found at '",path,"'")
#quit()
print("[util]: Loading '",path,"'")
train = pd.read_csv(path)
y = np.array(pd.get_dummies(train['label']))
# api: pandas.get_dummies
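# A hedged sketch of how load_data might continue, based only on the steps named in its docstring
# (one-hot encoding, feature scaling, reshaping); the column layout and the 28x28 image shape are
# assumptions typical of an MNIST-style train.csv, not taken from the original:
#   y = np.array(pd.get_dummies(train['label']))      # one-hot encode the targets
#   X = train.drop(columns=['label']).values / 255.0  # scale pixel features to [0, 1]
#   X = X.reshape(-1, 28, 28, 1)                      # reshape flat pixel rows into images
#   return X, y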
import pandas as pd
import numpy as np
def getDailyVol(close,
span0=100):
"""SNIPPET 3.1 DAILY VOLATILITY ESTIMATES
Daily vol reindexed to close
"""
df0=close.index.searchsorted(close.index-pd.Timedelta(days=1))
df0=df0[df0>0]
df0=(pd.Series(close.index[df0-1],
index=close.index[close.shape[0]-df0.shape[0]:]))
try:
df0=close.loc[df0.index]/close.loc[df0.values].values-1 # daily rets
except Exception as e:
print(f'error: {e}\nplease confirm no duplicate indices')
df0=df0.ewm(span=span0).std().rename('dailyVol')
return df0
def applyPtSlOnT1(close,
events,
ptSl,
molecule):
"""SNIPPET 3.2 TRIPLE-BARRIER LABELING METHOD
Apply stop loss/profit taking, if it takes place before t1 (end of event)
"""
events_=events.loc[molecule]
out=events_[['t1']].copy(deep=True)
if ptSl[0]>0: pt=ptSl[0]*events_['trgt']
else: pt=pd.Series(index=events.index) # NaNs
if ptSl[1]>0: sl=-ptSl[1]*events_['trgt']
else: sl=pd.Series(index=events.index) # NaNs
for loc,t1 in events_['t1'].fillna(close.index[-1]).iteritems():
df0=close[loc:t1] # path prices
df0=(df0/close[loc]-1)*events_.at[loc,'side'] # path returns
out.loc[loc,'sl']=df0[df0<sl[loc]].index.min() # earliest stop loss
out.loc[loc,'pt']=df0[df0>pt[loc]].index.min() # earliest profit taking
return out
def getEvents(close,
tEvents,
ptSl,
trgt,
minRet,
numThreads,
t1=False,
side=None):
"""SNIPPET 3.3 GETTING THE TIME OF FIRST TOUCH
SNIPPET 3.6 EXPANDING getEvents TO INCORPORATE META-LABELING
"""
#1) get target
trgt=trgt.loc[tEvents]
trgt=trgt[trgt>minRet] # minRet
#2) get t1 (max holding period)
if t1 is False:t1=pd.Series(pd.NaT, index=tEvents)
#3) form events object, apply stop loss on t1
if side is None:side_,ptSl_=pd.Series(1.,index=trgt.index), [ptSl[0],ptSl[0]]
else: side_,ptSl_=side.loc[trgt.index],ptSl[:2]
events=(pd.concat({'t1':t1,'trgt':trgt,'side':side_}, axis=1)
.dropna(subset=['trgt']))
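# NOTE: mpPandasObj is the multiprocessing helper from a later chapter of the same book
# (Snippet 20.7); it is assumed to be imported or defined elsewhere in the project.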
df0=mpPandasObj(func=applyPtSlOnT1,pdObj=('molecule',events.index),
numThreads=numThreads,close=close,events=events,
ptSl=ptSl_)
events['t1']=df0.dropna(how='all').min(axis=1) # pd.min ignores nan
if side is None:events=events.drop('side',axis=1)
return events
def addVerticalBarrier(tEvents,
close,
numDays=1):
"""SNIPPET 3.4 ADDING A VERTICAL BARRIER
"""
t1=close.index.searchsorted(tEvents+pd.Timedelta(days=numDays))
t1=t1[t1<close.shape[0]]
t1=(pd.Series(close.index[t1],index=tEvents[:t1.shape[0]]))
return t1
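# A minimal usage sketch of how these snippets chain together (an illustration, assuming `close`
# is a pd.Series of prices with a DatetimeIndex and that mpPandasObj is available):
#   vol = getDailyVol(close)
#   t1 = addVerticalBarrier(close.index, close, numDays=5)
#   events = getEvents(close, tEvents=close.index, ptSl=[1, 1], trgt=vol, minRet=0.0,
#                      numThreads=1, t1=t1)
#   bins = getBins(events, close)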
def getBinsOld(events,
close):
"""SNIPPET 3.5 LABELING FOR SIDE AND SIZE
"""
#1) prices aligned with events
events_=events.dropna(subset=['t1'])
px=events_.index.union(events_['t1'].values).drop_duplicates()
px=close.reindex(px,method='bfill')
#2) create out object
out=pd.DataFrame(index=events_.index)
out['ret']=px.loc[events_['t1'].values].values/px.loc[events_.index]-1
out['bin']=np.sign(out['ret'])
# where out index and t1 (vertical barrier) intersect label 0
try:
locs = out.query('index in @t1').index
out.loc[locs, 'bin'] = 0
except:
pass
return out
def getBins(events,
close):
"""SNIPPET 3.7 EXPANDING getBins TO INCORPORATE META-LABELING
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
"""
#1) prices aligned with events
events_=events.dropna(subset=['t1'])
px=events_.index.union(events_['t1'].values).drop_duplicates()
px=close.reindex(px,method='bfill')
#2) create out object
out = pd.DataFrame(index=events_.index)
# api: pandas.DataFrame
# coding: utf-8
# # Estimating the total biomass of terrestrial protists
# After searching the literature, we could not find a comprehensive account of the biomass of protists in soils. We generated a crude estimate of the total biomass of protists in soil based on estimating the total number of individual protists in the soil, and on the characteristic carbon content of a single protist.
#
# In order to calculate the total biomass of soil protists we calculate a characteristic number of individual protists for each of the morphological groups of protists (flagellates, ciliates, and naked and testate amoebae). We combine these estimates with estimates for the carbon content of each morphological group.
#
# ## Number of protists
# To estimate the total number of protists, we assembled data on the number of protists in soils, comprising 160 measurements from 42 independent studies. Here is a sample of the data:
# In[1]:
# Initialization
import pandas as pd
import numpy as np
import gdal
from scipy.stats import gmean
import sys
sys.path.insert(0,'../../statistics_helper/')
from fraction_helper import *
from CI_helper import *
pd.options.display.float_format = '{:,.1e}'.format
# Load data
data = pd.read_excel('terrestrial_protist_data.xlsx','Density of Individuals')
data.head()
# To estimate the total number of protists, we group our samples by habitat and by the study in which they were taken. We calculate the characteristic number of each group of protists per gram of soil. To do this we first derive a representative value for each study in each habitat, in case more than one measurement was made in it. We then calculate the average of the representative values from different studies within the same habitat, using either the arithmetic mean or the geometric mean.
# In[2]:
# Define the function to calculate the geometric mean of number of each group of protists per gram
def groupby_gmean(input):
return pd.DataFrame({'Number of ciliates [# g^-1]': gmean(input['Number of ciliates [# g^-1]'].dropna()),
'Number of naked amoebae [# g^-1]': gmean(input['Number of naked amoebae [# g^-1]'].dropna()),
'Number of testate amoebae [# g^-1]': gmean(input['Number of testate amoebae [# g^-1]'].dropna()),
'Number of flagellates [# g^-1]': gmean(input['Number of flagellates [# g^-1]'].dropna())},index=[0])
# Define the function to calculate the arithmetic mean of number of each group of protists per gram
def groupby_mean(input):
return pd.DataFrame({'Number of ciliates [# g^-1]': np.nanmean(input['Number of ciliates [# g^-1]'].dropna()),
'Number of naked amoebae [# g^-1]': np.nanmean(input['Number of naked amoebae [# g^-1]'].dropna()),
'Number of testate amoebae [# g^-1]': np.nanmean(input['Number of testate amoebae [# g^-1]'].dropna()),
'Number of flagellates [# g^-1]': np.nanmean(input['Number of flagellates [# g^-1]'].dropna())},index=[0])
# Group the samples by habitat and study, and calculate the geometric mean
grouped_data_gmean = data.groupby(['Habitat','DOI']).apply(groupby_gmean)
# Group the samples by habitat and study, and calculate the arithmetic mean
grouped_data_mean = data.groupby(['Habitat','DOI']).apply(groupby_mean)
# Group the representative values by habitat, and calculate the geometric mean
habitat_gmean = grouped_data_gmean.groupby('Habitat').apply(groupby_gmean)
# Group the representative values by habitat, and calculate the arithmetic mean
habitat_mean = grouped_data_mean.groupby('Habitat').apply(groupby_mean)
habitat_gmean.set_index(habitat_gmean.index.droplevel(1),inplace=True)
habitat_mean.set_index(habitat_mean.index.droplevel(1),inplace=True)
# Here is the calculated geometric mean number of cells per gram for each habitat and each group of protists:
# In[3]:
habitat_gmean
# For some groups, not all habitats have values. We fill values for missing data by the following scheme. For missing values in the boreal forest biome, we use values from the temperate forest biome. If we have data for the group of protists from the "General" habitat, which is based on expert assessment of the characteristic number of individuals for that group per gram of soil, we fill the missing values with the value for the "General" habitat.
#
# The only other missing data was for ciliates in tropical forests and tundra. For tropical forests, we use the values from temperate forests. For tundra, we use the mean over all the different habitats to fill the value:
# In[4]:
# Fill missing values for boreal forests
habitat_mean.loc['Boreal Forest',['Number of ciliates [# g^-1]','Number of flagellates [# g^-1]','Number of naked amoebae [# g^-1]']] = habitat_mean.loc['Temperate Forest',['Number of ciliates [# g^-1]','Number of flagellates [# g^-1]','Number of naked amoebae [# g^-1]']]
habitat_gmean.loc['Boreal Forest',['Number of ciliates [# g^-1]','Number of flagellates [# g^-1]','Number of naked amoebae [# g^-1]']] = habitat_gmean.loc['Temperate Forest',['Number of ciliates [# g^-1]','Number of flagellates [# g^-1]','Number of naked amoebae [# g^-1]']]
# Fill missing values for naked amoebae
habitat_mean.loc[['Shrubland','Tropical Forest','Tundra','Woodland'],'Number of naked amoebae [# g^-1]'] = habitat_mean.loc['General','Number of naked amoebae [# g^-1]']
habitat_gmean.loc[['Shrubland','Tropical Forest','Tundra','Woodland'],'Number of naked amoebae [# g^-1]'] = habitat_gmean.loc['General','Number of naked amoebae [# g^-1]']
# Fill missing values for flagellates
habitat_gmean.loc[['Desert','Grassland','Shrubland','Tropical Forest','Woodland'],'Number of flagellates [# g^-1]'] = habitat_gmean.loc['General','Number of flagellates [# g^-1]']
habitat_mean.loc[['Desert','Grassland','Shrubland','Tropical Forest','Woodland'],'Number of flagellates [# g^-1]'] = habitat_mean.loc['General','Number of flagellates [# g^-1]']
# Fill missing values for ciliates
habitat_gmean.loc['Tropical Forest','Number of ciliates [# g^-1]'] = habitat_gmean.loc['Temperate Forest','Number of ciliates [# g^-1]']
habitat_mean.loc['Tropical Forest','Number of ciliates [# g^-1]'] = habitat_mean.loc['Temperate Forest','Number of ciliates [# g^-1]']
habitat_gmean.loc['Tundra','Number of ciliates [# g^-1]'] = gmean(habitat_mean['Number of ciliates [# g^-1]'].dropna())
habitat_mean.loc['Tundra','Number of ciliates [# g^-1]'] = habitat_mean['Number of ciliates [# g^-1]'].dropna().mean()
habitat_gmean
# We have estimates for the total number of individual protists per gram of soil. In order to calculate the total number of individual protists we need to first convert the data to number of individuals per $m^2$. To convert number of individuals per gram of soil to number of individuals per $m^2$, we calculate a global average soil density in the top 15 cm based on [Hengl et al.](https://dx.doi.org/10.1371%2Fjournal.pone.0105992).
#
# In[5]:
# Load soil density map from Hengl et al. (in the top 15 cm, reduced in resolution to 1 degree resolution)
gtif = gdal.Open('bulk_density_data.tif')
bulk_density_map = np.array(gtif.GetRasterBand(1).ReadAsArray())
# Fill missing values with NaN
bulk_density_map[bulk_density_map == bulk_density_map[0,1]] = np.nan
# Mean soil bulk density from Hengl et al. [in g per m^3]
bulk_density = np.nanmean(bulk_density_map[:])*1000
print('Our best estimate for the global mean bulk density of soil in the top 15 cm is ≈%.1e g m^-3' %bulk_density)
#of ≈1.3 g $cm^3$
# Measuring the density of individuals per gram of soil does not take into account the distribution of biomass along the soil profile. Most of the measurements of the number of individual protists per gram of soil are done at shallow soil depths. We calculate the average sampling depth across studies:
# In[6]:
# Calculate the average sampling depth
sampling_depth = data.groupby('DOI').mean().mean()['Sampling Depth [cm]']
print('The average sampling depth of soil protists is ≈%.0f cm' %sampling_depth)
# It is not obvious what fraction of the total biomass of soil protists is found in the top 8 cm of soil. To estimate the fraction of the biomass of soil protists found in the top 8 cm, we rely on two methodologies. The first is based on the distribution of microbial biomass with depth as discussed in Xu et al. Xu et al. extrapolate the microbial biomass across the soil profile based on empirical equations for the distribution of root biomass along soil depth from [Jackson et al.](http://dx.doi.org/10.1007/BF00333714). The empirical equations are biome-specific, and follow the general form: $$Y = 1-\beta^d$$ where Y is the cumulative fraction of roots, d is depth in centimeters, and $\beta$ is a coefficient fitted for each biome. On a global scale, the best fit for $\beta$ as reported in Jackson et al. is ≈0.966. We use this coefficient to calculate the fraction of total biomass of soil protists found in the top 8 cm:
# In[7]:
# The beta coefficient from Jackson et al.
jackson_beta = 0.966
# Calculate the fraction of the biomass of soil protists found in the top 8 cm
jackson_fraction = 1 - jackson_beta** sampling_depth
print('Our estimate for the fraction of biomass of soil protists found in soil layers sampled, based on Jackson et al. is ≈%.0f percent' %(jackson_fraction*100))
# As a second estimate for the fraction of the total biomass of soil protists found in the top 8 cm, we rely on an empirical equation from [Fierer et al.](http://dx.doi.org/10.1111/j.1461-0248.2009.01360.x), which estimates the fraction of microbial biomass found below sampling depth d:
# $$ f = [-0.132×ln(d) + 0.605]×B$$
# where f is the fraction of the total microbial biomass B found below sampling depth d (in cm). We use this equation to calculate the fraction of the total biomass of soil protists found in the top 8 cm:
#
# In[8]:
# The fraction of microbial biomass found in layer shallower than depth x based on Fierer et al.
fierer_eq = lambda x: 1-(-0.132*np.log(x)+0.605)
fierer_frac = fierer_eq(sampling_depth)
print('Our estimate for the fraction of biomass of soil protists found in soil layers sampled, based on Fierer et al. is ≈%.0f percent' %(fierer_frac*100))
# As our best estimate for the fraction of the total biomass of soil protists found in layers shallower than 8 cm, we use the geometric mean of the estimates based on Jackson et al. and Fierer et al.:
# In[9]:
best_depth_frac = frac_mean(np.array([jackson_fraction,fierer_frac]))
print('Our best estimate for the fraction of biomass of soil protists found in soil layers sampled is ≈%.0f percent' %(best_depth_frac*100))
# To convert the measurements per gram of soil to number of individuals per $m^2$, we calculate the average sampling depth across studies. We calculate the volume of soil held within this sampling depth. We use the bulk density to calculate the total weight of soil within one $m^2$ of soil with depth equal to the sampling depth. We multiply the estimates per gram of soil by the total weight of soil per $m^2$. To account for biomass present in lower layers, we divide the total number of individual protists per $m^2$ by our best estimate for the fraction of the total biomass of soil protists found in layer shallower than 8 cm.
# In[10]:
# convert number of individuals per gram soil to number of individuals per m^2
habitat_per_m2_gmean = (habitat_gmean*bulk_density*sampling_depth/100/best_depth_frac)
habitat_per_m2_mean = (habitat_mean*bulk_density*sampling_depth/100/best_depth_frac)
# To calculate the total number of protists we multiply the total number of individuals per unit area of each type of protist in each habitat by the total area of each habitat taken from the book [Biogeochemistry: An analysis of Global Change](https://www.sciencedirect.com/science/book/9780123858740) by Schlesinger & Bernhardt. The areas of each habitat are:
# In[11]:
habitat_area = pd.read_excel('terrestrial_protist_data.xlsx','Biome area', skiprows=1,index_col=0)
habitat_area
# One habitat for which we do not have data is the savanna. We use the mean of the values for the tropical forest, woodland, shrubland and grassland as an estimate of the total biomass in the savanna.
# In[12]:
habitat_per_m2_gmean.loc['Tropical Savanna'] = gmean(habitat_per_m2_gmean.loc[['Tropical Forest','Woodland','Shrubland','Grassland']])
habitat_per_m2_mean.loc['Tropical Savanna'] = habitat_per_m2_gmean.loc[['Tropical Forest','Woodland','Shrubland','Grassland']].mean(axis=0)
tot_num_gmean = habitat_per_m2_gmean.mul(habitat_area['Area [m^2]'],axis=0)
tot_num_mean = habitat_per_m2_mean.mul(habitat_area['Area [m^2]'],axis=0)
print(tot_num_mean.sum())
print(tot_num_gmean.sum())
print(gmean([tot_num_mean.sum(),tot_num_gmean.sum()]))
# We generated two types of estimates for the total number of soil protists: an estimate which uses the arithmetic mean of the number of individuals at each habitat, and an estimate which uses the geometric mean of the number of individuals at each habitat. The estimate based on the arithmetic mean is more susceptible to sampling bias, as even a single measurement which is not characteristic of the global population (such as samples which are contaminated with organic carbon sources, or samples which have some technical biases associated with them) might shift the average concentration significantly. On the other hand, the estimate based on the geometric mean might underestimate global biomass as it will reduce the effect of biologically relevant high biomass concentrations. As a compromise between these two caveats, we chose to use as our best estimate the geometric mean of the estimates from the two methodologies.
# In[13]:
tot_num_protist = gmean([tot_num_mean.sum(),tot_num_gmean.sum()])
tot_num_protist
# ## Carbon content of protists
# We estimate the characteristic carbon content of a single protist from each of the morphological groups of protists based on data from several sources. Here is a sample of the data:
# In[14]:
cc_data = pd.read_excel('terrestrial_protist_data.xlsx', 'Carbon content')
cc_data.head()
# We combine this data with an additional source from [Finlay & Fenchel](http://dx.doi.org/10.1078/1434-4610-00060). We calculate the average cell length for each group.
# In[15]:
# Load data from Finlay & Fenchel
ff_data = pd.read_excel('terrestrial_protist_data.xlsx', 'Finlay & Fenchel', skiprows=1)
# Define the function to calculate the weighted average for each group of protists
def weighted_av_groupby(input):
return np.average(input['Length [µm]'],weights=input['Abundance [# g^-1]'])
cell_lengths = ff_data.groupby('Protist type').apply(weighted_av_groupby)
# We convert the cell length to biovolume according to the allometric relation described in Figure 10 of Finlay & Fenchel. The relation between cell volume and cell length is given by the equation:
# $$V = 0.6×L^{2.36}$$
# Where V is the cell volume in $µm^3$ and L is the cell length in µm.
# In[16]:
cell_volumes = 0.6*cell_lengths**2.36
cell_volumes
# We convert cell volumes to carbon content assuming ≈150 fg C µm$^{-3}$:
# In[17]:
ff_carbon_content = cell_volumes*150e-15
pd.options.display.float_format = '{:,.1e}'.format
ff_carbon_content
# We add these numbers as an additional source for calculating the carbon content of protists:
# In[18]:
cc_data.loc[cc_data.index[-1]+1] = pd.Series({'Reference': 'Finlay & Fenchel',
'DOI': 'http://dx.doi.org/10.1078/1434-4610-00060',
'Carbon content of ciliates [g C cell^-1]': ff_carbon_content.loc['Ciliate'],
'Carbon content of naked amoebae [g C cell^-1]': ff_carbon_content.loc['Naked amoebae'],
'Carbon content of testate amoebae [g C cell^-1]': ff_carbon_content.loc['Testate amoebae'],
'Carbon content of flagellates [g C cell^-1]': ff_carbon_content.loc['Flagellate']
})
# We calculate the geometric mean of carbon contents, first for values within each study and then across the average values from the different studies:
# In[19]:
def groupby_gmean(input):
return pd.DataFrame({'Carbon content of ciliates [g C cell^-1]': gmean(input['Carbon content of ciliates [g C cell^-1]'].dropna()),
'Carbon content of naked amoebae [g C cell^-1]': gmean(input['Carbon content of naked amoebae [g C cell^-1]'].dropna()),
'Carbon content of testate amoebae [g C cell^-1]': gmean(input['Carbon content of testate amoebae [g C cell^-1]'].dropna()),
'Carbon content of flagellates [g C cell^-1]': gmean(input['Carbon content of flagellates [g C cell^-1]'].dropna())},index=[0])
study_mean_cc = cc_data.groupby('DOI').apply(groupby_gmean)
mean_cc = study_mean_cc.reset_index().groupby('level_1').apply(groupby_gmean)
# In[20]:
gmean(study_mean_cc['Carbon content of flagellates [g C cell^-1]'].dropna())
mean_cc.T
# To estimate the total biomass of soil protists based on the total number of individuals and their carbon content, we multiply our estimate for the total number of individuals of each morphological type by its characteristic carbon content. We sum over all morphological types of protists to generate our best estimate for the global biomass of soil protists.
# In[21]:
# Calculate the total biomass of protists
best_estimate = (tot_num_protist*mean_cc).sum(axis=1)
print('Our best estimate of the total biomass of soil protists is ≈%.1f Gt C' %(best_estimate/1e15))
tot_num_protist*mean_cc
# # Uncertainty analysis
# To assess the uncertainty associated with our estimate of the total biomass of terrestrial protists, we collect available uncertainties for the values reported within studies and between studies. We use the highest uncertainty out of this collection of uncertainties as our best projection for the uncertainty associated with the estimate of the total biomass of terrestrial protists.
#
# ## Number of individuals
# We assemble different measures of uncertainty at different levels - for values within the same study, for studies within the same habitat, and between habitats.
#
# ### Intra-study uncertainty
# For each study which reports more than one value, we calculate 95% confidence interval around the geometric mean of those values. We take the maximal uncertainty in each habitat as our measure of the intra-study uncertainty
# In[22]:
pd.options.display.float_format = '{:,.1f}'.format
# Define the function to calculate the 95% confidence interval around the
# geometric mean of number of each group of protists per gram
def groupby_geo_CI(input):
return pd.DataFrame({'Number of ciliates [# g^-1]': geo_CI_calc(input['Number of ciliates [# g^-1]'].dropna()),
'Number of naked amoebae [# g^-1]': geo_CI_calc(input['Number of naked amoebae [# g^-1]'].dropna()),
'Number of testate amoebae [# g^-1]': geo_CI_calc(input['Number of testate amoebae [# g^-1]'].dropna()),
'Number of flagellates [# g^-1]': geo_CI_calc(input['Number of flagellates [# g^-1]'].dropna())},index=[0])
# Group the samples by habitat and study, and calculate the 95% confidence
# interval around the geometric mean of values within each study
intra_study_num_CI = data.groupby(['Habitat','DOI']).apply(groupby_geo_CI)
# Use the maximal uncertainty in each habitat as a measure of the intra-study uncertainty
intra_num_CI = intra_study_num_CI.groupby('Habitat').max()
# ### Interstudy uncertainty
# We calculate 95% confidence interval around the geometric mean of the average values from different studies.
# In[23]:
# Group the representative values by habitat, and calculate the 95% confidence interval
# around the geometric mean of values within habitat
inter_study_habitat_num_CI = grouped_data_gmean.groupby('Habitat').apply(groupby_geo_CI)
inter_study_habitat_num_CI.set_index(inter_study_habitat_num_CI.index.droplevel(level=1),inplace=True)
inter_study_habitat_num_CI
# ### Inter-habitat uncertainty
# We first use the maximum of the intra-study and interstudy uncertainty in each habitat as our best projection for the uncertainty associated with the estimate of the total number of protists in the habitat. For habitats with missing uncertainty projections, we use the maximum of the uncertainties for the same group of protists in other habitats.
# In[24]:
# Use the maximum of the intra-study and interstudy uncertainty as our best projection of the uncertainty
# of the number of protists in each habitat
tot_num_habitat_CI = inter_study_habitat_num_CI.where(inter_study_habitat_num_CI > intra_num_CI, intra_num_CI).fillna(inter_study_habitat_num_CI)
# Fill missing values for each habitat with the maximum of the uncertainties for the same group of
# protists in the other habitats
tot_num_habitat_CI['Number of ciliates [# g^-1]'].fillna(tot_num_habitat_CI['Number of ciliates [# g^-1]'].max(),inplace=True)
tot_num_habitat_CI['Number of flagellates [# g^-1]'].fillna(tot_num_habitat_CI['Number of flagellates [# g^-1]'].max(),inplace=True)
tot_num_habitat_CI['Number of naked amoebae [# g^-1]'].fillna(tot_num_habitat_CI['Number of naked amoebae [# g^-1]'].max(),inplace=True)
tot_num_habitat_CI['Number of testate amoebae [# g^-1]'].fillna(tot_num_habitat_CI['Number of testate amoebae [# g^-1]'].max(),inplace=True)
# Fill the uncertainty of the values for the tropical savanna with the maximum of the uncertainties
# for the same group of protists in the other habitats
tot_num_habitat_CI.loc['Tropical Savanna'] = tot_num_habitat_CI.max()
tot_num_habitat_CI
# We propagate the uncertainties associated with the estimates of the total number of protists per gram soil in each habitat to the estimate of the sum across all habitats:
# In[25]:
tot_num_habitat_CI = tot_num_habitat_CI.loc[tot_num_gmean.dropna().index.values]
ciliate_num_per_g_CI = CI_sum_prop(estimates=tot_num_gmean.dropna()['Number of ciliates [# g^-1]'],mul_CIs=tot_num_habitat_CI['Number of ciliates [# g^-1]'])
flagellate_num_per_g_CI = CI_sum_prop(estimates=tot_num_gmean.dropna()['Number of flagellates [# g^-1]'],mul_CIs=tot_num_habitat_CI['Number of flagellates [# g^-1]'])
naked_amoebea_num_per_g_CI = CI_sum_prop(estimates=tot_num_gmean.dropna()['Number of naked amoebae [# g^-1]'],mul_CIs=tot_num_habitat_CI['Number of naked amoebae [# g^-1]'])
testate_amoebea_num_per_g_CI = CI_sum_prop(estimates=tot_num_gmean.dropna()['Number of testate amoebae [# g^-1]'],mul_CIs=tot_num_habitat_CI['Number of testate amoebae [# g^-1]'])
num_per_g_CI = pd.Series([ciliate_num_per_g_CI,flagellate_num_per_g_CI,naked_amoebea_num_per_g_CI,testate_amoebea_num_per_g_CI], index= tot_num_habitat_CI.columns)
num_per_g_CI
# ### Inter-method uncertainty
# We generated two types of estimates for the total number of individual protists per gram of soil - one based on the arithmetic mean and one based on the geometric mean of values. As our best estimate we used the geometric mean of the arithmetic mean and geometric mean-based estimates. We calculate the 95% confidence interval around the geometric mean of the two types of estimates as a measure of the uncertainty this procedure introduces into the estimate of the total number of protists:
# In[26]:
inter_method_num_CI = geo_CI_calc(pd.DataFrame([tot_num_mean.sum(),tot_num_gmean.sum()]))
inter_method_num_CI
# We use the maximum of the uncertainty stemming from the intra-study and interstudy variability and the inter-method uncertainty as our best projection of the uncertainty associated with our estimate of the number of individual protists per gram of soil:
# In[27]:
best_num_CI = np.max([num_per_g_CI,inter_method_num_CI],axis=0)
best_num_CI = pd.Series(best_num_CI, index=inter_method_num_CI.index)
# api: pandas.Series
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import datetime
import gc
import gzip
import json
import subprocess
import sys
from typing import Union, List
import nibabel as nib
import pandas as pd
import pydicom as dicom
from bids import layout
from matgrab import mat2df
from pyedflib import highlevel
from scipy.io import wavfile
from utils import *
def get_parser(): # parses flags at onset of command
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter
, description="""
Data2bids is a script based on the SIMEXP lab script to convert nifti MRI files into BIDS format. This script has been modified to
also parse README data as well as include conversion of DICOM files to nifti. The script utilizes <NAME>'s Dcm2niix program for
actual conversion.
This script takes one of two formats for conversion. The first is a series of DICOM files in sequence with an optional \"medata\" folder which
contains any number of single or multi-echo uncompressed nifti files (.nii). Note that nifti files in this case must also have a corresponding
DICOM scan run, but not necessarily scan echo (for example, one DICOM scan for run 5 but three nifti files which are echoes 1, 2, and 3 of
run 5). The other format is a series of nifti files and a README.txt file formatted the same way as it is in the example. Both formats are
shown in the examples folder.
Both formats use a .json config file that maps either DICOM tags or text within the nifti file name to BIDS metadata. The syntax and formatting of this .json file
can be found here https://github.com/SIMEXP/Data2Bids#heuristic.
The only thing this script does not account for is event files. If you have the 1D files that's taken care of, but chances are you have some other
format. If this is the case, I recommend https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/05-task-events.html
so that you can make BIDS compliant event files.
Data2bids documentation at https://github.com/SIMEXP/Data2Bids
Dcm2niix documentation at https://github.com/rordenlab/dcm2niix"""
, epilog="""
Made by <NAME> (<EMAIL>)
""")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-i"
, "--input_dir"
, required=False
, default=None
, help="""
Input data directory(ies), must include a readme.txt file formatted like example under examples folder.
Mutually exclusive with DICOM directory option. Default: current directory
""",
)
parser.add_argument(
"-c"
, "--config"
, required=False
, default=None
, help="JSON configuration file (see https://github.com/SIMEXP/Data2Bids/blob/master/example/config.json)",
)
parser.add_argument(
"-o"
, "--output_dir"
, required=False
, default=None
, help="Output BIDS directory, Default: Inside current directory ",
)
group.add_argument(
"-d"
, "--DICOM_path"
, default=None
, required=False
, help="Optional DICOM directory, Mutually exclusive with input directory option",
)
parser.add_argument(
"-m"
, "--multi_echo"
, nargs='*'
, type=int
, required=False
, help="""
indicator of multi-echo dataset. Only necessary if NOT converting DICOMs. For example, if runs 3-6 were all multi-echo then the flag
should look like: -m 3 4 5 6 . Additionally, the -m flag may be called by itself if you wish to let data2bids auto-detect multi echo data,
but it will not be able to tell you if there is a mistake."""
)
parser.add_argument(
"-ow"
, "--overwrite"
, required=False
, action='store_true'
, help="overwrite preexisting BIDS file structures in destination location",
)
parser.add_argument(
"-ch"
, "--channels"
, nargs='*'
, required=False
, help="""
Indicator of channels to keep from edf files.
"""
)
parser.add_argument(
"-s"
, "--stim_dir"
, required=False
, default=None
, help="directory containing stimuli files",
)
parser.add_argument(
"-v"
, "--verbose"
, required=False
, action='store_true'
, help="verbosity",
)
return parser
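# Illustrative invocations using only the flags defined above (script name and paths are
# hypothetical, shown just to make the two input modes concrete):
#   python data2bids.py -i /data/raw/sub-D1 -c config.json -o /data/BIDS -v
#   python data2bids.py -d /data/raw/DICOM -c config.json -o /data/BIDS -m 3 4 5 6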
class Data2Bids: # main conversion and file organization program
def __init__(self, input_dir=None, config=None, output_dir=None, DICOM_path=None, multi_echo=None, overwrite=False,
stim_dir=None, channels=None, verbose=False):
# sets the .self globalization for self variables
self._input_dir = None
self._config_path = None
self._config = None
self._bids_dir = None
self._bids_version = "1.6.0"
self._dataset_name = None
self._data_types = {"anat": False, "func": False, "ieeg": False}
self._ignore = []
self.set_overwrite(overwrite)
self.set_data_dir(input_dir, DICOM_path)
self.set_config_path(config)
self.set_bids_dir(output_dir)
self.set_DICOM(DICOM_path)
self.set_multi_echo(multi_echo)
self.set_verbosity(verbose)
self.set_stim_dir(stim_dir)
self.set_channels(channels)
def check_ignore(self, file):
assert os.path.isabs(file), file + "must be given with the absolute path including root"
if not os.path.exists(file):
raise FileNotFoundError(file + " does not exist")
ans = False
for item in self._ignore:
if os.path.isfile(item) and Path(file).resolve() == Path(item).resolve():
ans = True
elif os.path.isdir(item):
for root, dirs, files in os.walk(item):
if os.path.basename(file) in files and Path(root).resolve() == Path(
os.path.dirname(file)).resolve():
ans = True
return ans
def set_stim_dir(self, dir):
if dir is None:
if "stimuli" in os.listdir(self._data_dir): # data2bids can be called at the parent folder
dir = os.path.join(self._data_dir, "stimuli")
elif "stimuli" in os.listdir(os.path.dirname(self._data_dir)): # or subject folder level
dir = os.path.join(os.path.dirname(self._data_dir), "stimuli")
else:
self.stim_dir = None
return
if not os.path.isdir(os.path.join(self._bids_dir, "stimuli")):
os.mkdir(os.path.join(self._bids_dir, "stimuli"))
for item in os.listdir(dir):
shutil.copyfile(os.path.join(dir, item), os.path.join(self._bids_dir, "stimuli", item))
self.stim_dir = dir
self._ignore.append(dir)
def set_channels(self, channels):
try:
headers_dict = self._config["ieeg"]["headerData"]
except KeyError:
headers_dict = None
self.channels = {}
self.sample_rate = {}
part_match = None
task_label_match = None
if self._data_dir:
for root, _, files in os.walk(self._data_dir):
# ignore BIDS directories and stimuli
files[:] = [f for f in files if not self.check_ignore(os.path.join(root, f))]
i = 1
while files:
file = files.pop(0)
src = os.path.join(root, file)
if not part_match == match_regexp(self._config["partLabel"], file):
part_match = match_regexp(self._config["partLabel"], file)
self.channels[part_match] = []
part_match_z = self.part_check(part_match)[1]
df = None
for name, var in self._config["ieeg"]["channels"].items():
if name in src:
df = mat2df(src, var)
if "highpass_cutoff" in df.columns.to_list():
df = df.rename(columns={"highpass_cutoff": "high_cutoff"})
if "lowpass_cutoff" in df.columns.to_list():
df = df.rename(columns={"lowpass_cutoff": "low_cutoff"})
name_gen = self.generate_names(src, part_match=part_match, verbose=False)
if name_gen is not None and name_gen[-2] is not None:
task_label_match = name_gen[-2]
if df is None:
continue
elif task_label_match is None:
i += 1
if i > 40:
raise NameError("No tasks could be found in files:\n", os.listdir(os.path.dirname(src)))
else:
files.append(file)
continue
else:
i = 1
filename = os.path.join(self._bids_dir, "sub-" + part_match_z, "sub-" + part_match_z + "_task-" +
task_label_match + "_channels.tsv")
os.mkdir(os.path.dirname(filename))
df.to_csv(filename, sep="\t", index=False)
if part_match not in headers_dict.keys():
try:
var = headers_dict["default"]
if isinstance(var, str):
var = [var]
self.channels[part_match] = self.channels[part_match] + [v for v in var if
v not in self.channels[part_match]]
except KeyError:
pass
for name, var in headers_dict.items():
if name == part_match:
if isinstance(var, str):
var = [var]
self.channels[part_match] = self.channels[part_match] + [v for v in var if
v not in self.channels[part_match]]
elif re.match(".*?" + part_match + ".*?" + name,
src): # some sort of checking for .mat or txt files?
if name.endswith(".mat") and re.match(".*?" + part_match + ".*?" + name, src):
self.channels[part_match] = self.channels[part_match] + mat2df(src, var).tolist()
try:
self.sample_rate[part_match] = mat2df(src, self._config['ieeg']['sampleRate']).iloc[
0]
except KeyError:
self.sample_rate[part_match] = None
except AttributeError:
raise IndexError(self._config['ieeg']['sampleRate'] + " not found in " + src)
self._ignore.append(src)
elif name.endswith((".txt", ".csv", ".tsv")) and re.match(".*?" + part_match + ".*?" + name,
src):
f = open(name, 'r')
content = f.read()
f.close()
self.channels[part_match] = self.channels[part_match] + content.split()
elif name.endswith(tuple(self._config['dataFormat'])) and re.match(
".*?" + part_match + ".*?" + name, src):
raise NotImplementedError(src +
"\nthis file format does not yet support {ext} files for "
"channel labels".format(
ext=os.path.splitext(src)[1]))
                    if isinstance(channels, str) and channels not in self.channels[part_match]:
self.channels[part_match] = self.channels[part_match] + [channels]
elif channels is not None:
self.channels[part_match] = self.channels[part_match] + [c for c in channels if
c not in self.channels[part_match]]
def set_overwrite(self, overwrite):
self._is_overwrite = overwrite
def set_verbosity(self, verbose):
self._is_verbose = verbose
def set_multi_echo(self, multi_echo): # if -m flag is called
if multi_echo is None:
self.is_multi_echo = False
else:
self.is_multi_echo = True
if not multi_echo:
self._multi_echo = 0
else:
self._multi_echo = multi_echo
def set_DICOM(self, ddir): # triggers only if dicom flag is called and therefore _data_dir is None
if self._data_dir is None:
self._data_dir = os.path.dirname(self._bids_dir)
subdirs = [x[0] for x in os.walk(ddir)]
files = [x[2] for x in os.walk(ddir)]
sub_num = str(dicom.read_file(os.path.join(subdirs[1], files[1][0]))[0x10, 0x20].value).split("_", 1)[1]
sub_dir = os.path.join(os.path.dirname(self._bids_dir),
"sub-{SUB_NUM}".format(SUB_NUM=sub_num)) # destination subdirectory
if os.path.isdir(sub_dir):
proc = subprocess.Popen("rm -rf {file}".format(file=sub_dir), shell=True, stdout=subprocess.PIPE)
proc.communicate()
os.mkdir(sub_dir)
if any("medata" in x for x in subdirs): # copy over and list me data
melist = [x[2] for x in os.walk(os.path.join(ddir, "medata"))][0]
runlist = []
for me in melist:
if me.startswith("."):
continue
runmatch = re.match(r".*run(\d{2}).*", me).group(1)
if str(int(runmatch)) not in runlist:
runlist.append(str(int(runmatch)))
shutil.copyfile(os.path.join(ddir, "medata", me), os.path.join(sub_dir, me))
self.is_multi_echo = True # will trigger even if single echo data is in medata folder. Should still
# be okay
for subdir in subdirs[1:]: # not including parent folder or /medata, run dcm2niix on non me data
try:
fobj = dicom.read_file(os.path.join(subdir, list(os.walk(subdir))[0][2][0]),
force=True) # first dicom file of the scan
scan_num = str(int(os.path.basename(subdir))).zfill(2)
except ValueError:
continue
firstfile = [x[2] for x in os.walk(subdir)][0][0]
# print(str(fobj[0x20, 0x11].value), runlist)
# running dcm2niix,
if str(fobj[0x20, 0x11].value) in runlist:
proc = subprocess.Popen(
"dcm2niix -z y -f run{SCAN_NUM}_%p_%t_sub{SUB_NUM} -o {OUTPUT_DIR} -s y -b y {DATA_DIR}".format(
OUTPUT_DIR=sub_dir, SUB_NUM=sub_num, DATA_DIR=os.path.join(subdir, firstfile),
SCAN_NUM=scan_num), shell=True, stdout=subprocess.PIPE)
# output = proc.stdout.read()
outs, errs = proc.communicate()
prefix = re.match(".*/sub-{SUB_NUM}/(run{SCAN_NUM}".format(SUB_NUM=sub_num,
SCAN_NUM=scan_num) + r"[^ \(\"\\n\.]*).*",
str(outs)).group(1)
for file in os.listdir(sub_dir):
mefile = re.match(r"run{SCAN_NUM}(\.e\d\d)\.nii".format(SCAN_NUM=scan_num), file)
if re.match(r"run{SCAN_NUM}\.e\d\d.nii".format(SCAN_NUM=scan_num), file):
shutil.move(os.path.join(sub_dir, file),
os.path.join(sub_dir, prefix + mefile.group(1) + ".nii"))
shutil.copy(os.path.join(sub_dir, prefix + ".json"),
os.path.join(sub_dir, prefix + mefile.group(1) + ".json"))
os.remove(os.path.join(sub_dir, prefix + ".nii.gz"))
os.remove(os.path.join(sub_dir, prefix + ".json"))
else:
proc = subprocess.Popen(
"dcm2niix -z y -f run{SCAN_NUM}_%p_%t_sub{SUB_NUM} -o {OUTPUT_DIR} -b y {DATA_DIR}".format(
OUTPUT_DIR=sub_dir, SUB_NUM=sub_num, DATA_DIR=subdir, SCAN_NUM=scan_num), shell=True,
stdout=subprocess.PIPE)
outs, errs = proc.communicate()
sys.stdout.write(outs.decode("utf-8"))
self._multi_echo = runlist
self._data_dir = os.path.join(os.path.dirname(self._bids_dir), "sub-{SUB_NUM}".format(SUB_NUM=sub_num))
self._DICOM_path = ddir
def get_data_dir(self):
return self._data_dir
def set_data_dir(self, data_dir, DICOM): # check if input dir is listed
if DICOM is None:
if data_dir is None:
self._data_dir = os.getcwd()
else:
self._data_dir = data_dir
self._dataset_name = os.path.basename(self._data_dir)
else:
self._data_dir = None
def get_config(self):
return self._config
def get_config_path(self):
return self._config_path
def _set_config(self):
with open(self._config_path, 'r') as fst:
self._config = json.load(fst)
def set_config(self, config):
self._config = config
def set_config_path(self, config_path):
if config_path is None:
# Checking if a config.json is present
if os.path.isfile(os.path.join(os.getcwd(), "config.json")):
self._config_path = os.path.join(os.getcwd(), "config.json")
# Otherwise taking the default config
else:
self._config_path = os.path.join(os.path.dirname(__file__), "config.json")
else:
self._config_path = config_path
self._set_config()
def get_bids_dir(self):
return self._bids_dir
def set_bids_dir(self, bids_dir):
if bids_dir is None:
# Creating a new directory for BIDS
try:
newdir = self._data_dir + "/BIDS"
except TypeError:
print("Error: Please provide input data directory if no BIDS directory...")
# deleting old BIDS to make room for new
elif not os.path.basename(bids_dir) == "BIDS":
newdir = os.path.join(bids_dir, "BIDS")
else:
newdir = bids_dir
if not os.path.isdir(newdir):
os.mkdir(newdir)
elif self._is_overwrite:
force_remove(newdir)
os.mkdir(newdir)
self._bids_dir = newdir
self._ignore.append(newdir)
# as of BIDS ver 1.6.0, CT is not a part of BIDS, so check for CT files and add to .bidsignore
self.bidsignore("*_CT.*")
def get_bids_version(self):
return self._bids_version
def bids_validator(self):
assert self._bids_dir is not None, "Cannot launch bids-validator without specifying bids directory !"
# try:
subprocess.check_call(['bids-validator', self._bids_dir])
# except FileNotFoundError:
# print("bids-validator does not appear to be installed")
def generate_names(self, src_file_path, filename=None,
# function to run through name text and generate metadata
part_match=None,
sess_match=None,
ce_match=None,
acq_match=None,
echo_match=None,
data_type_match=None,
task_label_match=None,
run_match=None,
verbose=None,
debug=False):
if filename is None:
filename = os.path.basename(src_file_path)
if part_match is None:
part_match = match_regexp(self._config["partLabel"], filename)
if verbose is None:
verbose = self._is_verbose
try:
if re.match(r"^[^\d]{1,3}", part_match):
part_matches = re.split(r"([^\d]{1,3})", part_match, 1)
part_match_z = part_matches[1] + str(int(part_matches[2])).zfill(self._config["partLabel"]["fill"])
else:
part_match_z = str(int(part_match)).zfill(self._config["partLabel"]["fill"])
except KeyError:
pass
dst_file_path = self._bids_dir + "/sub-" + part_match_z
new_name = "/sub-" + part_match_z
SeqType = None
# Matching the session
try:
if sess_match is None:
sess_match = match_regexp(self._config["sessLabel"], filename)
dst_file_path = dst_file_path + "/ses-" + sess_match
new_name = new_name + "_ses-" + sess_match
except AssertionError:
if verbose:
print("No session found for %s" % src_file_path)
# Matching the run number
try:
if run_match is None:
run_match = match_regexp(self._config["runIndex"], filename)
try:
if re.match(r"^[^\d]{1,3}", run_match):
run_matches = re.split(r"([^\d]{1,3})", run_match, 1)
run_match = run_matches[1] + str(int(run_matches[2])).zfill(self._config["runIndex"]["fill"])
else:
run_match = str(int(run_match)).zfill(self._config["runIndex"]["fill"])
except KeyError:
pass
except AssertionError:
pass
# Matching the anat/fmri data type and task
try:
if data_type_match is None:
data_type_match = match_regexp(self._config["anat"]
, filename
, subtype=True)
dst_file_path = dst_file_path + "/anat"
self._data_types["anat"] = True
except (AssertionError, KeyError) as e:
            # If no anatomical, trying functional
try:
if data_type_match is None:
data_type_match = match_regexp(self._config["func"]
, filename
, subtype=True)
dst_file_path = dst_file_path + "/func"
self._data_types["func"] = True
# Now trying to match the task
try:
if task_label_match is None:
task_label_match = match_regexp(self._config["func.task"]
, filename
, subtype=True)
new_name = new_name + "_task-" + task_label_match
except AssertionError as e:
print("No task found for %s" % src_file_path)
if debug:
raise e
return
except (AssertionError, KeyError) as e:
# no functional or anatomical, try ieeg
try:
if data_type_match is None:
data_type_match = match_regexp(self._config["ieeg"]
, filename
, subtype=True)
dst_file_path = dst_file_path + "/ieeg"
self._data_types["ieeg"] = True
# Now trying to match the task
try:
if task_label_match is None:
task_label_match = match_regexp(self._config["ieeg.task"]
, filename
, subtype=True)
new_name = new_name + "_task-" + task_label_match
except AssertionError as e:
print("No task found for %s" % src_file_path)
if debug:
raise e
return
except AssertionError as e:
if verbose:
print("No anat, func, or ieeg data type found for %s" % src_file_path)
if debug:
raise e
return
except KeyError as e:
print("No anat, func, or ieeg data type found in config file, one of these data types is required")
if debug:
raise e
return
# if is an MRI
if dst_file_path.endswith("/func") or dst_file_path.endswith("/anat"):
try:
SeqType = str(match_regexp(self._config["pulseSequenceType"], filename, subtype=True))
except AssertionError:
if verbose:
print("No pulse sequence found for %s" % src_file_path)
except KeyError:
if verbose:
print("pulse sequence not listed for %s, will look for in file header" % src_file_path)
try:
if echo_match is None:
echo_match = match_regexp(self._config["echo"], filename)
new_name = new_name + "_echo-" + echo_match
except AssertionError:
if verbose:
print("No echo found for %s" % src_file_path)
# check for optional labels
try:
if acq_match is None:
acq_match = match_regexp(self._config["acq"], filename)
try:
if re.match(r"^[^\d]{1,3}", acq_match):
acq_matches = re.split(r"([^\d]{1,3})", acq_match, 1)
acq_match = acq_matches[1] + str(int(acq_matches[2])).zfill(self._config["acq"]["fill"])
else:
acq_match = str(int(acq_match)).zfill(self._config["acq"]["fill"])
except KeyError:
pass
new_name = new_name + "_acq-" + acq_match
except (AssertionError, KeyError) as e:
if verbose:
print("no optional labels for %s" % src_file_path)
try:
if ce_match is None:
ce_match = match_regexp(self._config["ce"]
, filename)
new_name = new_name + "_ce-" + ce_match
except (AssertionError, KeyError) as e:
if verbose:
print("no special contrast labels for %s" % src_file_path)
if run_match is not None:
new_name = new_name + "_run-" + run_match
# Adding the modality to the new filename
new_name = new_name + "_" + data_type_match
return (new_name, dst_file_path, part_match, run_match,
acq_match, echo_match, sess_match, ce_match,
data_type_match, task_label_match, SeqType)
def multi_echo_check(self, runnum, src_file=""): # check to see if run is multi echo based on input
if self.is_multi_echo:
if int(runnum) in self._multi_echo:
return (True)
else:
if self._multi_echo == 0:
try:
match_regexp(self._config["echo"], src_file)
except AssertionError:
return (False)
return (True)
else:
return (False)
else:
return (False)
def get_params(self, folder, echo_num, run_num): # function to run through DICOMs and get metadata
# threading?
if self.is_multi_echo and run_num in self._multi_echo:
vols_per_time = len(self._config['delayTimeInSec']) - 1
echo = self._config['delayTimeInSec'][echo_num]
else:
vols_per_time = 1
echo = None
for root, _, dfiles in os.walk(folder, topdown=True):
dfiles.sort()
for dfile in dfiles:
dcm_file_path = os.path.join(root, dfile)
fobj = dicom.read_file(str(dcm_file_path))
if echo is None:
try:
echo = float(fobj[0x18, 0x81].value) / 1000
except KeyError:
echo = self._config['delayTimeInSec'][0]
ImagesInAcquisition = int(fobj[0x20, 0x1002].value)
seqlist = []
for i in list(range(5)):
try:
seqlist.append(fobj[0x18, (32 + i)].value)
if seqlist[i] == 'NONE':
seqlist[i] = None
if isinstance(seqlist[i], dicom.multival.MultiValue):
seqlist[i] = list(seqlist[i])
if isinstance(seqlist[i], list):
seqlist[i] = ", ".join(seqlist[i])
except KeyError:
seqlist.append(None)
[ScanningSequence, SequenceVariant, SequenceOptions, AquisitionType, SequenceName] = seqlist
                try:
                    timings  # only build the slice-timing list once, on the first DICOM file of the scan
                except NameError:
                    timings = [None] * int(ImagesInAcquisition / vols_per_time)
RepetitionTime = (
(float(fobj[0x18, 0x80].value) / 1000)) # TR value extracted in milliseconds, converted to seconds
try:
acquisition_series = self._config['series']
except KeyError:
print("default")
acquisition_series = "non-interleaved"
if acquisition_series == "even-interleaved":
                    InStackPositionNumber = 2
else:
InStackPositionNumber = 1
InstanceNumber = 0
while None in timings:
if timings[InStackPositionNumber - 1] is None:
timings[InStackPositionNumber - 1] = slice_time_calc(RepetitionTime, InstanceNumber,
int(ImagesInAcquisition / vols_per_time),
echo)
if acquisition_series == "odd-interleaved" or acquisition_series == "even-interleaved":
InStackPositionNumber += 2
if InStackPositionNumber > ImagesInAcquisition / vols_per_time and acquisition_series == "odd-interleaved":
InStackPositionNumber = 2
elif InStackPositionNumber > ImagesInAcquisition / vols_per_time and acquisition_series == "even-interleaved":
InStackPositionNumber = 1
else:
InStackPositionNumber += 1
InstanceNumber += 1
return (timings, echo, ScanningSequence, SequenceVariant, SequenceOptions, SequenceName)
def read_edf(self, file_name, channels=None, extra_arrays=None, extra_signal_headers=None):
[edfname, dst_path, part_match] = self.generate_names(file_name, verbose=False)[0:3]
# file_name = str(file_name)
header = highlevel.make_header(patientname=part_match, startdate=datetime.datetime(1, 1, 1))
edf_name = dst_path + edfname + ".edf"
d = {str: [], int: []}
for i in channels:
d[type(i)].append(i)
f = EdfReader(file_name)
chn_nums = d[int] + [i for i, x in enumerate(f.getSignalLabels()) if x in channels]
f.close()
chn_nums.sort()
try:
check_sep = self._config["eventFormat"]["Sep"]
except (KeyError, AssertionError) as e:
check_sep = None
gc.collect() # helps with memory
if check_sep:
# read edf
print("Reading " + file_name + "...")
[array, signal_headers, _] = highlevel.read_edf(file_name, ch_nrs=chn_nums,
digital=self._config["ieeg"]["digital"],
verbose=True)
if extra_arrays:
array = array + extra_arrays
if extra_signal_headers:
signal_headers = signal_headers + extra_signal_headers
# replace trigger channels with trigger label ("DC1")
if part_match in self._config["ieeg"]["headerData"].keys():
trig_label = self._config["ieeg"]["headerData"][part_match]
else:
trig_label = self._config["ieeg"]["headerData"]["default"]
for i in range(len(signal_headers)):
#print(re.match(".*\.xls.*", trig_label))
if re.match(".*\.xls.*", str(trig_label)):
xls_df = pd.ExcelFile(trig_label).parse(part_match)
for column in xls_df:
if "Trigger" in column:
trig_label = xls_df[column].iloc[0]
if signal_headers[i]["label"] == trig_label:
signal_headers[i]["label"] = "Trigger"
return dict(name=file_name, bids_name=edf_name, nsamples=array.shape[1], signal_headers=signal_headers,
file_header=header, data=array, reader=f)
elif channels:
highlevel.drop_channels(file_name, edf_name, channels, verbose=self._is_verbose)
return None
else:
shutil.copy(file_name, edf_name)
return None
def part_check(self, part_match=None, filename=None):
# Matching the participant label to determine if
# there exists therein delete previously created BIDS subject files
assert part_match or filename
if filename:
try:
part_match = match_regexp(self._config["partLabel"], filename)
except AssertionError:
print("No participant found for %s" % filename)
except KeyError as e:
print("Participant label pattern must be defined")
raise e
if re.match(r"^[^\d]{1,3}", part_match):
part_matches = re.split(r"([^\d]{1,3})", part_match, 1)
part_match_z = part_matches[1] + str(int(part_matches[2])).zfill(self._config["partLabel"]["fill"])
else:
part_match_z = str(int(part_match)).zfill(self._config["partLabel"]["fill"])
return part_match, part_match_z
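    # Example of the zero-filling done above (hypothetical label and config): with
    # config["partLabel"]["fill"] == 4, part_check(part_match="D7") returns
    # ("D7", "D0007"), so the subject's files sort numerically under sub-D0007.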
def bidsignore(self, string: str):
if not os.path.isfile(self._bids_dir + "/.bidsignore"):
with open(self._bids_dir + "/.bidsignore", 'w') as f:
f.write(string + "\n")
else:
with open(self._bids_dir + "/.bidsignore", "r+") as f:
if string not in f.read():
f.write(string + "\n")
def run(self): # main function
# First we check that every parameters are configured
if (self._data_dir is not None
and self._config_path is not None
and self._config is not None
and self._bids_dir is not None):
print("---- data2bids starting ----")
print(self._data_dir)
print("\n BIDS version:")
print(self._bids_version)
print("\n Config from file :")
print(self._config_path)
print("\n Ouptut BIDS directory:")
print(self._bids_dir)
print("\n")
# Create the output BIDS directory
if not os.path.exists(self._bids_dir):
os.makedirs(self._bids_dir)
# else
# shutil.rmtree(self._bids_dir)
# What is the base format to convert to
curr_ext = self._config["dataFormat"]
compress = self._config["compress"]
# delay time in TR unit (if delay_time = 1, delay_time = repetition_time)
repetition_time = self._config["repetitionTimeInSec"]
delaytime = self._config["delayTimeInSec"]
            # dataset_description.json must be included in the folder root following BIDS specs
if os.path.exists(self._bids_dir + "/dataset_description.json"):
# with open(dst_file_path + new_name + ".json", 'w') as fst:
with open(self._bids_dir + "/dataset_description.json") as fst:
filedata = json.load(fst)
with open(self._bids_dir + "/dataset_description.json", 'w') as fst:
data = {'Name': self._dataset_name,
'BIDSVersion': self._bids_version}
filedata.update(data)
json.dump(filedata, fst, ensure_ascii=False, indent=4)
else:
with open(self._bids_dir + "/dataset_description.json", 'w') as fst:
data = {'Name': self._dataset_name,
'BIDSVersion': self._bids_version}
json.dump(data, fst, ensure_ascii=False, indent=4)
try:
for key, data in self._config["JSON_files"].items():
with open(self._bids_dir + '/' + key, 'w') as fst:
json.dump(data, fst, ensure_ascii=False, indent=4)
except KeyError:
pass
# add a README file
if not os.path.exists(self._bids_dir + "/README"):
with open(self._bids_dir + "/README", 'w') as fst:
data = ""
fst.write(data)
# now we can scan all files and rearrange them
part_match = None
part_match_z = None
for root, _, files in os.walk(self._data_dir,
topdown=True):
# each loop is a new participant so long as participant is top level
files[:] = [f for f in files if not self.check_ignore(os.path.join(root, f))]
eeg = []
dst_file_path_list = []
names_list = []
mat_list = []
run_list = []
tsv_condition_runs = []
tsv_fso_runs = []
d_list = []
txt_df_list = []
correct = None
if not files:
continue
files.sort()
part_match = None
i = 0
while part_match is None:
try:
part_match, part_match_z = self.part_check(filename=files[i])
except:
i += 1
continue
if self.channels:
if self._is_verbose and self.channels[part_match] is not None:
print("Channels for participant " + part_match + " are")
print(self.channels[part_match])
for i in self._ignore:
if part_match in i:
print("From " + i)
if self._is_verbose:
print(files)
while files: # loops over each participant file
file = files.pop(0)
if self._is_verbose:
print(file)
src_file_path = os.path.join(root, file)
dst_file_path = self._bids_dir
data_type_match = None
new_name = None
if re.match(".*?" + ".json", file):
try:
(new_name, dst_file_path, part_match, run_match,
acq_match, echo_match, sess_match, ce_match,
data_type_match, task_label_match, SeqType) = self.generate_names(src_file_path,
part_match=part_match)
except TypeError:
continue
if echo_match is None:
echo_match = 0
if new_name in names_list:
shutil.copy(src_file_path, dst_file_path + new_name + ".json")
# finally, if it is a bold experiment, we need to edit the JSON file using DICOM tags
if os.path.exists(dst_file_path + new_name + ".json"):
# https://github.com/nipy/nibabel/issues/712, that is why we take the
# scanner parameters from the config.json
# nib_img = nib.load(src_file_path)
# TR = nib_img.header.get_zooms()[3]
for foldername in os.listdir(str(self._DICOM_path)):
if run_match.zfill(4) == foldername.zfill(4):
DICOM_filepath = os.path.join(self._DICOM_path, foldername)
slicetimes, echotime, ScanSeq, SeqVar, SeqOpt, SeqName = self.get_params(
str(DICOM_filepath), int(echo_match), int(run_match))
# with open(dst_file_path + new_name + ".json", 'w') as fst:
with open(dst_file_path + new_name + ".json", 'r+') as fst:
filedata = json.load(fst)
with open(dst_file_path + new_name + ".json", 'w') as fst:
if data_type_match == "bold":
filedata['TaskName'] = task_label_match
filedata['SliceTiming'] = slicetimes
if int(run_match) in self._multi_echo:
filedata['EchoTime'] = echotime
else:
filedata['DelayTime'] = delaytime[0]
if SeqType is not None:
filedata['PulseSequenceType'] = SeqType
if ScanSeq is not None:
filedata['ScanningSequence'] = ScanSeq
if SeqVar is not None:
filedata['SequenceVariant'] = SeqVar
if SeqOpt is not None:
filedata['SequenceOption'] = SeqOpt
if SeqName is not None:
filedata['SequenceName'] = SeqName
json.dump(filedata, fst, ensure_ascii=False, indent=4, default=set_default)
else:
print("Cannot update %s" % (dst_file_path + new_name + ".json"))
elif any(re.search("\\.nii", filelist) for filelist in files):
files.append(src_file_path)
continue
elif re.match(".*?" + "EADME.txt", file): # if README.txt in image list
with open(src_file_path, 'r') as readmetext:
for line in readmetext:
regret_words = ["Abort", "NOTE"]
if ". tempAttnAudT" in line:
# these lines could and should be improved by
# linking config["func.task"] instead of literal strings
prevline = "con"
tsv_condition_runs.append(
re.search(r'\d+', line).group()) # save the first number on the line
elif ". fsoSubLocal" in line:
prevline = "fso"
tsv_fso_runs.append(re.search(r'\d+', line).group())
elif all(x in line for x in regret_words):
if prevline == "con":
del tsv_condition_runs[-1]
elif prevline == "fso":
del tsv_fso_runs[-1]
prevline = ""
else:
prevline = ""
if not os.path.exists(
self._bids_dir + "/sub-" + part_match):
                            # Writing both a participant-specific and agnostic README
# Requires creation of a .bidsignore file for local READMEs
os.makedirs(self._bids_dir + "/sub-" + part_match)
shutil.copy(src_file_path, self._bids_dir + "/sub-" + part_match + "/README.txt")
with open(src_file_path, 'r') as readmetext:
for line in readmetext:
if os.path.exists(self._bids_dir + "/README"):
with open(self._bids_dir + "/README", 'a') as f:
f.write(line + "\n")
else:
with open(self._bids_dir + "/README", 'w') as f:
f.write(line + "\n")
continue
elif re.match(".*?" + "\\.1D", file):
d_list.append(src_file_path)
continue
elif re.match(".*?" + "\\.mat", file):
mat_list.append(src_file_path)
continue
# if the file doesn't match the extension, we skip it
elif re.match(".*?" + "\\.txt", file):
if part_match is None:
files.append(file)
else:
                            try:
                                df = pd.read_table(src_file_path, header=None, sep=r"\s+")
                                e = None
                            except Exception as exc:
                                df = None
                                e = exc  # rebind: the name bound by "as" is cleared once the except block exits
txt_df_list.append(dict(name=file, data=df, error=e))
continue
elif not any(re.match(".*?" + ext, file) for ext in curr_ext):
print("Warning : Skipping %s" % src_file_path)
continue
if self._is_verbose:
print("trying %s" % src_file_path)
try:
(new_name, dst_file_path, part_match, run_match,
acq_match, echo_match, sess_match, ce_match,
data_type_match, task_label_match, _) = self.generate_names(src_file_path,
part_match=part_match)
except TypeError as problem: #
print("\nIssue in generate names")
print("problem with %s:" % src_file_path, problem, "\n")
continue
# Creating the directory where to store the new file
if not os.path.exists(dst_file_path):
os.makedirs(dst_file_path)
# print(data_type_match)
# finally, if the file is not nifti
if dst_file_path.endswith("/func") or dst_file_path.endswith("/anat"):
# we convert it using nibabel
if not any(file.endswith(ext) for ext in [".nii", ".nii.gz"]):
# check if .nii listed in config file, not if file ends with .nii
# loading the original image
nib_img = nib.load(src_file_path)
nib_affine = np.array(nib_img.affine)
nib_data = np.array(nib_img.dataobj)
# create the nifti1 image
# if minc format, invert the data and change the affine transformation
# there is also an issue on minc headers
if file.endswith(".mnc"):
if len(nib_img.shape) > 3:
                                    nib_affine[0:3, 0:3] = (nib_affine[0:3, 0:3] @ rot_z(np.pi / 2)
                                                            @ rot_y(np.pi) @ rot_x(np.pi / 2))
nib_data = nib_data.T
nib_data = np.swapaxes(nib_data, 0, 1)
nifti_img = nib.Nifti1Image(nib_data, nib_affine, nib_img.header)
nifti_img.header.set_xyzt_units(xyz="mm", t="sec")
zooms = np.array(nifti_img.header.get_zooms())
zooms[3] = repetition_time
nifti_img.header.set_zooms(zooms)
elif len(nib_img.shape) == 3:
nifti_img = nib.Nifti1Image(nib_data, nib_affine, nib_img.header)
nifti_img.header.set_xyzt_units(xyz="mm")
else:
nifti_img = nib.Nifti1Image(nib_data, nib_affine, nib_img.header)
# saving the image
nib.save(nifti_img, dst_file_path + new_name + ".nii.gz")
# if it is already a nifti file, no need to convert it so we just copy rename
if file.endswith(".nii.gz"):
shutil.copy(src_file_path, dst_file_path + new_name + ".nii.gz")
elif file.endswith(".nii"):
shutil.copy(src_file_path, dst_file_path + new_name + ".nii")
# compression just if .nii files
if compress is True:
print("zipping " + file)
with open(dst_file_path + new_name + ".nii", 'rb') as f_in:
with gzip.open(dst_file_path + new_name + ".nii.gz", 'wb',
self._config["compressLevel"]) as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(dst_file_path + new_name + ".nii")
elif dst_file_path.endswith("/ieeg"):
remove_src_edf = True
headers_dict = self.channels[part_match]
if file.endswith(".edf"):
remove_src_edf = False
elif file.endswith(".edf.gz"):
with gzip.open(src_file_path, 'rb', self._config["compressLevel"]) as f_in:
with open(src_file_path.rsplit(".gz", 1)[0], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
elif not self._config["ieeg"]["binary?"]:
raise NotImplementedError(
"{file} file format not yet supported. If file is binary format, please indicate so "
"and what encoding in the config.json file".format(
file=file))
elif headers_dict and any(".mat" in i for i in files) and self.sample_rate[
part_match] is not None:
# assume has binary encoding
try: # open binary file and write decoded numbers as array where rows = channels
# check if zipped
if file.endswith(".gz"):
with gzip.open(src_file_path, 'rb', self._config["compressLevel"]) as f:
data = np.frombuffer(f.read(),
dtype=np.dtype(self._config["ieeg"]["binaryEncoding"]))
else:
with open(src_file_path, mode='rb') as f:
data = np.fromfile(f, dtype=self._config["ieeg"]["binaryEncoding"])
array = np.reshape(data, [len(headers_dict), -1],
order='F') # byte order is Fortran encoding, dont know why
signal_headers = highlevel.make_signal_headers(headers_dict,
sample_rate=self.sample_rate[part_match],
physical_max=np.amax(array),
physical_min=(np.amin(array)))
print("converting binary" + src_file_path + " to edf" + os.path.splitext(src_file_path)[
0] + ".edf")
highlevel.write_edf(os.path.splitext(src_file_path)[0] + ".edf", array, signal_headers,
digital=self._config["ieeg"]["digital"])
except OSError as e:
print("eeg file is either not detailed well enough in config file or file type not yet "
"supported")
raise e
else:
raise FileNotFoundError("{file} header could not be found".format(file=file))
# check for extra channels in data, not working in other file modalities
f = EdfReader(os.path.splitext(src_file_path)[0] + ".edf")
extra_arrays = []
extra_signal_headers = []
if any(len(mat2df(os.path.join(root, fname))) == f.samples_in_file(0) for fname in
[i for i in files if i.endswith(".mat")]) or any(
len(mat2df(os.path.join(root, fname))) == f.samples_in_file(0) for fname in [
i for i in mat_list if i.endswith(".mat")]):
for fname in [i for i in files + mat_list if i.endswith(".mat")]:
sig_len = f.samples_in_file(0)
if not os.path.isfile(fname):
fname = os.path.join(root, fname)
if mat2df(fname) is None:
continue
elif len(mat2df(fname)) == sig_len:
if fname in files:
files.remove(fname)
if fname in mat_list:
mat_list.remove(fname)
df = pd.DataFrame(mat2df(fname))
for cols in df.columns:
extra_arrays = np.vstack([extra_arrays, df[cols]])
extra_signal_headers.append(highlevel.make_signal_header(
os.path.splitext(os.path.basename(fname))[0]
, sample_rate=self.sample_rate[part_match]))
elif sig_len * 0.99 <= len(mat2df(fname)) <= sig_len * 1.01:
                                    raise BufferError(
                                        "{f} of size {n} is not the same size as {m} of size {k}".format(
                                            f=file, n=sig_len, m=fname, k=len(mat2df(fname))))
f.close()
# read edf and either copy data to BIDS file or save data as dict for writing later
eeg.append(self.read_edf(os.path.splitext(src_file_path)[0] + ".edf", headers_dict,
extra_arrays, extra_signal_headers))
if remove_src_edf:
if self._is_verbose:
print("Removing " + os.path.splitext(src_file_path)[0] + ".edf")
os.remove(os.path.splitext(src_file_path)[0] + ".edf")
# move the sidecar from input to output
names_list.append(new_name)
dst_file_path_list.append(dst_file_path)
try:
if run_match is not None:
run_list.append(int(run_match))
except UnboundLocalError:
pass
if d_list:
self.convert_1D(run_list, d_list, tsv_fso_runs, tsv_condition_runs, names_list, dst_file_path_list)
if mat_list: # deal with remaining .mat files
self.mat2tsv(mat_list)
if txt_df_list:
for txt_df_dict in txt_df_list:
if self._config["coordsystem"] in txt_df_dict["name"]:
if txt_df_dict["error"] is not None:
raise txt_df_dict["error"]
df = txt_df_dict["data"]
df.columns = ["name1", "name2", "x", "y", "z", "hemisphere", "del"]
df["name"] = df["name1"] + df["name2"].astype(str).str.zfill(2)
df["hemisphere"] = df["hemisphere"] + df["del"]
df = df.drop(columns=["name1", "name2", "del"])
df = pd.concat([df["name"], df["x"], df["y"], df["z"], df["hemisphere"]], axis=1)
df.to_csv(self._bids_dir + "/sub-" + part_match_z + "/sub-" + part_match_z +
"_space-Talairach_electrodes.tsv", sep="\t", index=False)
elif self._config["eventFormat"]["AudioCorrection"] in txt_df_dict["name"]:
if txt_df_dict["error"] is not None:
raise txt_df_dict["error"]
correct = txt_df_dict["data"]
else:
print("skipping " + txt_df_dict["name"])
# check final file set
for new_name in names_list:
print(new_name)
file_path = dst_file_path_list[names_list.index(new_name)]
full_name = file_path + new_name + ".edf"
task_match = re.match(".*_task-(\w*)_.*", full_name)
if task_match:
task_label_match = task_match.group(1)
# split any edfs according to tsvs
if new_name.endswith("_ieeg") and any(re.match(new_name.split("_ieeg")[0].split("/", 1)[1] +
"(?:" + "_acq-" + self._config["acq"]["content"][0] +
")?" + "_run-" + self._config["runIndex"]["content"][
0] + "_events.tsv", set_file) for set_file in
os.listdir(file_path)): # if edf is not yet split
if self._is_verbose:
print("Reading for split... ")
if full_name in [i["bids_name"] for i in eeg]:
eeg_dict = eeg[[i["bids_name"] for i in eeg].index(full_name)]
else:
raise LookupError(
"This error should not have been raised, was edf file " + full_name + " ever written?"
, [i["name"] for i in eeg])
[array, signal_headers, header] = [eeg_dict["data"], eeg_dict["signal_headers"],
eeg_dict["file_header"]]
start_nums = []
matches = []
for file in sorted(os.listdir(file_path)):
match_tsv = re.match(new_name.split("_ieeg", 1)[0].split("/", 1)[1] +
"(?:_acq-" + self._config["acq"]["content"][0] + ")?_run-(" +
self._config["runIndex"]["content"][0] + ")_events.tsv", file)
if match_tsv:
df = pd.read_csv(os.path.join(file_path, file), sep="\t", header=0)
# converting signal start and end to correct sample rate for data
end_num = str2num(df[self._config["eventFormat"]["Timing"]["end"]].iloc[-1])
i = -1
while not isinstance(end_num, (int, float)):
print(end_num, type(end_num))
i -= 1
end_num = str2num(df[self._config["eventFormat"]["Timing"]["end"]].iloc[i])
num_list = [round((float(x) / float(self._config["eventFormat"]["SampleRate"])) *
signal_headers[0]["sample_rate"]) for x in (
df[self._config["eventFormat"]["Timing"]["start"]][0], end_num)]
start_nums.append(tuple(num_list))
matches.append(match_tsv)
for i in range(len(start_nums)):
if i == 0:
start = 0
practice = os.path.join(file_path, "practice" + new_name.split("_ieeg", 1)[0]
+ "_ieeg.edf")
if not os.path.isfile(practice) and self._config["split"]["practice"]:
os.makedirs(os.path.join(file_path, "practice"), exist_ok=True)
highlevel.write_edf(practice, np.split(array, [0, start_nums[0][0]], axis=1)[1],
signal_headers, header, digital=self._config["ieeg"]["digital"])
self.bidsignore("*practice*")
else:
start = start_nums[i - 1][1]
if i == len(start_nums) - 1:
end = array.shape[1]
else:
end = start_nums[i + 1][0]
new_array = np.split(array, [start, end], axis=1)[1]
tsv_name: str = os.path.join(file_path, matches[i].string)
edf_name: str = os.path.join(file_path, matches[i].string.split("_events.tsv", 1)[0]
+ "_ieeg.edf")
full_name = os.path.join(file_path, new_name.split("/", 1)[1] + ".edf")
if self._is_verbose:
print(full_name + "(Samples[" + str(start) + ":" + str(end) + "]) ---> " + edf_name)
highlevel.write_edf(edf_name, new_array, signal_headers, header,
digital=self._config["ieeg"]["digital"])
df = pd.read_csv(tsv_name, sep="\t", header=0)
os.remove(tsv_name)
# all column manipulation and math in frame2bids
df_new = self.frame2bids(df, self._config["eventFormat"]["Events"],
self.sample_rate[part_match], correct, start)
df_new.to_csv(tsv_name, sep="\t", index=False,
na_rep="n/a")
# dont forget .json files!
self.write_sidecar(edf_name)
self.write_sidecar(tsv_name)
continue
# write JSON file for any missing files
self.write_sidecar(file_path + new_name)
# write any indicated .json files
try:
json_list = self._config["JSON_files"]
except KeyError:
json_list = dict()
for jfile, contents in json_list.items():
print(part_match_z, task_label_match, jfile)
file_name = os.path.join(self._bids_dir, "sub-" + part_match_z, "sub-" + part_match_z + "_task-" +
task_label_match + "_" + jfile)
with open(file_name, "w") as fst:
json.dump(contents, fst)
# Output
if self._is_verbose:
tree(self._bids_dir)
# Finally, we check with bids_validator if everything went alright (This wont work)
# self.bids_validator()
else:
print("Warning: No parameters are defined !")
def write_sidecar(self, full_file):
if full_file.endswith(".tsv"): # need to search BIDS specs for list of possible known BIDS columns
data = dict()
df = pd.read_csv(full_file, sep="\t")
# for col in df.columns:
# data[col] =
return
elif os.path.dirname(full_file).endswith("/ieeg"):
if not full_file.endswith(".edf"):
full_file = full_file + ".edf"
entities = layout.parse_file_entities(full_file)
f = EdfReader(full_file)
if f.annotations_in_file == 0:
description = "n/a"
elif f.getPatientAdditional():
description = f.getPatientAdditional()
elif f.getRecordingAdditional():
description = f.getRecordingAdditional()
elif any((not i.size == 0) for i in f.readAnnotations()):
description = [i for i in f.readAnnotations()]
print("description:", description)
else:
raise SyntaxError(full_file + "was not annotated correctly")
signals = [sig for sig in f.getSignalLabels() if "Trigger" not in sig]
data = dict(TaskName=entities['task'],
InstitutionName=self._config["institution"],
iEEGReference=description, SamplingFrequency=int(f.getSignalHeader(0)["sample_rate"]),
PowerLineFrequency=60, SoftwareFilters="n/a", ECOGChannelCount=len(signals),
TriggerChannelCount=1, RecordingDuration=f.file_duration)
elif os.path.dirname(full_file).endswith("/anat"):
entities = layout.parse_file_entities(full_file + ".nii.gz")
if entities["suffix"] == "CT":
data = {}
elif entities["suffix"] == "T1w":
data = {}
else:
raise NotImplementedError(full_file + "is not yet accounted for")
else:
data = {}
if not os.path.isfile(os.path.splitext(full_file)[0] + ".json"):
with open(os.path.splitext(full_file)[0] + ".json", "w") as fst:
json.dump(data, fst)
def frame2bids(self, df: pd.DataFrame, events: Union[dict, List[dict]], data_sample_rate=None, audio_correction=None
, start_at=0):
new_df = None
if isinstance(events, dict):
            events = [events]  # wrap a single event dict in a list; list(dict) would yield only its keys
event_order = 0
for event in events:
event_order += 1
temp_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""max temp before jul 1 or min after"""
import datetime
import psycopg2.extras
import numpy as np
import pandas as pd
from matplotlib.patches import Rectangle
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {'fall': 'Minimum Temperature after 1 July',
'spring': 'Maximum Temperature before 1 July'}
PDICT2 = {'high': 'High Temperature',
'low': 'Low Temperature'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['highcharts'] = True
desc['description'] = """This plot presents the climatology and actual
year's progression of warmest to date or coldest to date temperature.
The simple average is presented along with the percentile intervals."""
desc['arguments'] = [
dict(type='station', name='station', default='IA0200',
label='Select Station:', network='IACLIMATE'),
dict(type='year', name='year', default=datetime.datetime.now().year,
label='Year to Highlight:'),
dict(type='select', name='half', default='fall',
label='Option to Plot:', options=PDICT),
dict(type='select', name='var', default='low',
label='Variable to Plot:', options=PDICT2),
dict(type='int', name='t', label='Highlight Temperature',
default=32),
]
return desc
def get_context(fdict):
""" Get the raw infromations we need"""
pgconn = get_dbconn('coop')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
today = datetime.date.today()
thisyear = today.year
ctx = get_autoplot_context(fdict, get_description())
year = ctx['year']
station = ctx['station']
varname = ctx['var']
half = ctx['half']
table = "alldata_%s" % (station[:2],)
ab = ctx['_nt'].sts[station]['archive_begin']
if ab is None:
raise NoDataFound("Unknown station metadata.")
startyear = int(ab.year)
data = np.ma.ones((thisyear-startyear+1, 366)) * 199
if half == 'fall':
cursor.execute("""SELECT extract(doy from day), year,
""" + varname + """ from
"""+table+""" WHERE station = %s and low is not null and
high is not null and year >= %s""", (station, startyear))
else:
cursor.execute("""SELECT extract(doy from day), year,
""" + varname + """ from
"""+table+""" WHERE station = %s and high is not null and
low is not null and year >= %s""", (station, startyear))
if cursor.rowcount == 0:
raise NoDataFound("No Data Found.")
for row in cursor:
data[int(row[1] - startyear), int(row[0] - 1)] = row[2]
data.mask = np.where(data == 199, True, False)
doys = []
avg = []
p25 = []
p2p5 = []
p75 = []
p97p5 = []
mins = []
maxs = []
dyear = []
idx = year - startyear
last_doy = int(today.strftime("%j"))
if half == 'fall':
for doy in range(181, 366):
low = np.ma.min(data[:-1, 180:doy], 1)
avg.append(np.ma.average(low))
mins.append(np.ma.min(low))
maxs.append(np.ma.max(low))
p = np.percentile(low, [2.5, 25, 75, 97.5])
p2p5.append(p[0])
p25.append(p[1])
p75.append(p[2])
p97p5.append(p[3])
doys.append(doy)
if year == thisyear and doy > last_doy:
continue
dyear.append(np.ma.min(data[idx, 180:doy]))
else:
for doy in range(1, 181):
low = np.ma.max(data[:-1, :doy], 1)
avg.append(np.ma.average(low))
mins.append(np.ma.min(low))
maxs.append(np.ma.max(low))
p = np.percentile(low, [2.5, 25, 75, 97.5])
p2p5.append(p[0])
p25.append(p[1])
p75.append(p[2])
p97p5.append(p[3])
doys.append(doy)
if year == thisyear and doy > last_doy:
continue
dyear.append(np.ma.max(data[idx, :doy]))
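    # At this point dyear holds the highlighted year's running extreme to date
    # (coldest value so far for 'fall', warmest so far for 'spring'), while avg
    # and the percentile lists describe the climatology of that same running
    # extreme over the period of record (excluding the most recent year).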
# http://stackoverflow.com/questions/19736080
d = dict(doy=pd.Series(doys), mins=pd.Series(mins), maxs=pd.Series(maxs),
p2p5=pd.Series(p2p5),
p97p5=pd.Series(p97p5), p25=pd.Series(p25),
p75=pd.Series(p75), avg=pd.Series(avg),
thisyear=
|
pd.Series(dyear)
|
pandas.Series
|
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
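# For example, with interval_label == 'ending' an observation stamped exactly at
# data_start summarizes the interval *before* the window, so the slice start is
# nudged forward by one second to drop it; 'beginning' labels get the mirrored
# one-second adjustment on data_end.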
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start=forecast_start, end=forecast_end, freq='5min', closed='left')
# clear sky 5 min avg GHI is
# [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected_vals = [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected = pd.Series(expected_vals, index=expected_index)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
expected = pd.Series([0.2, 0.7, 1.2, 1.6, 2., 2.5], index=expected_index)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
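# Conceptual sketch (not the library's implementation) of the clear-sky-index
# persistence idea exercised in the test above: average the observed clear-sky
# index, clipped to element limits, then scale the clear-sky expectation for the
# forecast window by that average.
import numpy as np

def _clearsky_index_persistence(obs, clear_obs, clear_fx, limits=(0.0, 2.0)):
    with np.errstate(divide='ignore', invalid='ignore'):
        index = np.clip(
            np.asarray(obs, dtype=float) / np.asarray(clear_obs, dtype=float),
            *limits)
    return np.nanmean(index) * np.asarray(clear_fx, dtype=float)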
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0, 0, 0, 20, 20, 20], 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 0, 0, 4, 4, 4], 'y', [50], [2]),
# invalid axis
pytest.param([0, 0, 0, 4, 4, 4], 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic(site_metadata, interval_label, obs_values,
axis, constant_values, expected_values):
tz = 'UTC'
interval_length = '5min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='5min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
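# Rough illustration (not the library's implementation) of the axis convention in
# the parametrized cases above: axis='x' evaluates the empirical distribution of
# the observations at the given variable values and returns percentiles, while
# axis='y' evaluates percentiles of the observations and returns variable values.
from statistics import median
_example_obs = [0, 0, 0, 20, 20, 20]
assert sum(v <= 10 for v in _example_obs) / len(_example_obs) * 100 == 50  # axis='x'
assert median([0, 0, 0, 4, 4, 4]) == 2  # axis='y': 50th percentile -> 2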
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 11 + [4] * 11, 'y', [50], [2]),
# invalid axis
pytest.param([0] * 11 + [4] * 11, 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
# insufficient observation data
pytest.param([5.3, 7.3, 1.4] * 4, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([], 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([None]*10, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic_timeofday(site_metadata, obs_values, axis,
constant_values, expected_values):
tz = 'UTC'
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_end = pd.Timestamp('20190513T0900', tz=tz)
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1000', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("data_end,forecast_start", [
# no timezone
(pd.Timestamp("20190513T0900"), pd.Timestamp("20190514T0900")),
# same timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# different timezone
(
pd.Timestamp("20190513T0200", tz="US/Pacific"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# obs timezone, but no fx timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900")
),
# no obs timezone, but fx timezone
(
pd.Timestamp("20190513T0900"),
pd.Timestamp("20190514T0900", tz="UTC")
),
])
def test_persistence_probabilistic_timeofday_timezone(site_metadata, data_end,
forecast_start):
obs_values = [0] * 11 + [20] * 11
axis, constant_values, expected_values = 'x', [10, 20], [50, 100]
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_end = forecast_start + pd.Timedelta("1h")
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
# if forecast without timezone, then use obs timezone
if data.index.tzinfo is not None and forecast_start.tzinfo is None:
expected_index = expected_index.tz_localize(data.index.tzinfo)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 15 + [20] * 15, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 15 + [4] * 15, 'y', [50], [2]),
([None] * 30, 'y', [50], [None]),
([0] * 10 + [None] * 10 + [20] * 10, 'x', [10, 20], [50, 100]),
([0] * 10 + [None] * 10 + [4] * 10, 'y', [50], [2]),
])
def test_persistence_probabilistic_resampling(
site_metadata,
interval_label,
obs_values, axis,
constant_values,
expected_values
):
tz = 'UTC'
interval_length = '1min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
# all observations 9-10 each day.
# This index is for (09:00, 10:00] (interval_label=ending), but subtract
# 30 minutes for [09:00, 10:00) (interval_label=beginning)
PROB_PERS_TOD_OBS_INDEX = pd.DatetimeIndex([
'2019-04-21 09:30:00+00:00', '2019-04-21 10:00:00+00:00',
'2019-04-22 09:30:00+00:00', '2019-04-22 10:00:00+00:00',
'2019-04-23 09:30:00+00:00', '2019-04-23 10:00:00+00:00',
'2019-04-24 09:30:00+00:00', '2019-04-24 10:00:00+00:00',
'2019-04-25 09:30:00+00:00', '2019-04-25 10:00:00+00:00',
'2019-04-26 09:30:00+00:00', '2019-04-26 10:00:00+00:00',
'2019-04-27 09:30:00+00:00', '2019-04-27 10:00:00+00:00',
'2019-04-28 09:30:00+00:00', '2019-04-28 10:00:00+00:00',
'2019-04-29 09:30:00+00:00', '2019-04-29 10:00:00+00:00',
'2019-04-30 09:30:00+00:00', '2019-04-30 10:00:00+00:00',
'2019-05-01 09:30:00+00:00', '2019-05-01 10:00:00+00:00',
'2019-05-02 09:30:00+00:00', '2019-05-02 10:00:00+00:00',
'2019-05-03 09:30:00+00:00', '2019-05-03 10:00:00+00:00',
'2019-05-04 09:30:00+00:00', '2019-05-04 10:00:00+00:00',
'2019-05-05 09:30:00+00:00', '2019-05-05 10:00:00+00:00',
'2019-05-06 09:30:00+00:00', '2019-05-06 10:00:00+00:00',
'2019-05-07 09:30:00+00:00', '2019-05-07 10:00:00+00:00',
'2019-05-08 09:30:00+00:00', '2019-05-08 10:00:00+00:00',
'2019-05-09 09:30:00+00:00', '2019-05-09 10:00:00+00:00',
'2019-05-10 09:30:00+00:00', '2019-05-10 10:00:00+00:00',
'2019-05-11 09:30:00+00:00', '2019-05-11 10:00:00+00:00',
'2019-05-12 09:30:00+00:00', '2019-05-12 10:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
@pytest.mark.parametrize('obs_interval_label_index', [
('beginning', PROB_PERS_TOD_OBS_INDEX - pd.Timedelta('30min')),
('ending', PROB_PERS_TOD_OBS_INDEX)
])
@pytest.mark.parametrize('fx_interval_label_index', [
('beginning', pd.DatetimeIndex(['20190514T0900Z'], freq='1h')),
('ending', pd.DatetimeIndex(['20190514T1000Z'], freq='1h'))
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
# intervals always average to 10 if done properly, but 0 or 20 if
# done improperly
([0, 20] * 22, 'x', [10, 20], [100., 100.]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 4] * 22, 'y', [50], [2.]),
# works with nan
([None, 4] * 22, 'y', [50], [4.]),
([0.] + [None] * 42 + [4.], 'y', [50], [2.]),
# first interval averages to 0, last to 20, else nan
([0.] + [None] * 42 + [20.], 'x', [10, 20], [50., 100.]),
])
def test_persistence_probabilistic_timeofday_resample(
site_metadata,
obs_values,
axis,
constant_values,
expected_values,
obs_interval_label_index,
fx_interval_label_index
):
obs_interval_label, obs_index = obs_interval_label_index
fx_interval_label, fx_index = fx_interval_label_index
tz = 'UTC'
observation = default_observation(
site_metadata,
interval_length='30min',
interval_label=obs_interval_label
)
data_start = pd.Timestamp('20190421T0900', tz=tz)
data_end = pd.Timestamp('20190512T1000', tz=tz)
data = pd.Series(obs_values, index=obs_index, dtype=float)
# forecast 9am - 10am, but label will depend on inputs
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1000', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = fx_index
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, fx_interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for expected, fx in zip(expected_values, forecasts):
pd.testing.assert_series_equal(
fx,
pd.Series(expected, index=expected_index)
)
PROB_PERS_TOD_OBS_INDEX_2H = PROB_PERS_TOD_OBS_INDEX.union(
PROB_PERS_TOD_OBS_INDEX + pd.Timedelta('1h')
)
@pytest.mark.parametrize('obs_interval_label_index', [
('beginning', PROB_PERS_TOD_OBS_INDEX_2H - pd.Timedelta('30min')),
('ending', PROB_PERS_TOD_OBS_INDEX_2H)
])
@pytest.mark.parametrize('fx_interval_label_index', [
(
'beginning',
pd.DatetimeIndex(['20190514T0900Z', '20190514T1000Z'], freq='1h')
),
(
'ending',
pd.DatetimeIndex(['20190514T1000Z', '20190514T1100Z'], freq='1h')
)
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# first interval averages to 0, last to 20, else nan
([0.] + [None] * 86 + [20.], 'x', [10, 20], [[100., 0.], [100., 100.]]),
# no valid observations in first forecast hour
(
[None, None, 20., 20.] * 22,
'x',
[10, 20],
[[None, 0.], [None, 100.]]
),
])
def test_persistence_probabilistic_timeofday_resample_2h(
site_metadata,
obs_values,
axis,
constant_values,
expected_values,
obs_interval_label_index,
fx_interval_label_index
):
obs_interval_label, obs_index = obs_interval_label_index
fx_interval_label, fx_index = fx_interval_label_index
tz = 'UTC'
observation = default_observation(
site_metadata,
interval_length='30min',
interval_label=obs_interval_label
)
data_start = pd.Timestamp('20190421T0900', tz=tz)
data_end =
|
pd.Timestamp('20190512T1100', tz=tz)
|
pandas.Timestamp
|
import pandas as pd
from collections import Counter
from natsort import index_natsorted
import numpy as np
ids = []
text = []
ab_ids = []
ab_text = []
normal_vocab_freq_dist = Counter()
ab_vocab_freq_dist = Counter()
# keywords that are most likely associated with abnormalities
KEYWORDS = ['emphysema', 'cardiomegaly', 'borderline', 'mild', 'chronic', 'minimal', 'copd', 'hernia',
'hyperinflated', 'hemodialysis', 'atelectasis', 'degenerative', 'effusion', 'atherosclerotic',
'aneurysmal', 'granuloma', 'fracture', 'severe', 'concerns', 'fibrosis', 'scarring', 'crowding', 'opacities',
'persistent', 'ectatic', 'hyperinflation', 'moderate', 'opacity', 'calcified', 'effusions', 'edema',
'continued', 'low lung volume', 'pacing lead', 'resection', 'dilated', 'left', 'right', 'bilateral',
'hyperexpanded', 'calcification', 'concerning', 'concern', 'enlargement', 'lines', 'tubes', 'Emphysema',
'Hyperexpanded', 'advanced', 'Advanced', 'tortuosity']
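# Minimal sketch (hypothetical report sentence, not taken from the data files) of
# how this keyword list is used further below: a label is considered to describe
# an abnormality when any keyword appears as a substring.
_example_label = 'stable mild cardiomegaly without acute findings.'
assert any(w in _example_label for w in KEYWORDS)  # 'mild' and 'cardiomegaly' both match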
with open('files/normal.txt', mode='r', encoding='utf-8') as f, open('files/abnormal.txt', mode='r', encoding='utf-8') as af:
for line in f:
xml, *label_text = line.split()
ids.append(xml)
normal_vocab_freq_dist.update(label_text)
text.append(' '.join(label_text))
for line in af:
xml, *label_text = line.split()
ab_ids.append(xml)
ab_vocab_freq_dist.update(label_text)
ab_text.append(' '.join(label_text))
def first_filter_normal_label(a_string):
if a_string.startswith(('no acute', 'no evidence', 'no active', 'no radiographic evidence')) and a_string.endswith(
('process.', 'disease.', 'abnormality.', 'abnormalities.', 'findings.', 'finding.', 'identified.',
'infiltrates.', 'infiltrate.')):
return 0
else:
return a_string
def second_filter_normal(a_string):
if isinstance(a_string, int):
return a_string
if a_string.startswith(('normal chest', 'normal exam', 'unremarkable chest', 'unremarkable examination',
'unremarkable radiographs')):
return 0
if a_string.startswith('clear') and a_string.endswith('lungs.'):
return 0
if a_string.startswith(('negative for', 'negative chest')):
return 0
if a_string.startswith('negative') and a_string.endswith('negative.'):
return 0
else:
return a_string
def third_filter_normal(a_string):
if isinstance(a_string, int):
return a_string
if a_string.startswith(('stable appearance', 'stable chest radiograph', 'stable exam', 'stable',
'stable post-procedural', 'stable radiographic')):
if any(w in a_string for w in KEYWORDS):
return a_string
else:
return 0
if a_string.startswith('clear') or a_string.endswith('clear.'):
if any(w in a_string for w in KEYWORDS):
return a_string
else:
return 0
return a_string
def fourth_filter_normal(a_string):
if isinstance(a_string, int):
return a_string
    if 'no acute' in a_string or 'without acute' in a_string:
        if any(w in a_string for w in KEYWORDS):
            return 2
        elif 'stable' in a_string or 'clear' in a_string or 'normal' in a_string:
return 0
return a_string
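# Minimal sketch (hypothetical label text) of how the four filters defined above
# are chained over each label below: strictly normal reports collapse to 0, while
# "no acute"/"without acute" reports that still contain abnormality keywords map to 2.
_demo_label = 'no acute cardiopulmonary process.'
for _f in (first_filter_normal_label, second_filter_normal,
           third_filter_normal, fourth_filter_normal):
    _demo_label = _f(_demo_label)
assert _demo_label == 0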
print(normal_vocab_freq_dist.most_common(50))
print(ab_vocab_freq_dist.most_common(50))
# filtering strictly normal from borderline/mild abnormal, e.g. stable/chronic conditions but no acute findings
normal = {'xmlId': ids, 'label_text': text}
normal_df = pd.DataFrame(normal)
normal_df['label'] = normal_df['label_text']
normal_df['label'] = normal_df['label'].apply(first_filter_normal_label)
normal_df['label'] = normal_df['label'].apply(second_filter_normal)
normal_df['label'] = normal_df['label'].apply(third_filter_normal)
normal_df['label'] = normal_df['label'].apply(fourth_filter_normal)
print(normal_df.loc[normal_df['label'] != 0])
print(normal_df.loc[normal_df['label'] == 0])
print('creating data frame from abnormal.txt')
# dataframe for abnormal file
ab_normal = {'xmlId': ab_ids, 'label_text': ab_text}
ab_normal_df =
|
pd.DataFrame(ab_normal)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool STATES
# To explore different scenarios for future installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# <ol>
# <li> Reading a standard ReEDS output file and saving it in a PV ICE input format </li>
# <li> Reading scenarios of interest and running PV ICE tool </li>
# <li> Plotting </li>
# <li> GeoPlotting </li>
# </ol>
# Notes:
#
# Scenarios of Interest:
# o the Ref.Mod,
# o 95-by-35.Adv, and
# o 95-by-35+Elec.Adv+DR ones
#
# In[1]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
from IPython.display import display
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'SF_States')
statedatafolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'STATEs')
print ("Your simulation will be stored in %s" % testfolder)
# In[3]:
PV_ICE.__version__
# ### Reading REEDS original file to get list of SCENARIOs, PCAs, and STATEs
# In[4]:
r"""
reedsFile = str(Path().resolve().parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v2a.xlsx')
print ("Input file is stored in %s" % reedsFile)
rawdf = pd.read_excel(reedsFile,
sheet_name="UPV Capacity (GW)")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
#now set year as an index in place
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
scenarios = list(rawdf.index.get_level_values('Scenario').unique())
PCAs = list(rawdf.index.get_level_values('PCA').unique())
STATEs = list(rawdf.index.get_level_values('State').unique())
simulationname = scenarios
simulationname = [w.replace('+', '_') for w in simulationname]
simulationname
SFscenarios = [simulationname[0], simulationname[4], simulationname[8]]
"""
# ### Reading GIS inputs
# In[5]:
r"""
GISfile = str(Path().resolve().parent.parent.parent.parent / 'gis_centroid_n.xlsx')
GIS = pd.read_excel(GISfile)
GIS = GIS.set_index('id')
GIS.head()
GIS.loc['p1'].long
"""
# ### Create Scenarios in PV_ICE
# #### Downselect to Solar Future scenarios of interest
#
# Scenarios of Interest:
# <li> Ref.Mod
# <li> 95-by-35.Adv
# <li> 95-by-35+Elec.Adv+DR
# In[6]:
SFscenarios = ['Reference.Mod', '95-by-35.Adv', '95-by-35_Elec.Adv_DR']
SFscenarios
# In[7]:
STATEs = ['WA', 'CA', 'VA', 'FL', 'MI', 'IN', 'KY', 'OH', 'PA', 'WV', 'NV', 'MD',
'DE', 'NJ', 'NY', 'VT', 'NH', 'MA', 'CT', 'RI', 'ME', 'ID', 'MT', 'WY', 'UT', 'AZ', 'NM',
'SD', 'CO', 'ND', 'NE', 'MN', 'IA', 'WI', 'TX', 'OK', 'OR', 'KS', 'MO', 'AR', 'LA', 'IL', 'MS',
'AL', 'TN', 'GA', 'SC', 'NC']
# ### Create the 3 Scenarios and assign Baselines
#
# Keeping track of each scenario as its own PV ICE Object.
# In[8]:
MATERIALS = ['glass', 'silicon', 'silver','copper','aluminium','backsheet','encapsulant']
# In[9]:
#for ii in range (0, 1): #len(scenarios):
i = 0
r1 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r1.createScenario(name=STATEs[jj], file=filetitle)
r1.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 1
r2 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r2.createScenario(name=STATEs[jj], file=filetitle)
r2.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 2
r3 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r3.createScenario(name=STATEs[jj], file=filetitle)
r3.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
# # Calculate Mass Flow
# In[10]:
r1.scenMod_noCircularity()
r2.scenMod_noCircularity()
r3.scenMod_noCircularity()
IRENA= False
PERFECTMFG = False
ELorRL = 'RL'
if IRENA:
r1.scenMod_IRENIFY(ELorRL=ELorRL)
r2.scenMod_IRENIFY(ELorRL=ELorRL)
r3.scenMod_IRENIFY(ELorRL=ELorRL)
if PERFECTMFG:
r1.scenMod_PerfectManufacturing()
r2.scenMod_PerfectManufacturing()
r3.scenMod_PerfectManufacturing()
# In[11]:
r1.calculateMassFlow()
r2.calculateMassFlow()
r3.calculateMassFlow()
# In[12]:
print("STATEs:", r1.scenario.keys())
print("Module Keys:", r1.scenario[STATEs[jj]].data.keys())
print("Material Keys: ", r1.scenario[STATEs[jj]].material['glass'].materialdata.keys())
# # OPEN EI
# In[13]:
kk=0
SFScenarios = [r1, r2, r3]
SFScenarios[kk].name
# In[14]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI.csv', index=False)
print("Done")
# In[15]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Yearly Only.csv', index=False)
print("Done")
# In[16]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Cumulatives Only.csv', index=False)
print("Done")
# In[ ]:
# WORK ON THIS FOR OPENEI
# SCENARIO DIFFERENCES
keyw=['new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['NewInstalledCapacity','InstalledCapacity']
sfprint = ['Reference','Grid Decarbonization', 'High Electrification']
keywunits = ['MW','MW']
keywdcumneed = [True,False]
keywdlevel = ['module','module']
keywscale = [1,1e6]
materials = []
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
# kk -- scenario
for kk in range(0, 3):
sentit = '@value|'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
# foo['@value|scenario|Solar Futures'] = SFScenarios[kk].name
foo['@states'] = STATEs[zz]
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI ScenarioDifferences.csv', index=False)
print("Done")
# In[ ]:
scenariolist.head()
# # SAVE DATA FOR BILLY: STATES
# In[ ]:
# for rounding to 3 significant figures
N = 2
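# Worked example of the rounding used in the cells below: with N = 2,
# round(x, N - int(np.floor(np.log10(abs(x))))) keeps three significant figures,
# e.g. 1234.0 -> 1230.0.
assert round(1234.0, N - int(np.floor(np.log10(abs(1234.0))))) == 1230.0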
# SFScenarios[kk].scenario[PCAs[zz]].data.year
#
# Index 20 --> 2030
#
# Index 30 --> 2040
#
# Index 40 --> 2050
# In[ ]:
idx2030 = 20
idx2040 = 30
idx2050 = 40
print("index ", idx2030, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2030])
print("index ", idx2040, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2040])
print("index ", idx2050, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2050])
# #### 6 - STATE Cumulative Virgin Needs by 2050
#
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 6 - STATE Cumulative2050 VirginMaterialNeeds_tons.csv')
# #### 7 - STATE Cumulative EoL Only Waste by 2050
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 7 - STATE Cumulative2050 Waste_EOL_tons.csv')
# #### 8 - STATE Yearly Virgin Needs 2030 2040 2050
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PVICE 8 - STATE Yearly 2030 2040 2050 VirginMaterialNeeds_tons.csv')
# #### 9 - STATE Yearly EoL Waste 2030 2040 2050
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist =
|
pd.concat([materiallist, yearlylist], axis=1)
|
pandas.concat
|
import io
import time
import json
from datetime import datetime
import pandas as pd
from pathlib import Path
import requests
drop_cols = [
'3-day average of daily number of positive tests (may count people more than once)',
'daily total tests completed (may count people more than once)',
'3-day average of new people who tested positive (counts first positive lab per person)',
'3-day average of currently hospitalized',
'daily number of vaccine doses administered beyond the primary series '
]
def save_file(df, file_path, current_date):
# save/update file
if not Path(file_path).exists():
df.to_csv(file_path, index=False)
else:
# get prior file date
prior = pd.read_csv(file_path, parse_dates=['date'])
prior_date = pd.to_datetime(prior['date'].max()).date()
if current_date > prior_date:
df.to_csv(file_path, mode='a', header=False, index=False)
return
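# Minimal usage sketch (hypothetical frame and file path, not part of this repo)
# of the append-only pattern above: rows are written only when the scrape date is
# newer than the latest date already stored in the CSV.
# save_file(pd.DataFrame({'metric': ['total cases'], 'count': [1],
#                         'date': [pd.Timestamp('2021-01-02')]}),
#           './data/raw/example.csv', pd.Timestamp('2021-01-02').date())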
def scrape_sheet(sheet_id):
# load previous raw_data and get prior date
raw_general = './data/raw/ri-covid-19.csv'
df = pd.read_csv(raw_general, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
    # wait till 5:05 then check every 5 mins for update
target = datetime.now().replace(hour=17).replace(minute=5)
while datetime.now() < target:
print(f"[status] waiting for 5pm", end='\r')
time.sleep(60)
# load data from RI - DOH spreadsheet
gen_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}264100583'
df = pd.read_csv(gen_url).dropna(axis=1, how='all')
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
if df.shape[0] != 27:
print('[ERROR: summary page format changed]')
while not prior_date < date:
print(f"[status] waiting for update...{time.strftime('%H:%M')}", end='\r')
time.sleep(5 * 60)
df = pd.read_csv(gen_url)
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
else:
print('[status] found new update pausing for 2 mins')
time.sleep(2 * 60)
## transform general sheet
df['date'] = date
df.columns = ['metric', 'count', 'date']
save_file(df, raw_general, date)
## scrape geographic sheet
geo_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}901548302'
geo_df = pd.read_csv(geo_url)
    # get geographic date & fix cols
geo_date = geo_df.iloc[-1][1]
geo_date = pd.to_datetime(geo_date)
geo_df['date'] = geo_date
cols = [x for x in list(geo_df) if 'Rate' not in x]
geo_df = geo_df[cols]
geo_df = geo_df.dropna(axis=0)
geo_df.columns = ['city_town', 'count', 'hostpialized', 'deaths', 'fully_vaccinated', 'date']
# save file
raw_geo = './data/raw/geo-ri-covid-19.csv'
save_file(geo_df, raw_geo, geo_date)
## scrape demographics sheet
dem_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}31350783'
dem_df = pd.read_csv(dem_url)
# make sure no columns were added/removed
if not dem_df.shape == (31, 9):
print('[error] demographics format changed')
return
else:
# get demographics updated date
dem_date = dem_df.iloc[-1][1]
dem_date = pd.to_datetime(dem_date).tz_localize('EST').date()
# drop percentage columns & rename
dem_df = dem_df.drop(dem_df.columns[[1, 2, 4, 6, 8]], axis=1)
dem_df.columns = ['metric', 'case_count', 'hosptialized', 'deaths']
# get data
sex = dem_df[1:4]
age = dem_df[5:17]
race = dem_df[18:24]
dem_df = pd.concat([sex, age, race])
dem_df['date'] = dem_date
raw_dem = './data/raw/demographics-covid-19.csv'
save_file(dem_df, raw_dem, dem_date)
def scrape_revised(sheet_id):
# load previous revised_data and get prior date
raw_revised = './data/raw/revised-data.csv'
df = pd.read_csv(raw_revised, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
# load revised sheet & fix column names
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}1592746937'
df = pd.read_csv(url, parse_dates=['Date'])
df.columns = [x.lower() for x in list(df)]
# test to try and make sure columns dont change
if df.shape[1] != 36 or list(df)[6] != 'daily total tests completed (may count people more than once)':
print('[error] revised sheet columns changed')
return
# check if updated
if df['date'].max() > prior_date:
df = df.drop(columns=drop_cols)
# re order columns
move_cols = (list(df)[6:11] + list(df)[22:31])
cols = [x for x in list(df) if x not in move_cols]
cols.extend(move_cols)
df = df[cols]
df['date_scraped'] = datetime.strftime(datetime.now(), '%m/%d/%Y')
save_file(df, raw_revised, df['date'].max())
def scrape_nursing_homes(sheet_id):
# load prior date
raw_facility = './data/raw/nurse-homes-covid-19.csv'
df = pd.read_csv(raw_facility, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}500394186'
df = pd.read_csv(url)
# get date of last update
date = df.iloc[0,0].split(' ')[-1]
date = pd.to_datetime(date).tz_localize('EST').date()
if not date > prior_date:
print('\n[status] nursing homes:\tno update')
return
else:
# fix headers
df.columns = df.iloc[1]
# drop past 14 days column
df = df.drop(columns='New Resident Cases (in past 14 days)')
df['Facility Name'] = df['Facility Name'].str.replace(u'\xa0', ' ') # random unicode appeared
# fix dataframe shape
assisted = df[df['Facility Name'] == 'Assisted Living Facilities'].index[0]
nursing_homes = df[3:assisted].copy()
assisted_living = df[assisted+1:-1].copy()
# add facility type & recombine
nursing_homes['type'] = 'nursing home'
assisted_living['type'] = 'assisted living'
df = pd.concat([nursing_homes, assisted_living]).reset_index(drop=True)
# add date
df['date'] = date
save_file(df, raw_facility, date)
print('[status] nursing homes:\tupdated')
def scrape_zip_codes(sheet_id):
# load prior date
raw_zip = './data/raw/zip-codes-covid-19.csv'
df = pd.read_csv(raw_zip, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}365656702'
df = pd.read_csv(url)
# check if updated
date = df.iloc[-1][1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
if not date > prior_date:
print('[status] zip codes:\tno update')
return
else:
# stop # pending more info
df.columns = ['zip_code', 'count', 'rate']
df = df[:df[df.zip_code == 'Pending further info'].index[0]]
df = df[['zip_code', 'count']]
# add date & save
df['date'] = date
save_file(df, raw_zip, date)
print('[status] zip codes:\tupdated')
def scrape_schools(sheet_id):
# load prior date
raw_school = './data/raw/schools-covid-19.csv'
df =
|
pd.read_csv(raw_school, parse_dates=['date'])
|
pandas.read_csv
|
from itertools import product
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.validation import quality_mapping
def test_ok_user_flagged():
assert quality_mapping.DESCRIPTION_MASK_MAPPING['OK'] == 0
assert quality_mapping.DESCRIPTION_MASK_MAPPING['USER FLAGGED'] == 1
def test_description_dict_version_compatibility():
for dict_ in quality_mapping.BITMASK_DESCRIPTION_DICT.values():
assert dict_['VERSION IDENTIFIER 0'] == 1 << 1
assert dict_['VERSION IDENTIFIER 1'] == 1 << 2
assert dict_['VERSION IDENTIFIER 2'] == 1 << 3
def test_latest_version_flag():
# test valid while only identifiers 0 - 2 present
last_identifier = max(
int(vi.split(' ')[-1]) for vi in
quality_mapping.DESCRIPTION_MASK_MAPPING.keys() if
vi.startswith('VERSION IDENTIFIER'))
assert last_identifier == 2
assert (quality_mapping.LATEST_VERSION_FLAG ==
quality_mapping.LATEST_VERSION << 1)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask(flag_val):
flag, mask = flag_val
mask |= quality_mapping.LATEST_VERSION_FLAG
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([
mask, mask, quality_mapping.LATEST_VERSION_FLAG, mask,
quality_mapping.LATEST_VERSION_FLAG]))
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_none(flag_invert):
assert quality_mapping.convert_bool_flags_to_flag_mask(
None, *flag_invert) is None
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_adds_latest_version(flag_invert):
ser = pd.Series([0, 0, 0, 1, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(
ser, *flag_invert)
assert (flags & quality_mapping.LATEST_VERSION_FLAG).all()
@pytest.fixture()
def ignore_latest_version(mocker):
mocker.patch(
'solarforecastarbiter.validation.quality_mapping.LATEST_VERSION_FLAG',
0)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([mask, mask, 0, mask, 0]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_no_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, False)
assert_series_equal(flags, pd.Series([0, 0, mask, 0, mask]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
@quality_mapping.mask_flags(flag)
def f():
return pd.Series([True, True, False, False])
out = f(_return_mask=True)
assert_series_equal(out, pd.Series([latest, latest, mask, mask]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags_tuple(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
@quality_mapping.mask_flags(flag)
def f():
return pd.Series([True, True, False, False]), None
out = f(_return_mask=True)
assert_series_equal(out[0], pd.Series([latest, latest, mask, mask]))
assert out[1] is None
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags_noop(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
inp = pd.Series([True, True, False, False])
@quality_mapping.mask_flags(flag)
def f():
return inp
out = f()
assert_series_equal(out, inp)
@pytest.mark.parametrize('flag,expected', [
(0b10, 1),
(0b11, 1),
(0b10010, 1),
(0b10010010, 1),
(0b100, 2),
(0b110, 3),
(0b1110001011111, 7)
])
def test_get_version(flag, expected):
assert quality_mapping.get_version(flag) == expected
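# Sketch consistent with the parametrized cases above: the version identifiers
# occupy bits 1-3 of the quality flag. This mirrors the test expectations and is
# not necessarily the library's exact implementation.
def _version_from_flag(flag):
    return (flag >> 1) & 0b111

assert _version_from_flag(0b1110001011111) == 7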
def test_has_data_been_validated():
flags = pd.Series([0, 1, 2, 7])
out = quality_mapping.has_data_been_validated(flags)
assert_series_equal(out, pd.Series([False, False, True, True]))
@pytest.mark.parametrize('flag,desc,result', [
(0, 'OK', True),
(1, 'OK', False),
(2, 'OK', True),
(3, 'OK', False),
(0, 'USER FLAGGED', False),
(3, 'USER FLAGGED', True),
(0, 'CLEARSKY', False),
(16, 'OK', False),
(1, 'USER FLAGGED', True),
(16, 'NIGHTTIME', True),
(33, 'CLEARSKY', True),
(33, 'NIGHTTIME', False),
(33, ['OK', 'NIGHTTIME'], False),
(33, ('OK', 'CLEARSKY', 'USER FLAGGED'), True),
(2, ('OK', 'NIGHTTIME'), True),
(9297, 'USER FLAGGED', True)
])
def test_check_if_single_value_flagged(flag, desc, result):
flag |= quality_mapping.LATEST_VERSION_FLAG
out = quality_mapping.check_if_single_value_flagged(flag, desc)
assert out == result
@pytest.mark.parametrize('flag', [0, 1])
def test_check_if_single_value_flagged_validation_error(flag):
with pytest.raises(ValueError):
quality_mapping.check_if_single_value_flagged(flag, 'OK')
@pytest.mark.parametrize('desc', [33, b'OK', [1, 2], []])
def test_check_if_single_value_flagged_type_error(desc):
with pytest.raises(TypeError):
quality_mapping.check_if_single_value_flagged(2, desc)
@pytest.mark.parametrize('desc', ['NOPE', 'MAYBE', ['YES', 'NO']])
def test_check_if_single_value_flagged_key_error(desc):
with pytest.raises(KeyError):
quality_mapping.check_if_single_value_flagged(2, desc)
@pytest.mark.parametrize('flags,expected', [
(pd.Series([0, 1, 0]),
|
pd.Series([False, False, False])
|
pandas.Series
|
# rate_of_rise.py is part of the `ca_img_analyzer' package:
# github.com/DanielSchuette/ca_img_analyzer
#
# this code is MIT licensed
#
# if you find a bug or want to contribute, please
# use the GitHub repository or write an email:
# d.schuette(at)online.de
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
class Workbook(object):
"""
    class 'Workbook' holds data of (potentially multiple)
    Excel spreadsheets and comprises methods to modify and
    manipulate this data; Workbook does not inherit from
    anything but object.
The following methods can be called on Workbook:
- calc_derivative()
- plot_derivatives()
- get_max_derivatives()
- plot_max_derivatives()
- concat_covslips()
"""
def __init__(self,
path,
h=1,
order=None,
kind="box",
plot_style=None,
verbose=False,
debug=False):
"""
takes a valid file path to a .xlsx file and reads it in;
also iterates over the different sheets in that workbook
and appends their names to a newly initialized list.
-----------------------------
parameters:
TODO!
"""
# read the excel workbook in
self.raw_data = pd.read_excel(path, sheet_name=None)
# append individual names to a list of sheet names
self.sheet_names = []
for idx, name in enumerate(self.raw_data):
if verbose:
print("number {}: {}".format(idx + 1, name))
self.sheet_names.append(name)
if len(self.sheet_names) < 2:
print("""did not get more than one spread sheet to read;
is that correct?""")
# create empty list to hold dataframes with derivates
# and various data formats
self.derivative_df_list = []
self.derivatives_table =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# # Calculating Similarity
#
# create some transformer-embedded vectors, then use cosine similarity to compare them
# In[2]:
import h
import pandas as pd
# pd.set_option('display.max_colwidth', None)
# use movies dataset
df = pd.read_csv('../data/imdb_top_1000.csv')#.head(10)
# df[['Series_Title', 'Overview']].head(10)
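# A minimal, self-contained sketch of the cosine similarity mentioned above
# (illustrative only -- the notebook delegates the real computation to the local
# helper module `h`):
import numpy as np

def _cosine_similarity(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

assert abs(_cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])) - 0.7071) < 1e-3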
# In[ ]:
# In[ ]:
# sentences = df['Overview']
# h.compute_similarity(0, sentences)
# In[ ]:
df['similar_movies'] =
|
pd.Series(df.index)
|
pandas.Series
|
from nose_parameterized import parameterized
from unittest import TestCase
from pandas import (
Series,
DataFrame,
DatetimeIndex,
date_range,
Timedelta,
read_csv
)
from pandas.util.testing import (assert_frame_equal)
import os
import gzip
from pyfolio.round_trips import (extract_round_trips,
add_closing_transactions,
_groupby_consecutive,
)
class RoundTripTestCase(TestCase):
dates = date_range(start='2015-01-01', freq='D', periods=20)
dates_intraday = date_range(start='2015-01-01',
freq='2BH', periods=8)
@parameterized.expand([
(DataFrame(data=[[2, 10., 'A'],
[2, 20., 'A'],
[-2, 20., 'A'],
[-2, 10., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[:4]),
DataFrame(data=[[4, 15., 'A'],
[-4, 15., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[[0, 2]])
.rename_axis('dt', axis='index')
),
(DataFrame(data=[[2, 10., 'A'],
[2, 20., 'A'],
[2, 20., 'A'],
[2, 10., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[[0, 1, 4, 5]]),
DataFrame(data=[[4, 15., 'A'],
[4, 15., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[[0, 4]])
.rename_axis('dt', axis='index')
),
])
def test_groupby_consecutive(self, transactions, expected):
grouped_txn = _groupby_consecutive(transactions)
assert_frame_equal(grouped_txn.sort_index(axis='columns'),
expected.sort_index(axis='columns'))
@parameterized.expand([
# Simple round-trip
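        # (worked numbers for this case: buy 2 @ 10, then sell 2 @ 15, so
        #  pnl = 2 * (15 - 10) = 10 and rt_returns = 10 / (2 * 10) = 0.5,
        #  matching the expected frame below)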
(DataFrame(data=[[2, 10., 'A'],
[-2, 15., 'A']],
columns=['amount', 'price', 'symbol'],
index=dates[:2]),
DataFrame(data=[[dates[0], dates[1],
Timedelta(days=1), 10., .5,
True, 'A']],
columns=['open_dt', 'close_dt',
'duration', 'pnl', 'rt_returns',
'long', 'symbol'],
index=[0])
),
# Round-trip with left-over txn that shouldn't be counted
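        # (worked numbers: 2 @ 10 + 2 @ 15 gives a 50 cost basis for 4 shares,
        #  i.e. 12.5 average; only 4 of the 9 sold shares close the position,
        #  so pnl = 4 * (10 - 12.5) = -10 and rt_returns = -10 / 50 = -0.2)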
(DataFrame(data=[[2, 10., 'A'],
[2, 15., 'A'],
[-9, 10., 'A']],
columns=['amount', 'price', 'symbol'],
index=dates[:3]),
DataFrame(data=[[dates[0], dates[2],
Timedelta(days=2), -10., -.2,
True, 'A']],
columns=['open_dt', 'close_dt',
'duration', 'pnl', 'rt_returns',
'long', 'symbol'],
index=[0])
),
# Round-trip with sell that crosses 0 and should be split
(DataFrame(data=[[2, 10., 'A'],
[-4, 15., 'A'],
[3, 20., 'A']],
columns=['amount', 'price', 'symbol'],
index=dates[:3]),
DataFrame(data=[[dates[0], dates[1],
Timedelta(days=1), 10., .5,
True, 'A'],
[dates[1], dates[2],
|
Timedelta(days=1)
|
pandas.Timedelta
|
import sys
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords'])
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import re
import numpy as np
import pandas as pd
import pickle
import sklearn
from sqlalchemy import create_engine
from sklearn.metrics import classification_report, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
def load_data(database_filepath):
'''
FUNCTION:
Load database file containing the cleaned data
INPUTS:
database_filepath - database file containing the cleaned data
OUTPUTS
X - The messages from the database
    y - the categories the messages fall into, marked 0 or 1
y.columns - the names of the categories
'''
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = pd.read_sql_table(database_filepath, engine)
X = df['message']
y = df.iloc[:,4:]
return X, y, y.columns
def tokenize(text):
'''
FUNCTION:
clean the input text, simplify it, and tokenize it
INPUTS:
text - a string to clean and tokenize
OUTPUTS
clean_tokens - a list of tokenized words from text
'''
#remove punctuation and convert to lowercase
message = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())
#tokenize
tokens = word_tokenize(message)
#lemmatization
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
if tok not in stopwords.words('english'):
clean_tok = lemmatizer.lemmatize(tok).strip()
clean_tokens.append(clean_tok)
return clean_tokens
def gscv_scores(y_test, y_pred):
'''
Function: calculate the f-1 score average of all classes to use in
GridSearch evaluation
Inputs:
-y_test: outputs of test data set from train_test_split()
    -y_pred: pipeline predicted output based on X_test from
train_test_split()
Outputs:
-average f-1 score of all classes to determine if a parameter improved
    predictability
'''
y_pred = pd.DataFrame(y_pred, columns = y_test.columns)
report = pd.DataFrame()
for col in y_test.columns:
class_rep = classification_report(y_test.loc[:,col], y_pred.loc[:,col],
output_dict=True, zero_division=0)
scoring_df = pd.DataFrame.from_dict(class_rep)
#print(scoring_df)
#drop un-needed columns and rows
scoring_df = scoring_df[['macro avg']]
scoring_df.drop(index='support', inplace=True)
scoring_df = scoring_df.transpose()
report = report.append(scoring_df, ignore_index=True)
report.index = y_test.columns
return report['f1-score'].mean()
def build_model():
'''
FUNCTION: create and return a pipeline
'''
pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier()))])
#comment different parameters to decrease run time
parameters = {
#'vect__ngram_range': ((1,1), (1,2)),
#'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000),
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__estimator__n_estimators': [10, 25, 50],
#'clf__estimator__min_samples_split': [2, 4, 6],
'clf__estimator__bootstrap': (True, False)
}
scorer = make_scorer(gscv_scores)
cv = GridSearchCV(pipeline, param_grid=parameters, scoring=scorer, cv=3, verbose=4)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
'''
FUNCTION:
Evaluate the effectiveness of the pipeline in modeling the data
INPUTS:
model - pipeline fit to the data
X_test - test set of messages from the train_test_split
Y_test - test set of categories from the train_test_split
category_names - names of the categories the messages can fit in
OUTPUTS
report - the precision, recall, and f-1 scores for each category
    scores - the average precision, recall, and f-1 scores for the data set
'''
y_pred = model.predict(X_test)
y_pred = pd.DataFrame(y_pred, columns=category_names)
report =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
from staircase import Stairs
def s1(closed="left"):
int_seq1 = Stairs(initial_value=0, closed=closed)
int_seq1.layer(1, 10, 2)
int_seq1.layer(-4, 5, -1.75)
int_seq1.layer(3, 5, 2.5)
int_seq1.layer(6, 7, -2.5)
int_seq1.layer(7, 10, -2.5)
return int_seq1
def s2():
int_seq2 = Stairs(initial_value=0)
int_seq2.layer(1, 7, -2.5)
int_seq2.layer(8, 10, 5)
int_seq2.layer(2, 5, 4.5)
int_seq2.layer(2.5, 4, -2.5)
int_seq2.layer(-2, 1, -1.75)
return int_seq2
def s3(): # boolean
int_seq = Stairs(initial_value=0)
int_seq.layer(-10, 10, 1)
int_seq.layer(-8, -7, -1)
int_seq.layer(-5, -2, -1)
int_seq.layer(0.5, 1, -1)
int_seq.layer(3, 3.5, -1)
int_seq.layer(7, 9.5, -1)
return int_seq
def s4(): # boolean
int_seq = Stairs(initial_value=0)
int_seq.layer(-11, 9, 1)
int_seq.layer(-9.5, -8, -1)
int_seq.layer(-7.5, -7, -1)
int_seq.layer(0, 3, -1)
int_seq.layer(6, 6.5, -1)
int_seq.layer(7, 8.5, -1)
return int_seq
@pytest.fixture
def s1_fix():
return s1()
@pytest.fixture
def s2_fix():
return s2()
@pytest.fixture
def s3_fix():
return s3()
@pytest.fixture
def s4_fix():
return s4()
@pytest.mark.parametrize(
"x, kwargs, expected_val",
[
(
[-4, -2, 1, 3],
{"aggfunc": "mean", "window": (-0.5, 0.5)},
np.array([-0.875, -1.75, -0.75, 1.5]),
),
(
[-4, -2, 1, 3],
{"aggfunc": "mean", "window": (-1, 0)},
np.array([0.0, -1.75, -1.75, 0.25]),
),
(
[-4, -2, 1, 3],
{"aggfunc": "mean", "window": (0, 1)},
np.array([-1.75, -1.75, 0.25, 2.75]),
),
],
)
def test_s1_agg_mean(s1_fix, x, kwargs, expected_val):
window = kwargs["window"]
x = np.array(x)
ii = pd.IntervalIndex.from_arrays(x + window[0], x + window[1])
assert np.array_equal(s1_fix.slice(ii).mean().values, expected_val)
@pytest.mark.parametrize(
"closed, x, kwargs, expected_val",
[
(
"left",
[0, 2, 7],
{"aggfunc": "max", "window": (-1, 1)},
np.array([-1.75, 0.25, -0.5]),
),
(
"right",
[0, 2, 7],
{"aggfunc": "max", "window": (-1, 1), "closed": "left"},
np.array([-1.75, 0.25, 2.0]),
),
(
"left",
[0, 2, 7],
{"aggfunc": "max", "window": (-1, 1), "closed": "right"},
np.array([0.25, 2.75, -0.5]),
),
(
"right",
[0, 2, 7],
{"aggfunc": "max", "window": (-1, 1), "closed": "right"},
np.array([-1.75, 0.25, -0.5]),
),
],
)
def test_s1_agg_max(closed, x, kwargs, expected_val):
window = kwargs["window"]
x = np.array(x)
ii = pd.IntervalIndex.from_arrays(
x + window[0], x + window[1], closed=kwargs.get("closed", "left")
)
assert np.array_equal(s1(closed=closed).slice(ii).max().values, expected_val)
def test_slicing_mean(s1_fix):
pd.testing.assert_series_equal(
s1_fix.slice(range(-4, 11, 2)).mean(),
pd.Series(
{
pd.Interval(-4, -2, closed="left"): -1.75,
pd.Interval(-2, 0, closed="left"): -1.75,
pd.Interval(0, 2, closed="left"): -0.75,
pd.Interval(2, 4, closed="left"): 1.5,
pd.Interval(4, 6, closed="left"): 2.375,
pd.Interval(6, 8, closed="left"): -0.5,
pd.Interval(8, 10, closed="left"): -0.5,
}
),
check_names=False,
check_index_type=False,
)
def test_slicing_max(s1_fix):
pd.testing.assert_series_equal(
s1_fix.slice(range(-4, 11, 2)).max(),
pd.Series(
{
pd.Interval(-4, -2, closed="left"): -1.75,
pd.Interval(-2, 0, closed="left"): -1.75,
pd.Interval(0, 2, closed="left"): 0.25,
pd.Interval(2, 4, closed="left"): 2.75,
pd.Interval(4, 6, closed="left"): 2.75,
|
pd.Interval(6, 8, closed="left")
|
pandas.Interval
|
"""Integration tests for the HyperTransformer."""
import re
from copy import deepcopy
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from rdt import HyperTransformer
from rdt.errors import Error, NotFittedError
from rdt.transformers import (
DEFAULT_TRANSFORMERS, BaseTransformer, BinaryEncoder, FloatFormatter, FrequencyEncoder,
OneHotEncoder, UnixTimestampEncoder, get_default_transformer, get_default_transformers)
class DummyTransformerNumerical(BaseTransformer):
INPUT_SDTYPE = 'categorical'
OUTPUT_SDTYPES = {
'value': 'float'
}
def _fit(self, data):
pass
def _transform(self, data):
return data.astype(float)
def _reverse_transform(self, data):
return data.astype(str)
class DummyTransformerNotMLReady(BaseTransformer):
INPUT_SDTYPE = 'datetime'
OUTPUT_SDTYPES = {
'value': 'categorical',
}
def _fit(self, data):
pass
def _transform(self, data):
# Stringify input data
return data.astype(str)
def _reverse_transform(self, data):
return data.astype('datetime64')
TEST_DATA_INDEX = [4, 6, 3, 8, 'a', 1.0, 2.0, 3.0]
def get_input_data():
datetimes = pd.to_datetime([
'2010-02-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical': ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'a'],
'bool': [False, False, False, True, False, False, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
return data
def get_transformed_data():
datetimes = [
1.264982e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
return pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, .8125, 0.8125, 0.3125, 0.8125, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
def get_reversed_data():
data = get_input_data()
data['bool'] = data['bool'].astype('object')
return data
DETERMINISTIC_DEFAULT_TRANSFORMERS = deepcopy(DEFAULT_TRANSFORMERS)
DETERMINISTIC_DEFAULT_TRANSFORMERS['categorical'] = FrequencyEncoder
@patch('rdt.transformers.DEFAULT_TRANSFORMERS', DETERMINISTIC_DEFAULT_TRANSFORMERS)
def test_hypertransformer_default_inputs():
"""Test the HyperTransformer with default parameters.
This tests that if default parameters are provided to the HyperTransformer,
the ``default_transformers`` method will be used to determine which
transformers to use for each field.
Setup:
- Patch the ``DEFAULT_TRANSFORMERS`` to use the ``FrequencyEncoder``
for categorical sdtypes, so that the output is predictable.
Input:
- A dataframe with every sdtype.
    - A fixed random seed to guarantee the same values are null.
Expected behavior:
- The transformed data should contain all the ML ready data.
- The reverse transformed data should be the same as the input.
"""
# Setup
datetimes = pd.to_datetime([
np.nan,
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, np.nan, 0.1, 0.4, np.nan, 0.3],
'categorical': ['a', 'a', np.nan, 'b', 'a', 'b', 'a', 'a'],
'bool': [False, np.nan, False, True, False, np.nan, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.fit(data)
transformed = ht.transform(data)
reverse_transformed = ht.reverse_transform(transformed)
# Assert
expected_datetimes = [
1.263069e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
expected_transformed = pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, 0.9375, 0.75, 0.3125, 0.75, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': expected_datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
pd.testing.assert_frame_equal(transformed, expected_transformed)
reversed_datetimes = pd.to_datetime([
'2010-01-09 20:34:17.142857216',
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
expected_reversed = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, 0.20000000000000004, 0.1, 0.4, 0.20000000000000004, 0.3],
'categorical': ['a', 'a', np.nan, 'b', 'a', 'b', 'a', 'a'],
'bool': [False, False, False, True, False, False, True, False],
'datetime': reversed_datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
for row in range(reverse_transformed.shape[0]):
for column in range(reverse_transformed.shape[1]):
expected = expected_reversed.iloc[row, column]
actual = reverse_transformed.iloc[row, column]
assert pd.isna(actual) or expected == actual
assert isinstance(ht._transformers_tree['integer']['transformer'], FloatFormatter)
assert ht._transformers_tree['integer']['outputs'] == ['integer.value']
assert isinstance(ht._transformers_tree['float']['transformer'], FloatFormatter)
assert ht._transformers_tree['float']['outputs'] == ['float.value']
assert isinstance(ht._transformers_tree['categorical']['transformer'], FrequencyEncoder)
assert ht._transformers_tree['categorical']['outputs'] == ['categorical.value']
assert isinstance(ht._transformers_tree['bool']['transformer'], BinaryEncoder)
assert ht._transformers_tree['bool']['outputs'] == ['bool.value']
assert isinstance(ht._transformers_tree['datetime']['transformer'], UnixTimestampEncoder)
assert ht._transformers_tree['datetime']['outputs'] == ['datetime.value']
assert isinstance(ht._transformers_tree['names']['transformer'], FrequencyEncoder)
assert ht._transformers_tree['names']['outputs'] == ['names.value']
get_default_transformers.cache_clear()
get_default_transformer.cache_clear()
def test_hypertransformer_field_transformers():
"""Test the HyperTransformer with ``field_transformers`` provided.
This tests that the transformers specified in the ``field_transformers``
argument are used. Any output of a transformer that is not ML ready (not
in the ``_transform_output_sdtypes`` list) should be recursively transformed
till it is.
Setup:
- The datetime column is set to use a dummy transformer that stringifies
the input. That output is then set to use the categorical transformer.
Input:
- A dict mapping each field to a transformer.
- A dataframe with every sdtype.
Expected behavior:
- The transformed data should contain all the ML ready data.
- The reverse transformed data should be the same as the input.
"""
# Setup
config = {
'sdtypes': {
'integer': 'numerical',
'float': 'numerical',
'categorical': 'categorical',
'bool': 'boolean',
'datetime': 'datetime',
'names': 'categorical'
},
'transformers': {
'integer': FloatFormatter(missing_value_replacement='mean'),
'float': FloatFormatter(missing_value_replacement='mean'),
'categorical': FrequencyEncoder,
'bool': BinaryEncoder(missing_value_replacement='mode'),
'datetime': DummyTransformerNotMLReady,
'names': FrequencyEncoder
}
}
data = get_input_data()
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.set_config(config)
ht.fit(data)
transformed = ht.transform(data)
reverse_transformed = ht.reverse_transform(transformed)
# Assert
expected_transformed = get_transformed_data()
rename = {'datetime.value': 'datetime.value.value'}
expected_transformed = expected_transformed.rename(columns=rename)
transformed_datetimes = [0.8125, 0.8125, 0.3125, 0.3125, 0.3125, 0.8125, 0.3125, 0.3125]
expected_transformed['datetime.value.value'] = transformed_datetimes
pd.testing.assert_frame_equal(transformed, expected_transformed)
expected_reversed = get_reversed_data()
pd.testing.assert_frame_equal(expected_reversed, reverse_transformed)
def test_single_category():
"""Test that categorical variables with a single value are supported."""
# Setup
ht = HyperTransformer()
data = pd.DataFrame({
'a': ['a', 'a', 'a']
})
# Run
ht.detect_initial_config(data)
ht.update_transformers(column_name_to_transformer={
'a': OneHotEncoder()
})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
# Assert
pd.testing.assert_frame_equal(data, reverse)
def test_dtype_category():
"""Test that categorical variables of dtype category are supported."""
# Setup
data = pd.DataFrame({'a': ['a', 'b', 'c']}, dtype='category')
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
# Assert
pd.testing.assert_frame_equal(reverse, data)
def test_multiple_fits():
"""HyperTransformer should be able to be used multiple times.
Fitting, transforming and reverse transforming should produce the same results when
called on the same data multiple times.
"""
# Setup
data = get_input_data()
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
transformed1 = ht.transform(data)
reversed1 = ht.reverse_transform(transformed1)
ht.detect_initial_config(data)
ht.fit(data)
transformed2 = ht.transform(data)
reversed2 = ht.reverse_transform(transformed2)
# Assert
pd.testing.assert_frame_equal(transformed1, transformed2)
pd.testing.assert_frame_equal(reversed1, reversed2)
def test_multiple_fits_different_data():
"""HyperTransformer should be able to be used multiple times regardless of the data.
Fitting, transforming and reverse transforming should work when called on different data.
"""
# Setup
data = pd.DataFrame({'col1': [1, 2, 3], 'col2': [1.0, 0.0, 0.0]})
new_data = pd.DataFrame({'col2': [1, 2, 3], 'col1': [1.0, 0.0, 0.0]})
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
ht.detect_initial_config(new_data)
ht.fit(new_data)
transformed1 = ht.transform(new_data)
transformed2 = ht.transform(new_data)
reverse1 = ht.reverse_transform(transformed1)
reverse2 = ht.reverse_transform(transformed2)
# Assert
expected_transformed = pd.DataFrame({'col2.value': [1, 2, 3], 'col1.value': [1.0, 0.0, 0.0]})
pd.testing.assert_frame_equal(transformed1, expected_transformed)
pd.testing.assert_frame_equal(transformed2, expected_transformed)
pd.testing.assert_frame_equal(reverse1, new_data)
pd.testing.assert_frame_equal(reverse2, new_data)
def test_multiple_fits_different_columns():
"""HyperTransformer should be able to be used multiple times regardless of the data.
Fitting, transforming and reverse transforming should work when called on different data.
"""
# Setup
data = pd.DataFrame({'col1': [1, 2, 3], 'col2': [1.0, 0.0, 0.0]})
new_data = pd.DataFrame({'col3': [1, 2, 3], 'col4': [1.0, 0.0, 0.0]})
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
ht.detect_initial_config(new_data)
ht.fit(new_data)
transformed1 = ht.transform(new_data)
transformed2 = ht.transform(new_data)
reverse1 = ht.reverse_transform(transformed1)
reverse2 = ht.reverse_transform(transformed2)
# Assert
expected_transformed = pd.DataFrame({'col3.value': [1, 2, 3], 'col4.value': [1.0, 0.0, 0.0]})
pd.testing.assert_frame_equal(transformed1, expected_transformed)
pd.testing.assert_frame_equal(transformed2, expected_transformed)
pd.testing.assert_frame_equal(reverse1, new_data)
pd.testing.assert_frame_equal(reverse2, new_data)
def test_multiple_fits_with_set_config():
"""HyperTransformer should be able to be used multiple times regardless of the data.
Fitting, transforming and reverse transforming should work when called on different data.
"""
# Setup
data = get_input_data()
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.set_config(config={
'sdtypes': {'integer': 'categorical'},
'transformers': {'integer': FrequencyEncoder}
})
ht.fit(data)
transformed1 = ht.transform(data)
reverse1 = ht.reverse_transform(transformed1)
ht.fit(data)
transformed2 = ht.transform(data)
reverse2 = ht.reverse_transform(transformed2)
# Assert
pd.testing.assert_frame_equal(transformed1, transformed2)
pd.testing.assert_frame_equal(reverse1, reverse2)
def test_multiple_detect_configs_with_set_config():
"""HyperTransformer should be able to be used multiple times regardless of the data.
Fitting, transforming and reverse transforming should work when called on different data.
"""
# Setup
data = get_input_data()
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
transformed1 = ht.transform(data)
reverse1 = ht.reverse_transform(transformed1)
ht.set_config(config={
'sdtypes': {'integers': 'categorical'},
'transformers': {'integers': FrequencyEncoder}
})
ht.detect_initial_config(data)
ht.fit(data)
transformed2 = ht.transform(data)
reverse2 = ht.reverse_transform(transformed2)
# Assert
pd.testing.assert_frame_equal(transformed1, transformed2)
pd.testing.assert_frame_equal(reverse1, reverse2)
def test_detect_initial_config_doesnt_affect_fit():
"""HyperTransformer should fit the same way regardless of ``detect_initial_config``.
Calling the ``detect_initial_config`` method should not affect the results of ``fit``,
``transform`` or ``reverse_transform``.
"""
# Setup
data = get_input_data()
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
transformed1 = ht.transform(data)
reversed1 = ht.reverse_transform(transformed1)
ht.detect_initial_config(data)
ht.fit(data)
transformed2 = ht.transform(data)
reversed2 = ht.reverse_transform(transformed1)
# Assert
pd.testing.assert_frame_equal(transformed1, transformed2)
pd.testing.assert_frame_equal(reversed1, reversed2)
def test_multiple_detects():
"""HyperTransformer should be able to be used multiple times regardless of the data.
Fitting, transforming and reverse transforming should work when called on different data.
"""
# Setup
data = pd.DataFrame({'col2': [1, 2, 3], 'col1': [1.0, 0.0, 0.0]})
new_data = get_input_data()
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.detect_initial_config(new_data)
ht.fit(new_data)
transformed = ht.transform(new_data)
reverse = ht.reverse_transform(transformed)
# Assert
pd.testing.assert_frame_equal(transformed, get_transformed_data())
pd.testing.assert_frame_equal(reverse, get_reversed_data())
def test_transform_without_fit():
"""HyperTransformer should raise an error when transforming without fitting."""
# Setup
data = pd.DataFrame()
ht = HyperTransformer()
ht.detect_initial_config(data)
# Run / Assert
with pytest.raises(Error):
ht.transform(data)
def test_fit_data_different_than_detect():
"""HyperTransformer should raise an error when transforming without fitting."""
# Setup
ht = HyperTransformer()
detect_data = pd.DataFrame({'col1': [1, 2], 'col2': ['a', 'b']})
data = pd.DataFrame({'col1': [1, 2], 'col3': ['a', 'b']})
# Run / Assert
error_msg = re.escape(
'The data you are trying to fit has different columns than the original '
"detected data (unknown columns: ['col3']). Column names and their "
"sdtypes must be the same. Use the method 'get_config()' to see the expected "
'values.'
)
ht.detect_initial_config(detect_data)
with pytest.raises(Error, match=error_msg):
ht.fit(data)
def test_transform_without_fitting():
"""HyperTransformer shouldn't transform when fit hasn't been called yet."""
# Setup
data = pd.DataFrame({'col1': [1, 2], 'col2': ['a', 'b']})
ht = HyperTransformer()
# Run / Assert
ht.detect_initial_config(data)
error_msg = (
'The HyperTransformer is not ready to use. Please fit your data first using '
"'fit' or 'fit_transform'."
)
with pytest.raises(NotFittedError, match=error_msg):
ht.transform(data)
def test_transform_without_refitting():
"""HyperTransformer shouldn't transform when a new config hasn't been fitted."""
# Setup
data =
|
pd.DataFrame({'col1': [1, 2], 'col2': ['a', 'b']})
|
pandas.DataFrame
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import numpy as np
import pandas as pd
from .... import opcodes
from ....serialization.serializables import BoolField, Float64Field
from ..aggregation import BaseDataFrameExpandingAgg
_stage_info = namedtuple('_stage_info', ('map_groups', 'map_sources', 'combine_sources',
'combine_columns', 'combine_funcs', 'key_to_funcs',
'valid_columns', 'min_periods_func_name'))
_cum_alpha_coeff_func = '_cum_alpha_coeff'
_cum_square_alpha_coeff_func = '_cum_square_alpha_coeff'
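# Background for the map/combine scheme below: with adjust=True, pandas defines
# the exponentially weighted mean at step t as
#     ewm_mean_t = sum_i((1 - alpha)**(t - i) * x_i) / sum_i((1 - alpha)**(t - i))
# so the chunked implementation tracks the weighted cumulative sums (numerator)
# and the cumulative weight sums such as _cum_alpha_coeff (denominator)
# separately, and rescales results from earlier chunks by
# (1 - alpha)**pred_exponent when combining (this summary is inferred from the
# code below, not taken from the original comments).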
def _add_pred_results(pred_results, local_results, axis=0, alpha=None, order=1,
alpha_ignore_na=False, pred_exponent=None, alpha_data=None):
if pred_results[0].ndim == 1:
df_filler = 0
else:
df_filler = pred_results[0].iloc[-1, :].dropna()
df_filler[:] = 0
new_locals = []
combine_axis = pred_results[0].ndim - axis - 1
weight = (1 - alpha) ** order
pred_coeff = weight ** pred_exponent
for idx, (pred_result, local_result) in enumerate(zip(pred_results, local_results)):
local_result.fillna(df_filler, inplace=True)
pred_result = pred_result.mul(pred_coeff).sum(axis=axis)
if alpha_ignore_na:
pred_df = pred_result * weight ** alpha_data.notna().cumsum()
else:
weights = np.arange(1, len(local_result) + 1)
if local_result.ndim == 2:
weights_df = pd.DataFrame(
np.repeat(weights.reshape((len(local_result), 1)), len(local_result.columns), axis=1),
columns=local_result.columns, index=local_result.index)
else:
weights_df = pd.Series(weights, index=local_result.index)
weights_df[alpha_data.isna()] = np.nan
weights_df.ffill(inplace=True)
weights_df.fillna(0, inplace=True)
weights_df = weight ** weights_df
pred_df = weights_df.mul(pred_result, axis=combine_axis)
new_locals.append(local_result.add(pred_df, axis=combine_axis))
return new_locals
def _combine_mean(pred_results, local_results, axis=0, alpha=None, alpha_ignore_na=False,
pred_exponent=None):
if pred_results is None:
return (local_results[0] / local_results[1]).ffill()
alpha_data = local_results[1]
local_results[0] = local_results[0].ffill()
local_results[1] = alpha_data.ffill()
local_sum_data, local_count_data = local_results
if pred_results is not None:
local_sum_data, local_count_data = _add_pred_results(
pred_results, local_results, axis=axis, alpha=alpha, alpha_ignore_na=alpha_ignore_na,
pred_exponent=pred_exponent, alpha_data=alpha_data
)
return local_sum_data / local_count_data
def _combine_var(pred_results, local_results, axis=0, alpha=None, alpha_ignore_na=False,
pred_exponent=None):
local_results[0] = local_results[0].ffill()
alpha_data = local_results[1]
local_results[1] = alpha_data.ffill()
local_results[2] = local_results[2].ffill()
alpha2_data = local_results[3]
local_results[3] = alpha2_data.ffill()
local_sum_data, local_count_data, local_sum_square, local_count2_data = local_results
if pred_results is None:
return (local_sum_square - local_sum_data ** 2 / local_count_data) \
/ (local_count_data - local_count2_data / local_count_data)
pred_sum_data, pred_count_data, pred_sum_square, pred_count2_data = pred_results
local_count2_data, = _add_pred_results(
[pred_count2_data], [local_count2_data], axis=axis, alpha=alpha, order=2,
alpha_ignore_na=alpha_ignore_na, pred_exponent=pred_exponent, alpha_data=alpha_data)
local_sum_square, local_sum_data, local_count_data = \
_add_pred_results(
[pred_sum_square, pred_sum_data, pred_count_data],
[local_sum_square, local_sum_data, local_count_data],
axis=axis, alpha=alpha, alpha_ignore_na=alpha_ignore_na,
pred_exponent=pred_exponent, alpha_data=alpha_data
)
return (local_sum_square - local_sum_data ** 2 / local_count_data) \
/ (local_count_data - local_count2_data / local_count_data)
def _combine_std(pred_results, local_results, axis=0, alpha=None, alpha_ignore_na=False,
pred_exponent=None):
return np.sqrt(_combine_var(
pred_results, local_results, axis=axis, alpha=alpha, alpha_ignore_na=alpha_ignore_na,
pred_exponent=pred_exponent))
def _combine_data_count(pred_results, local_results, axis=0, **__):
if pred_results is None:
return local_results[0]
return local_results[0].add(pred_results[0].sum(), axis=pred_results[0].ndim - axis - 1)
class DataFrameEwmAgg(BaseDataFrameExpandingAgg):
_op_type_ = opcodes.EWM_AGG
_alpha = Float64Field('alpha')
_adjust = BoolField('adjust')
_alpha_ignore_na = BoolField('alpha_ignore_na')
_validate_columns = BoolField('_validate_columns')
_exec_cache = dict()
def __init__(self, alpha=None, adjust=None, alpha_ignore_na=None, validate_columns=None, **kw):
super().__init__(_alpha=alpha, _adjust=adjust, _alpha_ignore_na=alpha_ignore_na,
_validate_columns=validate_columns, **kw)
@property
def alpha(self) -> float:
return self._alpha
@property
def adjust(self) -> bool:
return self._adjust
@property
def alpha_ignore_na(self) -> bool:
return self._alpha_ignore_na
@property
def validate_columns(self) -> bool:
return self._validate_columns
@classmethod
def _get_stage_functions(cls, op: "DataFrameEwmAgg", func):
if func == '_data_count':
return ['_data_count'], _combine_data_count
elif func == 'mean':
return ['cumsum', _cum_alpha_coeff_func], _combine_mean
elif func in {'var', 'std'}:
return ['cumsum', _cum_alpha_coeff_func, 'cumsum2', _cum_square_alpha_coeff_func], \
_combine_var if func == 'var' else _combine_std
else: # pragma: no cover
raise NotImplementedError
@classmethod
def _calc_data_alphas(cls, op: "DataFrameEwmAgg", in_data, order):
exec_cache = cls._exec_cache[op.key]
cache_key = ('_calc_data_alphas', order, id(in_data))
try:
return exec_cache[cache_key]
except KeyError:
pass
cum_df = in_data.copy()
cum_df[cum_df.notna()] = 1
if not op.alpha_ignore_na:
cum_df.ffill(inplace=True)
cum_df = cum_df.cumsum(axis=op.axis) - 1
if not op.alpha_ignore_na:
cum_df[in_data.isna()] = np.nan
result = exec_cache[cache_key] = (1 - op.alpha) ** (order * cum_df)
return result
@classmethod
def _execute_cum_alpha_coeff(cls, op: "DataFrameEwmAgg", in_data, order, final=True):
exec_cache = cls._exec_cache[op.key]
cache_key = ('cum_alpha_coeff', order, id(in_data))
summary = None
try:
result = exec_cache[cache_key]
except KeyError:
alphas = cls._calc_data_alphas(op, in_data, order)
result = alphas.cumsum()
exec_cache[cache_key] = result
if final:
if op.output_agg:
summary = result.ffill()[-1:]
return result, summary
@classmethod
def _execute_cumsum(cls, op: "DataFrameEwmAgg", in_data):
exec_cache = cls._exec_cache[op.key]
cache_key = ('cumsum', id(in_data))
summary = None
try:
result = exec_cache[cache_key]
except KeyError:
min_periods = 1 if op.min_periods > 0 else 0
try:
data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,
min_periods=min_periods).mean()
except ValueError:
in_data = in_data.copy()
data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,
min_periods=min_periods).mean()
alpha_sum, _ = op._execute_cum_alpha_coeff(op, in_data, 1, final=False)
result = exec_cache[cache_key] = data * alpha_sum
if op.output_agg:
summary = result.ffill()[-1:]
return result, summary
@classmethod
def _execute_cumsum2(cls, op: "DataFrameEwmAgg", in_data):
summary = None
min_periods = 1 if op.min_periods > 0 else 0
try:
data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,
min_periods=min_periods).var(bias=True)
except ValueError:
in_data = in_data.copy()
data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,
min_periods=min_periods).var(bias=True)
alpha_sum, _ = op._execute_cum_alpha_coeff(op, in_data, 1)
cumsum, _ = op._execute_cumsum(op, in_data)
result = alpha_sum * data + cumsum ** 2 / alpha_sum
if op.output_agg:
summary = result.ffill()[-1:]
return result, summary
@classmethod
def _execute_map_function(cls, op: "DataFrameEwmAgg", func, in_data):
in_data = in_data._get_numeric_data()
summary = None
min_periods = 1 if op.min_periods > 0 else 0
if func == '_data_count':
result = in_data.expanding(min_periods=min_periods).count()
elif func in (_cum_alpha_coeff_func, _cum_square_alpha_coeff_func):
order = 1 if func == _cum_alpha_coeff_func else 2
result, summary = cls._execute_cum_alpha_coeff(op, in_data, order)
elif func == 'cumsum':
result, summary = cls._execute_cumsum(op, in_data)
elif func == 'cumsum2':
result, summary = cls._execute_cumsum2(op, in_data)
else: # pragma: no cover
            raise ValueError('Map function %s not supported' % func)
if op.output_agg:
summary = summary if summary is not None else result.iloc[-1:]
else:
summary = None
return result, summary
@classmethod
def _execute_map(cls, ctx, op: "DataFrameEwmAgg"):
try:
cls._exec_cache[op.key] = dict()
super()._execute_map(ctx, op)
if op.output_agg:
in_data = ctx[op.inputs[0].key]
summaries = list(ctx[op.outputs[1].key])
if op.alpha_ignore_na:
in_count = in_data.count()
if not isinstance(in_count, pd.Series):
in_count = pd.Series([in_count])
summary = in_count
if in_data.ndim == 2:
summary = in_count.to_frame().T
summary.index = summaries[-1].index
else:
remain_counts = in_data.notna()[::-1].to_numpy().argmax(axis=0)
if in_data.ndim > 1:
remain_counts = remain_counts.reshape((1, len(in_data.columns)))
summary =
|
pd.DataFrame(remain_counts, columns=in_data.columns, index=summaries[-1].index)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
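        # Splits the CSV at `path` into `num_tasks` consecutive row ranges and
        # reads them in parallel with a ThreadPool; only the first chunk keeps
        # the header row, so the remaining chunks get their column names
        # reassigned from it before concatenation.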
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
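# Each default NA token, placed in its own column, should parse to NaN.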
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
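# NaN/NaT written out by to_csv should round-trip: the frame read back with
# parse_dates=['B'] matches the original and keeps datetime64 dtype.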
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
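# Duplicate column names are mangled to 'A.1', 'B.1', ... by default;
# mangle_dupe_cols=False keeps the duplicates as-is.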
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
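# Two-digit years combined with a time column via parse_dates=[['date', 'time']]
# should build a single 'date_time' index from the merged columns.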
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
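# A four-level column header plus a two-level row index should round-trip;
# options that conflict with multi-row headers (as_recarray, names, usecols,
# non-numeric index_col) should raise.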
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
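# skip_footer is only honoured by the python engine; other engines should
# raise a ValueError mentioning 'skip_footer'.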
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments); with
# header=1 the header row is then taken from the second remaining
# line ('A,B,C')
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat artificial, since the code never sees raw bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
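# Converters should still be applied to columns containing missing values,
# and two equivalent 'days' converters should yield identical frames.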
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201: test that an integer index_col is interpreted relative to usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
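# IDs wider than int64 should parse as object/strings; values exactly at the
# int64 bounds still parse as integers, one step outside parse as strings.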
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
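# Exercise the tokenizer's end-of-file states (trailing whitespace, comments,
# bare CR) with skip_blank_lines both on and off.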
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
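# With sep=None the python engine should sniff the '|' delimiter and match an
# explicit delimiter='|' read.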
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat artificial, since the code never sees raw bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
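# Fixed-width parsing: colspecs and widths should give the same frame as the
# equivalent CSV; passing both, or neither, should raise.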
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can appear;
# this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
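# parse the corrupt gzip stream repeatedly: the bug freed src->buffer twice,
# so any exception is acceptable here as long as no segfault occurs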
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
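# compact_ints downcasts each integer column to the smallest width that can
# hold it (int8 here); use_unsigned switches the target to uint8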
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows drops the first 4 lines (the comments plus 'X,Y,Z'), while
# header=1 then takes 'A,B,C', the second of the remaining lines, as the
# header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
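# buffer_lines=2 keeps each low-memory chunk tiny so the chunked
# parse-and-concatenate code path is exercised on every read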
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
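# 25 significant digits exceed what a double can represent, so the normal
# and high-precision converters may disagree in the last bits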
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
|
StringIO(text)
|
pandas.compat.StringIO
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
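# split the file into num_tasks contiguous (start_row, nrows) slices; each
# thread parses its own slice and the header from the first slice is reused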
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
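# the stray double quote in the fourth line opens a quoted field that never
# closes, so parsing fails until the closing quote is appended below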
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
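# each generated line places one NA sentinel in a different column position,
# so the parsed frame should be an nv x nv grid of NaN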
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
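# rows 3-5 become NaN/NaT so the CSV round-trip must reproduce missing
# values in both the float and datetime columns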
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
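# file lines 6 and 8 hold data rows i=5 and i=7 (line 0 is the header),
# which are exactly the rows omitted from condensed_text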
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
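        # with fewer names than fields per row, the leftover leading column becomes the index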
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
        # header declares too few columns for the second data row
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
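        # one extra field per data row relative to the header implies the first column is the index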
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
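        # converters keyed by column name or by position should give identical results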
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
        # skiprows=4 drops the first four lines (three comments plus the X,Y,Z row);
        # header=1 then selects 'A,B,C', the second of the remaining lines
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
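        # usecols should select the same subset whether given positions or column names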
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_emtpy_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
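        # a header-only file should honour every supported index_col form (None, False, int, str, lists)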
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
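        # IDs wider than int64 are kept as object/strings; forcing int64 via converters should overflow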
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
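        # sep=None makes the python engine sniff the delimiter; result should match an explicit '|'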
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
        # From <NAME>: apparently some non-space filler characters can
        # appear; this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
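# A small usage sketch (not part of the test above). float_precision is a real
# read_csv option of the C engine: 'high' selects the higher-precision converter
# and 'round_trip' makes the parsed value match Python's float(), which is
# exactly what the assertions above check.
import pandas as pd
from io import StringIO

text = 'a\n1.2345678901234567890123456'
default_val = pd.read_csv(StringIO(text))['a'][0]
roundtrip_val = pd.read_csv(StringIO(text), float_precision='round_trip')['a'][0]
print(default_val, roundtrip_val == float(text[2:]))  # round_trip reproduces float()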
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
### FIXME in GH9424
raise nose.SkipTest(
"GH 9424; known failure read_csv with duplicate columns")
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one', dtype='f')], axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_usecols_implicit_index_col(self):
# #2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# #2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_pure_python_failover(self):
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = self.read_csv(StringIO(data), comment='#')
expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})
tm.assert_frame_equal(result, expected)
def test_decompression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(open(path, 'rb'), compression='gzip')
tm.assert_frame_equal(result, expected)
with
| tm.ensure_clean() | pandas.util.testing.ensure_clean |
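# A minimal, self-contained sketch of the completion above (assumption: the
# old-style pandas.util.testing import these tests use; newer pandas exposes the
# same helper under pandas._testing). ensure_clean() yields a temporary file
# path and deletes the file on exit, which is why the compression tests above
# wrap their gzip/bz2 round-trips in it.
import gzip
import pandas as pd
import pandas.util.testing as tm

def gzip_roundtrip_sketch():
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    with tm.ensure_clean() as path:  # temp path, removed when the block exits
        with gzip.GzipFile(path, mode='wb') as fh:
            fh.write(df.to_csv(index=False).encode('utf-8'))
        result = pd.read_csv(path, compression='gzip')
    tm.assert_frame_equal(result, df)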
import os
import numpy as np
import pandas as pd
import streamlit as st
import time
from datetime import datetime
from glob import glob
from omegaconf import OmegaConf
from pandas.api.types import is_numeric_dtype
from streamlit_autorefresh import st_autorefresh
from dataloader import read_csv, clear_data
from preprocessing.filter import apply_filter
from preprocessing.target import apply_target, target_encode_numeric, target_encode_category
from preprocessing import delete_nan, replace_nan, delete_outlier, encode_category
from model import split_data, get_best_model
from analysis import get_shap_value, get_importance, simulation_1d, simulation_2d
from graph.evaluation import plot_reg_evaluation, plot_confusion_matrix
from graph.importance import plot_importance
from graph.explanation import plot_shap, plot_simulation_1d, plot_simulation_2d
from graph.matplot import plot_simulation_1d as matplotlib_simulation_1d
from graph.matplot import plot_shap as matplotlib_shap
from helper import get_session_id, encode, convert_figs2zip
# Warning
import warnings
warnings.filterwarnings('ignore')
# # Korean
# import matplotlib
# from matplotlib import font_manager, rc
# font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
# rc('font', family=font_name)
# matplotlib.rcParams['axes.unicode_minus'] = False
# Create Session
if 'config' not in st.session_state:
st.session_state['config'] = OmegaConf.load('config.yaml')
if 'files' not in st.session_state:
st.session_state['files'] = np.sort(glob(
os.path.join(
st.session_state['config']['file']['root'],
'*.csv'
)
))
if 'train_file_path' not in st.session_state:
st.session_state['train_file_path'] = None
if 'filter' not in st.session_state:
st.session_state['filter'] = None
if 'encoder' not in st.session_state:
st.session_state['encoder'] = None
if 'target' not in st.session_state:
st.session_state['target'] = None
if 'feature_all' not in st.session_state:
st.session_state['feature_all'] = None
if 'feature_selected' not in st.session_state:
st.session_state['feature_selected'] = None
if 'data_quality' not in st.session_state:
st.session_state['data_quality'] = None
if 'mode' not in st.session_state:
st.session_state['mode'] = None
if 'model' not in st.session_state:
st.session_state['model'] = None
if 'state_0' not in st.session_state:
st.session_state['state_0'] = None
if '_df_0' not in st.session_state:
st.session_state['_df_0'] = None
if 'state_1' not in st.session_state:
st.session_state['state_1'] = None
if '_df_1' not in st.session_state:
st.session_state['_df_1'] = None
if 'state_2' not in st.session_state:
st.session_state['state_2'] = None
if '_df_2' not in st.session_state:
st.session_state['_df_2'] = None
if 'state_3' not in st.session_state:
st.session_state['state_3'] = None
if '_df_3' not in st.session_state:
st.session_state['_df_3'] = None
# Title
st.markdown('# XAI for tree models')
st.write(f'SESSION ID: {get_session_id()}')
# STEP 1.
st.markdown('### STEP 1. Data preparation')
# Start Time
start_time = time.time()
# State 0: _df_0
state_0 = {}
# Select Train
train_file_path = st.selectbox(
label = 'Train Data',
options = st.session_state['files'],
index = 0
)
state_0['train_file_path'] = train_file_path
# update _df_0
if (
state_0 != st.session_state['state_0']
):
df = read_csv(
path = state_0['train_file_path'],
max_len = st.session_state['config']['data']['max_len'],
add_random_noise = st.session_state['config']['data']['add_random_noise'],
random_state = st.session_state['config']['setup']['random_state'],
)
df = clear_data(df)
# Update session state
st.session_state['train_file_path'] = state_0['train_file_path']
st.session_state['_df_0'] = df
st.session_state['model'] = None
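# Illustrative sketch only (assumption): the add_random_noise flag passed to
# dataloader.read_csv above is presumed to append a synthetic benchmark column
# named 'random_noise', so that features ranked below it by SHAP importance can
# later be discarded. The real dataloader may implement this differently.
import numpy as np
import pandas as pd

def add_random_noise_column(df: pd.DataFrame, random_state: int = 0) -> pd.DataFrame:
    rng = np.random.default_rng(random_state)
    out = df.copy()
    out['random_noise'] = rng.normal(size=len(out))  # pure-noise benchmark feature
    return out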
# Print Options
st.sidebar.write('Options')
# State 1: _df_1
state_1 = {}
# Get Filter Number
num_filter = st.sidebar.number_input(
label = 'Filter',
value = 0,
min_value = 0,
max_value = len(st.session_state['_df_0'].columns),
step=1
)
# Get Filter Value
filter = {}
if num_filter > 0:
for i in range(num_filter):
column = st.selectbox(
label = f'Filtered column #{i+1}',
options = [None]+list(st.session_state['_df_0'].columns),
)
if column is not None:
values = list(
np.sort(st.session_state['_df_0'][column].dropna().unique())
)
selected_values = st.multiselect(
label = f'Select values #{i+1}',
options = values,
default = values
)
filter[column] = selected_values
state_1['filter'] = filter
# Get Mode
mode = st.selectbox(
label = 'Type',
options = ['Regression', 'Binary Classification']
)
state_1['mode'] = mode
# Get Target
target = st.selectbox(
label = 'Target',
options = list(st.session_state['_df_0'].columns)
)
state_1['target'] = target
# Target Encoding
if mode == 'Binary Classification':
values = st.session_state['_df_0'][target].dropna()
if is_numeric_dtype(values):
column_c0, column_i0, column_c1, column_i1 = st.columns(4)
with column_c0:
l_q = st.number_input(
label = 'Label 0 Upper Limit (%)',
value = 20,
min_value = 0,
max_value = 100,
step = 1
)
state_1['l_q'] = l_q
with column_c1:
h_q = st.number_input(
label = 'Label 1 Lower Limit (%)',
value = 80,
min_value = 0,
max_value = 100,
step = 1
)
state_1['h_q'] = h_q
with column_i0:
st.metric(
label = 'Label 0 Maximum',
value = f"{np.percentile(values, q=l_q):.4f}"
)
with column_i1:
st.metric(
label = 'Label 1 Minimum',
value = f"{np.percentile(values, q=h_q):.4f}"
)
else:
uniques = list(np.sort(np.unique(values)))
col_0, col_1 = st.columns(2)
with col_0:
label_0 = st.selectbox(
label = 'Label 0',
options = uniques,
index = 0
)
state_1['label_0'] = label_0
with col_1:
label_1 = st.selectbox(
label = 'Label 1',
options = [column for column in uniques if column != label_0],
index = 0
)
state_1['label_1'] = label_1
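# Illustrative sketch only (assumption): target_encode_numeric, imported from
# preprocessing.target above, is presumed to binarize a numeric target with the
# two percentile cut-offs chosen in the widgets: values at or below the l_q
# percentile become label 0, values at or above the h_q percentile become
# label 1, and rows in between are dropped. The real helper may differ.
import numpy as np
import pandas as pd

def target_encode_numeric_sketch(df: pd.DataFrame, target: str, l_q: float, h_q: float) -> pd.DataFrame:
    values = df[target].astype(float)
    low, high = np.percentile(values.dropna(), [l_q, h_q])
    out = df.copy()
    out[target] = np.where(values <= low, 0, np.where(values >= high, 1, np.nan))
    return out.dropna(subset=[target]).astype({target: int})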
# update _df_1
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1']
):
# Get DF
df = st.session_state['_df_0'].copy()
# Apply Filter
df = apply_filter(
df = df,
filter = filter
)
# Apply Target
df = apply_target(
df = df,
target = target
)
# Encode target if the mode is binary classification
if state_1['mode'] == 'Binary Classification':
if ('l_q' in state_1) and ('h_q' in state_1):
df = target_encode_numeric(
df = df,
target = state_1['target'],
l_q = state_1['l_q'],
h_q = state_1['h_q']
)
elif ('label_0' in state_1) and ('label_1' in state_1):
df = target_encode_category(
df = df,
target = state_1['target'],
label_0 = state_1['label_0'],
label_1 = state_1['label_1']
)
# Update session state
st.session_state['filter'] = state_1['filter']
st.session_state['target'] = state_1['target']
st.session_state['feature_all'] = [column for column in df.columns if column != state_1['target']]
st.session_state['data_quality'] = df.notnull().sum() / len(df)
st.session_state['mode'] = state_1['mode']
if ('l_q' in state_1) and ('h_q' in state_1):
st.session_state['l_q'] = state_1['l_q']
st.session_state['h_q'] = state_1['h_q']
st.session_state['label_0'] = None
st.session_state['label_1'] = None
elif ('label_0' in state_1) and ('label_1' in state_1):
st.session_state['l_q'] = None
st.session_state['h_q'] = None
st.session_state['label_0'] = state_1['label_0']
st.session_state['label_1'] = state_1['label_1']
else:
st.session_state['l_q'] = None
st.session_state['h_q'] = None
st.session_state['label_0'] = None
st.session_state['label_1'] = None
st.session_state['_df_1'] = df
st.session_state['model'] = None
# State 2: _df_2
state_2 = {}
# NaN Data
nan_data = st.sidebar.selectbox(
label = 'NaN Data',
options = ['Delete', 'Replace']
)
state_2['nan_data'] = nan_data
# Auto Feature Selection
auto_feature_selection = st.sidebar.selectbox(
label = 'Auto Feature Selection',
options = [False, True]
)
state_2['auto_feature_selection'] = auto_feature_selection
# update _df_2
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1'] or
state_2 != st.session_state['state_2']
):
# Get DF
df = st.session_state['_df_1'].copy()
# Encode Data
df, encoder = encode_category(df)
# Update session state
st.session_state['nan_data'] = state_2['nan_data']
st.session_state['auto_feature_selection'] = auto_feature_selection
st.session_state['encoder'] = encoder
st.session_state['_df_2'] = df.reset_index(drop=True)
st.session_state['model'] = None
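# Illustrative sketch only (assumption): preprocessing.encode_category above is
# presumed to integer-encode object columns and return the encoded frame plus a
# per-column code-to-category mapping, which is what the sidebar later prints
# as "Encoded Features". The real helper may differ.
import pandas as pd

def encode_category_sketch(df: pd.DataFrame):
    out, encoder = df.copy(), {}
    for col in out.columns:
        if out[col].dtype == object:
            cat = pd.Categorical(out[col])
            encoder[col] = dict(enumerate(cat.categories))
            out[col] = cat.codes  # NaN becomes -1
    return out, encoder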
# State 3: _df_3
state_3 = {}
# Select Features
st.sidebar.markdown("""---""")
st.sidebar.write('Features')
st.sidebar.text(f'Data quality | name')
index = [
st.sidebar.checkbox(
label = f"{st.session_state['data_quality'][column]:.2f} | {column}",
key = f"_{column}",
value = True,
) for column in st.session_state['feature_all']
]
feature_selected = list(np.array(st.session_state['feature_all'])[index])
state_3['feature_selected'] = feature_selected
# Manage Features
def uncheck():
for column in st.session_state['feature_all']:
st.session_state[f'_{column}'] = False
def check():
for column in st.session_state['feature_all']:
st.session_state[f'_{column}'] = True
_, col_1, col_2 = st.sidebar.columns([1, 4, 5])
with col_1:
st.button(
label = 'Check All',
on_click = check
)
with col_2:
st.button(
label = 'Uncheck All',
on_click = uncheck
)
# update _df_3
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1'] or
state_2 != st.session_state['state_2'] or
state_3 != st.session_state['state_3']
):
# Get DF
df = st.session_state['_df_2'].copy()
# Select columns
columns = state_3['feature_selected'] + [st.session_state['target']]
df = df[columns]
# Update session state
st.session_state['feature_selected'] = state_3['feature_selected']
st.session_state['_df_3'] = df
st.session_state['model'] = None
# Update states
st.session_state['state_0'] = state_0
st.session_state['state_1'] = state_1
st.session_state['state_2'] = state_2
st.session_state['state_3'] = state_3
# Data wall time
wall_time = time.time() - start_time
# Print Information
st.sidebar.markdown("""---""")
st.sidebar.write(f"Wall time: {wall_time:.4f} sec")
st.sidebar.write(f"Data Num: {len(st.session_state['_df_3'])}")
st.sidebar.write(f"Target: {st.session_state['target']}")
st.sidebar.write(f"Feature Num: {len(feature_selected)}")
# Print Encoder
columns = st.session_state['feature_selected'] + [st.session_state['target']]
encoder = {}
if len(st.session_state['encoder']) > 0:
for column in columns:
if column in st.session_state['encoder']:
encoder[column] = st.session_state['encoder'][column]
if len(encoder) > 0:
st.sidebar.write('Encoded Features')
st.sidebar.write(encoder)
# Print DF
st.write('Sample Data (5)')
st.write(st.session_state['_df_3'].iloc[:5])
# Train Model
if st.session_state['model'] is None:
st.markdown("""---""")
if st.button('Start Model Training'):
# Log
time_now = str(datetime.now())[:19]
print(f'START | {time_now} | {get_session_id()} | {st.session_state["train_file_path"]}')
# Load Data
df = st.session_state['_df_3'].copy()
features = st.session_state['feature_selected']
target = st.session_state['target']
if st.session_state['mode'] == 'Regression':
mode = 'reg'
if st.session_state['mode'] == 'Binary Classification':
mode = 'clf'
# NaN Data
df = df[features+[target]].copy()
if df.isna().sum().sum() == 0:
st.session_state['nan_processed'] = False
else:
if st.session_state['nan_data'] == 'Delete':
df = delete_nan(df)
elif st.session_state['nan_data'] == 'Replace':
df = replace_nan(
df = df,
random_state = st.session_state['config']['setup']['random_state']
)
st.session_state['nan_processed'] = True
st.session_state['data_num'] = len(df)
# Dataset
datasets = split_data(
df = df,
features = features,
target = target,
mode = mode,
n_splits = st.session_state['config']['split']['n_splits'],
shuffle = True,
random_state = st.session_state['config']['setup']['random_state']
)
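# Illustrative sketch only (assumption): model.split_data above is presumed to
# wrap scikit-learn cross-validation splitters -- KFold for 'reg' and
# StratifiedKFold for 'clf' -- returning one (train, valid) fold per split.
# The real helper may package the folds differently.
from sklearn.model_selection import KFold, StratifiedKFold

def split_data_sketch(df, features, target, mode, n_splits, shuffle, random_state):
    splitter_cls = StratifiedKFold if mode == 'clf' else KFold
    splitter = splitter_cls(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
    X, y = df[features], df[target]
    return [
        {'X_train': X.iloc[tr], 'y_train': y.iloc[tr],
         'X_valid': X.iloc[va], 'y_valid': y.iloc[va]}
        for tr, va in splitter.split(X, y)
    ]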
# Best Model
best_model, history = get_best_model(
datasets = datasets,
mode = mode,
random_state = st.session_state['config']['setup']['random_state'],
n_jobs = st.session_state['config']['setup']['n_jobs']
)
best_model['features'] = features
best_model['target'] = target
best_model['datasets'] = datasets
# SHAP
source, shap_value = get_shap_value(
config = best_model,
max_num = st.session_state['config']['shap']['max_num']
)
output = get_importance(
shap_value,
sort = st.session_state['config']['importance']['sort'],
normalize = st.session_state['config']['importance']['normalize']
)
shap = {}
shap['features'] = output['features']
shap['importance'] = output['importance']
shap['source'] = source
shap['shap_value'] = shap_value
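# Illustrative sketch only (assumption): analysis.get_importance above is
# presumed to reduce the (n_samples, n_features) SHAP matrix to one score per
# feature via the mean absolute SHAP value, optionally sorted descending and
# normalized to sum to 1. The real implementation may differ in detail.
import numpy as np

def mean_abs_shap_importance(shap_value, feature_names, sort=True, normalize=True):
    importance = np.abs(np.asarray(shap_value)).mean(axis=0)
    if normalize and importance.sum() > 0:
        importance = importance / importance.sum()
    order = np.argsort(importance)[::-1] if sort else np.arange(len(importance))
    return [feature_names[i] for i in order], importance[order].tolist()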
if (
st.session_state['auto_feature_selection'] and
'random_noise' in shap['features']
):
features = shap['features']
index = np.where(np.array(features)=='random_noise')[0][0]
if index != 0:
# Print Info
st.write('Auto Feature Selection is ON.')
# Set new features
features = features[:index]
# Dataset
datasets = split_data(
df = df,
features = features,
target = target,
mode = mode,
n_splits = st.session_state['config']['split']['n_splits'],
shuffle = True,
random_state = st.session_state['config']['setup']['random_state']
)
# Best Model
best_model, history = get_best_model(
datasets = datasets,
mode = mode,
random_state = st.session_state['config']['setup']['random_state'],
n_jobs = st.session_state['config']['setup']['n_jobs']
)
best_model['features'] = features
best_model['target'] = target
best_model['datasets'] = datasets
# SHAP
source, shap_value = get_shap_value(
config = best_model,
max_num = st.session_state['config']['shap']['max_num']
)
output = get_importance(
shap_value,
sort = st.session_state['config']['importance']['sort'],
normalize = st.session_state['config']['importance']['normalize']
)
shap = {}
shap['features'] = output['features']
shap['importance'] = output['importance']
shap['source'] = source
shap['shap_value'] = shap_value
# Update session state
st.session_state['history'] = history
st.session_state['model'] = best_model
st.session_state['shap'] = shap
# Refresh page
st_autorefresh(interval=100, limit=2)
# Result
else:
# STEP 2. Evaluation
st.markdown('### STEP 2. Evaluation')
# NaN Data
if st.session_state['nan_processed']:
st.write(f"NaN Data process mode is {st.session_state['nan_data']}.")
# Data number
st.write(f"Data Number: {st.session_state['data_num']}")
# Print Best Model
best = {}
best['name'] = st.session_state['model']['name']
best.update(st.session_state['model']['score'])
st.write('Best Model')
st.write(best)
# Print Score
st.write(st.session_state['history'])
# Graph
if st.session_state['mode'] == 'Regression':
st.altair_chart(
plot_reg_evaluation(
true = st.session_state['model']['oob_true'],
pred = st.session_state['model']['oob_pred'],
target = st.session_state['model']['target']
),
use_container_width = True
)
elif st.session_state['mode'] == 'Binary Classification':
st.pyplot(
plot_confusion_matrix(
true = st.session_state['model']['oob_true'],
pred = st.session_state['model']['oob_pred'],
target = st.session_state['model']['target']
)
)
# STEP 3. Feature Importance
features = st.session_state['shap']['features']
importance = st.session_state['shap']['importance']
col_1, col_2 = st.columns([3, 1])
with col_1:
st.markdown('### STEP 3. Feature Importance')
with col_2:
show_number = st.number_input(
label = 'Number',
value = np.minimum(10, len(features)),
min_value = 1,
max_value = len(features),
step = 1
)
st.altair_chart(
plot_importance(
features = features,
importance = importance,
target = st.session_state['model']['target'],
num = show_number
),
use_container_width=True
)
# Download CSV
df_importance =
| pd.DataFrame() | pandas.DataFrame |
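# A minimal sketch of what the truncated "df_importance = ..." line above is
# presumably building (assumption: features and importance are equal-length
# lists, as stored in st.session_state['shap']). st.download_button is a
# standard Streamlit API; the file name below is only illustrative.
import pandas as pd
import streamlit as st

def importance_download_sketch(features, importance):
    df_importance = pd.DataFrame({'feature': features, 'importance': importance})
    st.download_button(
        label='Download importance (CSV)',
        data=df_importance.to_csv(index=False).encode('utf-8'),
        file_name='feature_importance.csv',
        mime='text/csv',
    )
    return df_importance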
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
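# A brief aside (not part of the test above): the ValueError is pandas' way of
# refusing ambiguous truth tests; the idiomatic replacements are the explicit
# reductions shown here.
import pandas as pd

s = pd.Series([True, False, True])
print(s.any(), s.all(), s.empty)  # explicit reductions instead of bool(s)
print(s.eq(True).sum())           # count matches instead of truth-testing a comparison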
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when it receives a seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
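# Standalone sketch (not part of the test class above): the same _metadata /
# __finalize__ hook exercised above is how a pandas subclass carries custom
# attributes through operations. Assumption: default __finalize__ behaviour of
# reasonably recent pandas, which copies attributes listed in _metadata.
import pandas as pd

class TaggedSeries(pd.Series):
    _metadata = ['filename']  # extra attribute to propagate

    @property
    def _constructor(self):
        return TaggedSeries

tagged = TaggedSeries([1, 2, 3])
tagged.filename = 'foo.csv'
view = tagged[:2]
print(view.filename)  # 'foo.csv' -- __finalize__ copied the _metadata attribute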
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index() # the index order changes for mixed-types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
with tm.assertRaises(ValueError):
s.interpolate(method='spline', order=0)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
self.assertEqual(result.filename, 'fname1.csv|fname2.csv')
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
self.assertEqual(result.filename, 'foo+foo')
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_tz_convert_and_localize(self):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_localize('UTC')
# l1 = period_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
for fn in ['tz_localize', 'tz_convert']:
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
self.assertTrue(df1.index.equals(l0_expected))
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
self.assertFalse(df3.index.levels[0].equals(l0))
self.assertTrue(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1))
self.assertFalse(df3.index.levels[1].equals(l1_expected))
df3 = getattr(df2, fn)('US/Pacific', level=1)
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
# Bad Inputs
for fn in ['tz_localize', 'tz_convert']:
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with tm.assertRaisesRegexp(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert_equal(df.y, 5)
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
def test_pct_change(self):
# GH 11150
pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
mask = pnl.isnull()
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
expected[mask] = np.nan
result = pnl.pct_change(axis=axis, fill_method='pad')
self.assert_frame_equal(result, expected)
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
class TestNDFrame(tm.TestCase):
# tests that don't fit elsewhere
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
for p4d in [tm.makePanel4D()]:
tm.assert_panel4d_equal(p4d.squeeze(), p4d)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.ix['ItemA', :, 'A'])
p4d = tm.makePanel4D().reindex(labels=['label1'])
tm.assert_panel_equal(p4d.squeeze(), p4d['label1'])
p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA'])
tm.assert_frame_equal(p4d.squeeze(), p4d.ix['label1', 'ItemA'])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = pd.Series([], name='five')
empty_frame = pd.DataFrame([empty_series])
empty_panel = pd.Panel({'six': empty_frame})
[tm.assert_series_equal(empty_series, higher_dim.squeeze())
for higher_dim in [empty_series, empty_frame, empty_panel]]
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s1[1] = 99
self.assertFalse(s1.equals(s2))
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s2[0] = 9.9
self.assertFalse(s1.equals(s2))
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(
np.random.random(10, ), index=index, columns=['floats'])
df1['text'] = 'the sky is so blue. we could use more chocolate.'.split(
)
df1['start'] = date_range('2000-1-1', periods=10, freq='T')
df1['end'] = date_range('2000-1-1', periods=10, freq='D')
df1['diff'] = df1['end'] - df1['start']
df1['bool'] = (np.arange(10) % 3 == 0)
df1.ix[::2] = nan
df2 = df1.copy()
self.assertTrue(df1['text'].equals(df2['text']))
self.assertTrue(df1['start'].equals(df2['start']))
self.assertTrue(df1['end'].equals(df2['end']))
self.assertTrue(df1['diff'].equals(df2['diff']))
self.assertTrue(df1['bool'].equals(df2['bool']))
self.assertTrue(df1.equals(df2))
self.assertFalse(df1.equals(object))
# different dtype
different = df1.copy()
different['floats'] = different['floats'].astype('float32')
self.assertFalse(df1.equals(different))
# different index
different_index = -index
different = df2.set_index(different_index)
self.assertFalse(df1.equals(different))
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
self.assertFalse(df1.equals(different))
# DatetimeIndex
index = pd.date_range('2000-1-1', periods=10, freq='T')
df1 = df1.set_index(index)
df2 = df1.copy()
self.assertTrue(df1.equals(df2))
# MultiIndex
df3 = df1.set_index(['text'], append=True)
df2 = df1.set_index(['text'], append=True)
self.assertTrue(df3.equals(df2))
df2 = df1.set_index(['floats'], append=True)
self.assertFalse(df3.equals(df2))
# NaN in index
df3 = df1.set_index(['floats'], append=True)
df2 = df1.set_index(['floats'], append=True)
self.assertTrue(df3.equals(df2))
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2))
d = pd.Series(index=range(2))
e = pd.Series(index=range(2))
f = pd.Series(index=range(2))
c[:-1] = d[:-1] = e[0] = f[0] = False
self.assertTrue(a.equals(a))
self.assertTrue(a.equals(b))
self.assertTrue(a.equals(c))
self.assertTrue(a.equals(d))
self.assertFalse(a.equals(e))
self.assertTrue(e.equals(f))
def test_describe_raises(self):
with tm.assertRaises(NotImplementedError):
tm.makePanel().describe()
def test_pipe(self):
df =
| DataFrame({'A': [1, 2, 3]}) | pandas.DataFrame |
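The record boundary above follows a fixed shape: the prompt breaks off mid-statement (here at "df ="), the next cell carries the ground-truth completion, and the final cell names the fully qualified API that the completion exercises. Below is a minimal sketch of a consistency check for one such record; the dict field names and the helper are illustrative assumptions, not part of the dump itself.
# Minimal sketch: sanity-check one record of the shape shown above.
# Field names ("prompt", "completion", "api") are assumptions for illustration.
def completion_matches_api(record: dict) -> bool:
    leaf = record["api"].split(".")[-1]   # e.g. "DataFrame" from "pandas.DataFrame"
    return leaf in record["completion"]   # the completion should invoke that attribute
example = {
    "prompt": "df =",
    "completion": "DataFrame({'A': [1, 2, 3]})",
    "api": "pandas.DataFrame",
}
assert completion_matches_api(example)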
import os
import sys
import datetime
from pkg_resources import resource_filename
import numpy as np
import matplotlib.pyplot as plt
import pandas
import nose.tools as nt
import numpy.testing as nptest
from matplotlib.testing.decorators import image_comparison, cleanup
import pandas.util.testing as pdtest
import wqio
from pycvc import samples, info
from wqio.tests.core_tests import samples_tests, hydro_tests
class _wq_sample_mixin(object):
def test__res_with_units(self):
nt.assert_equal(self.wqs._res_with_units(0.12, 'mg/L'), '0.120 mg/L')
nt.assert_equal(self.wqs._res_with_units(np.nan, 'mg/L'), '--')
def test_siteid(self):
nt.assert_true(hasattr(self.wqs, 'siteid'))
nt.assert_equal(self.wqs.siteid, self.known_siteid)
def test_general_tex_table(self):
nt.assert_true(hasattr(self.wqs, 'general_tex_table'))
nt.assert_equal(self.wqs.general_tex_table, self.known_general_tex_table)
def test_hydro_tex_table(self):
nt.assert_true(hasattr(self.wqs, 'hydro_tex_table'))
nt.assert_equal(self.wqs.hydro_tex_table, self.known_hydro_tex_table)
def test_wq_tex_table(self):
nt.assert_true(hasattr(self.wqs, 'wq_tex_table'))
nt.assert_equal(self.wqs.wq_tex_table, self.known_wq_tex_table)
def test_storm_figure(self):
nt.assert_true(hasattr(self.wqs, 'storm_figure'))
nt.assert_equal(self.wqs.storm_figure, self.known_storm_figure)
def test_other_props(self):
nt.assert_true(hasattr(self.wqs, 'templateISR'))
self.wqs.templateISR = 'test'
nt.assert_equal(self.wqs.templateISR, 'test')
nt.assert_true(hasattr(self.wqs, 'tocentry'))
self.wqs.tocentry = 'test'
nt.assert_equal(self.wqs.tocentry, 'test')
nt.assert_true(hasattr(self.wqs, 'wqstd'))
self.wqs.wqstd = 'test'
nt.assert_equal(self.wqs.wqstd, 'test')
def test_wq_table(self):
df = self.wqs.wq_table(writeToFiles=False)
pdtest.assert_frame_equal(df, self.known_wqtable[df.columns])
class test_CompositeSample(samples_tests.test_CompositeSample_NoStorm, _wq_sample_mixin):
def setup(self):
self.basic_setup()
self.known_siteid = 'Test Site'
self.known_general_tex_table = 'TestSite-2013-02-24-1659-1-General'
self.known_hydro_tex_table = 'TestSite-2013-02-24-1659-2-Hydro'
self.known_wq_tex_table = 'TestSite-2013-02-24-1659-3-WQComposite'
self.known_storm_figure = 'TestSite-2013-02-24-1659-Composite'
self.test_name = 'CompositeNoStorm'
self.known_yfactor = 0.25
self.known_starttime = '2013-02-24 16:59'
self.known_endtime = '2013-02-25 02:59'
self.known_season = 'winter'
self.known_sample_ts_len = 31
self.known_samplefreq = pandas.tseries.offsets.Minute(20)
self.known_samplefreq_type = pandas.tseries.offsets.Minute
self.known_marker = 'x'
self.known_label = 'Composite Sample'
self.known_wqtable = pandas.DataFrame({
'Effluent EMC': {
0: '786 ug/L', 1: '0.160 ug/L', 2: '8.60 ug/L', 3: '140 mg/L',
4: '1,350 ug/L', 5: '6.13 ug/L', 6: '2.20 ug/L', 7: '1.40 mg/L',
8: '1.50 mg/L', 9: '0.0520 mg/L', 10: '1.20 mg/L', 11: '0.500 mg/L',
12: '0.130 mg/L', 13: '110 mg/L', 14: '43.4 ug/L'
},
'Detection Limit': {
0: '0.500 ug/L', 1: '0.0100 ug/L', 2: '0.200 ug/L', 3: '1.00 mg/L',
4: '5.00 ug/L', 5: '0.0500 ug/L', 6: '0.200 ug/L', 7: '0.100 mg/L',
8: '0.100 mg/L', 9: '0.00200 mg/L', 10: '0.100 mg/L', 11: '0.500 mg/L',
12: '0.100 mg/L', 13: '3.00 mg/L', 14: '0.500 ug/L'
},
'Effluent Load': {
0: '40.6 g', 1: '0.00827 g', 2: '0.445 g', 3: '7,240 g',
4: '69.8 g', 5: '0.317 g', 6: '0.114 g', 7: '72.4 g',
8: '77.6 g', 9: '2.69 g', 10: '62.0 g', 11: '0.0259 g',
12: '6.72 g', 13: '5,690 g', 14: '2.24 g'},
'WQ Guideline': {
0: '10.0 ug/L', 1: '10.0 ug/L', 2: '10.0 ug/L', 3: '10.0 mg/L',
4: '10.0 ug/L', 5: '10.0 ug/L', 6: '10.0 ug/L', 7: '10.0 mg/L',
8: '10.0 mg/L', 9: '10.0 mg/L', 10: '10.0 mg/L', 11: '10.0 mg/L',
12: '10.0 mg/L', 13: '10.0 mg/L', 14: '10.0 ug/L'
},
'Parameter': {
0: 'Aluminum (Al)', 1: 'Cadmium (Cd)', 2: 'Copper (Cu)', 3: 'Dissolved Chloride (Cl)',
4: 'Iron (Fe)', 5: 'Lead (Pb)', 6: 'Nickel (Ni)', 7: 'Nitrate (N)',
8: 'Nitrate + Nitrite', 9: 'Orthophosphate (P)', 10: 'Total Kjeldahl Nitrogen (TKN)', 11: 'Total Oil & Grease',
12: 'Total Phosphorus', 13: 'Total Suspended Solids', 14: 'Zinc (Zn)'
}
})
data = pandas.DataFrame({
'concentration': {
0: 786.0, 1: 0.16, 2: 8.60, 3: 140.0,
4: 9000.0, 5: 1350.0, 6: 6.13, 7: 2.20,
8: 1.40, 9: 1.5, 10: 0.052, 11: 1.2,
12: 0.5, 13: 0.13, 14: 110.0, 15: 43.4
},
'detectionlimit': {
0: 0.5, 1: 0.01, 2: 0.20, 3: 1.0,
4: np.nan, 5: 5.0, 6: 0.05, 7: 0.2,
8: 0.1, 9: 0.1, 10: 0.002, 11: 0.1,
12: 0.5, 13: 0.1, 14: 3.0, 15: 0.5
},
'load_outflow': {
0: 40.642016399999555, 1: 0.0082731839999999109,
2: 0.44468363999999516, 3: 7239.0359999999218,
4: 4653665999.9999495, 5: 69.80498999999925,
6: 0.31696636199999656, 7: 0.11375627999999877,
8: 72.390359999999205, 9: 77.561099999999158,
10: 2.6887847999999708, 11: 62.048879999999322,
12: 0.025853699999999719, 13: 6.7219619999999276,
14: 5687.8139999999385, 15: 2.2441011599999756
},
'load_units': {
0: 'g', 1: 'g', 2: 'g', 3: 'g',
4: 'CFU', 5: 'g', 6: 'g', 7: 'g',
8: 'g', 9: 'g', 10: 'g', 11: 'g',
12: 'g', 13: 'g', 14: 'g', 15: 'g'
},
'parameter': {
0: 'Aluminum (Al)', 1: 'Cadmium (Cd)',
2: 'Copper (Cu)', 3: 'Dissolved Chloride (Cl)',
4: 'Escherichia coli', 5: 'Iron (Fe)',
6: 'Lead (Pb)', 7: 'Nickel (Ni)',
8: 'Nitrate (N)', 9: 'Nitrate + Nitrite',
10: 'Orthophosphate (P)', 11: 'Total Kjeldahl Nitrogen (TKN)',
12: 'Total Oil & Grease', 13: 'Total Phosphorus',
14: 'Total Suspended Solids', 15: 'Zinc (Zn)'
},
'units': {
0: 'ug/L', 1: 'ug/L', 2: 'ug/L', 3: 'mg/L',
4: 'CFU/100mL', 5: 'ug/L', 6: 'ug/L', 7: 'ug/L',
8: 'mg/L', 9: 'mg/L', 10: 'mg/L', 11: 'mg/L',
12: 'mg/L', 13: 'mg/L', 14: 'mg/L', 15: 'ug/L'
}
})
self.wqs = samples.CompositeSample(data, self.known_starttime,
endtime=self.known_endtime,
samplefreq=self.known_samplefreq,
storm=None)
self.wqs.siteid = self.known_siteid
self.wqs.label = self.known_label
self.wqs.wqstd = (
info.wqstd_template()
.assign(upper_limit=10)
.query("season == 'summer'")
)
class test_GrabSample(samples_tests.test_GrabSample_NoStorm, _wq_sample_mixin):
def setup(self):
self.basic_setup()
self.known_siteid = 'Test Site'
self.known_general_tex_table = 'TestSite-2013-02-24-1659-1-General'
self.known_hydro_tex_table = 'TestSite-2013-02-24-1659-2-Hydro'
self.known_wq_tex_table = 'TestSite-2013-02-24-1659-3-WQGrab'
self.known_storm_figure = 'TestSite-2013-02-24-1659-Grab'
self.test_name = 'GrabNoStorm'
self.known_yfactor = 0.25
self.known_starttime = '2013-02-24 16:59'
self.known_endtime = '2013-02-25 02:59'
self.known_season = 'winter'
self.known_sample_ts_len = 2
self.known_samplefreq = None
self.known_samplefreq_type = type(None)
self.known_marker = '+'
self.known_label = 'Grab Sample'
self.known_wqtable = pandas.DataFrame({
'Effluent EMC': {
0: '786 ug/L', 1: '0.160 ug/L', 2: '8.60 ug/L', 3: '140 mg/L',
4: '1,350 ug/L', 5: '6.13 ug/L', 6: '2.20 ug/L', 7: '1.40 mg/L',
8: '1.50 mg/L', 9: '0.0520 mg/L', 10: '1.20 mg/L', 11: '0.500 mg/L',
12: '0.130 mg/L', 13: '110 mg/L', 14: '43.4 ug/L'
},
'Detection Limit': {
0: '0.500 ug/L', 1: '0.0100 ug/L', 2: '0.200 ug/L', 3: '1.00 mg/L',
4: '5.00 ug/L', 5: '0.0500 ug/L', 6: '0.200 ug/L', 7: '0.100 mg/L',
8: '0.100 mg/L', 9: '0.00200 mg/L', 10: '0.100 mg/L', 11: '0.500 mg/L',
12: '0.100 mg/L', 13: '3.00 mg/L', 14: '0.500 ug/L'
},
'Effluent Load': {
0: '40.6 g', 1: '0.00827 g', 2: '0.445 g', 3: '7,240 g',
4: '69.8 g', 5: '0.317 g', 6: '0.114 g', 7: '72.4 g',
8: '77.6 g', 9: '2.69 g', 10: '62.0 g', 11: '0.0259 g',
12: '6.72 g', 13: '5,690 g', 14: '2.24 g'},
'WQ Guideline': {
0: '10.0 ug/L', 1: '10.0 ug/L', 2: '10.0 ug/L', 3: '10.0 mg/L',
4: '10.0 ug/L', 5: '10.0 ug/L', 6: '10.0 ug/L', 7: '10.0 mg/L',
8: '10.0 mg/L', 9: '10.0 mg/L', 10: '10.0 mg/L', 11: '10.0 mg/L',
12: '10.0 mg/L', 13: '10.0 mg/L', 14: '10.0 ug/L'
},
'Parameter': {
0: 'Aluminum (Al)', 1: 'Cadmium (Cd)', 2: 'Copper (Cu)', 3: 'Dissolved Chloride (Cl)',
4: 'Iron (Fe)', 5: 'Lead (Pb)', 6: 'Nickel (Ni)', 7: 'Nitrate (N)',
8: 'Nitrate + Nitrite', 9: 'Orthophosphate (P)', 10: 'Total Kjeldahl Nitrogen (TKN)', 11: 'Total Oil & Grease',
12: 'Total Phosphorus', 13: 'Total Suspended Solids', 14: 'Zinc (Zn)'
}
})
data = pandas.DataFrame({
'concentration': {
0: 786.0, 1: 0.16, 2: 8.60, 3: 140.0,
4: 9000.0, 5: 1350.0, 6: 6.13, 7: 2.20,
8: 1.40, 9: 1.5, 10: 0.052, 11: 1.2,
12: 0.5, 13: 0.13, 14: 110.0, 15: 43.4
},
'detectionlimit': {
0: 0.5, 1: 0.01, 2: 0.20, 3: 1.0,
4: np.nan, 5: 5.0, 6: 0.05, 7: 0.2,
8: 0.1, 9: 0.1, 10: 0.002, 11: 0.1,
12: 0.5, 13: 0.1, 14: 3.0, 15: 0.5
},
'load_outflow': {
0: 40.642016399999555, 1: 0.0082731839999999109,
2: 0.44468363999999516, 3: 7239.0359999999218,
4: 4653665999.9999495, 5: 69.80498999999925,
6: 0.31696636199999656, 7: 0.11375627999999877,
8: 72.390359999999205, 9: 77.561099999999158,
10: 2.6887847999999708, 11: 62.048879999999322,
12: 0.025853699999999719, 13: 6.7219619999999276,
14: 5687.8139999999385, 15: 2.2441011599999756
},
'load_units': {
0: 'g', 1: 'g', 2: 'g', 3: 'g',
4: 'CFU', 5: 'g', 6: 'g', 7: 'g',
8: 'g', 9: 'g', 10: 'g', 11: 'g',
12: 'g', 13: 'g', 14: 'g', 15: 'g'
},
'parameter': {
0: 'Aluminum (Al)', 1: 'Cadmium (Cd)',
2: 'Copper (Cu)', 3: 'Dissolved Chloride (Cl)',
4: 'Escherichia coli', 5: 'Iron (Fe)',
6: 'Lead (Pb)', 7: 'Nickel (Ni)',
8: 'Nitrate (N)', 9: 'Nitrate + Nitrite',
10: 'Orthophosphate (P)', 11: 'Total Kjeldahl Nitrogen (TKN)',
12: 'Total Oil & Grease', 13: 'Total Phosphorus',
14: 'Total Suspended Solids', 15: 'Zinc (Zn)'
},
'units': {
0: 'ug/L', 1: 'ug/L', 2: 'ug/L', 3: 'mg/L',
4: 'CFU/100mL', 5: 'ug/L', 6: 'ug/L', 7: 'ug/L',
8: 'mg/L', 9: 'mg/L', 10: 'mg/L', 11: 'mg/L',
12: 'mg/L', 13: 'mg/L', 14: 'mg/L', 15: 'ug/L'
}
})
self.wqs = samples.GrabSample(data, self.known_starttime,
endtime=self.known_endtime,
samplefreq=self.known_samplefreq,
storm=None)
self.wqs.siteid = self.known_siteid
self.wqs.label = self.known_label
self.wqs.wqstd = (
info.wqstd_template()
.assign(upper_limit=10)
.query("season == 'summer'")
)
class test_Storm(hydro_tests.test_Storm):
def setup(self):
# path stuff
self.storm_file = resource_filename('wqio.data', 'teststorm_simple.csv')
self.orig_record = pandas.read_csv(
self.storm_file, index_col='date', parse_dates=True
).resample('5T').fillna(0)
self.hr = wqio.HydroRecord(self.orig_record,
precipcol='rain',
inflowcol='influent',
outflowcol='effluent',
outputfreqMinutes=5,
intereventHours=2,
stormclass=samples.Storm)
self.storm = samples.Storm(self.hr.data, 2,
precipcol=self.hr.precipcol,
inflowcol=self.hr.inflowcol,
outflowcol=self.hr.outflowcol,
freqMinutes=self.hr.outputfreq.n)
self.known_columns = ['rain', 'influent', 'effluent', 'storm']
self.known_index_type = pandas.DatetimeIndex
self.known_start =
| pandas.Timestamp('2013-05-19 06:10') | pandas.Timestamp |
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = pd.date_range(start='20190101', freq='1h', periods=2)
expected = pd.Series([0.25, 2.], index=idx_exp)
out = forecast.resample(arg)
assert_series_equal(out, expected)
assert forecast.resample(None) is None
@pytest.fixture
def rfs_series():
return pd.Series([1, 2],
index=pd.DatetimeIndex(['20190101 01', '20190101 02']))
@pytest.mark.parametrize(
'start,end,start_slice,end_slice,fill_method,exp_val,exp_idx', [
(None, None, None, None, 'interpolate', [1, 1.5, 2],
['20190101 01', '20190101 0130', '20190101 02']),
('20190101', '20190101 0230', None, None, 'interpolate',
[1, 1, 1, 1.5, 2, 2],
['20190101', '20190101 0030', '20190101 01', '20190101 0130',
'20190101 02', '20190101 0230']),
('20190101', '20190101 02', '20190101 0030', '20190101 0130', 'bfill',
[1., 1, 2], ['20190101 0030', '20190101 01', '20190101 0130'])
]
)
def test_reindex_fill_slice(rfs_series, start, end, start_slice, end_slice,
fill_method, exp_val, exp_idx):
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_some_nan():
rfs_series = pd.Series([1, 2, None, 4], index=pd.DatetimeIndex([
'20190101 01', '20190101 02', '20190101 03', '20190101 04',
]))
start, end, start_slice, end_slice, fill_method = \
None, None, None, None, 'interpolate'
exp_val = [1, 1.5, 2, 2.5, 3, 3.5, 4]
exp_idx = [
'20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03', '20190101 0330', '20190101 04']
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_all_nan():
arg = pd.Series([None]*3, index=pd.DatetimeIndex(
['20190101 01', '20190101 02', '20190101 03']))
out = forecast.reindex_fill_slice(arg, freq='30min')
exp = pd.Series([None]*5, index=pd.DatetimeIndex(
['20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03']))
| assert_series_equal(out, exp) | pandas.testing.assert_series_equal |
import glob
import os
import sys
import subprocess
from configparser import ConfigParser
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.io import ascii
from astropy.io import fits as pyfits
from radio_beam import Beam, Beams, commonbeam
import fits_magic as fm
def load_config(config_object, file_=None):
"""
Function to load the config file
"""
config = ConfigParser() # Initialise the config parser
config.readfp(open(file_))
for s in config.sections():
for o in config.items(s):
setattr(config_object, o[0], eval(o[1]))
return config # Save the loaded config file as defaults for later usage
def set_mosdirs(self):
"""
Creates the directory names for the subdirectories to make scripting easier
"""
self.qacontdir = os.path.join(self.qadir, 'continuum')
self.qapoldir = os.path.join(self.qadir, 'polarisation')
self.contworkdir = os.path.join(self.basedir, self.obsid, self.mossubdir, self.moscontdir)
self.contimagedir = os.path.join(self.contworkdir, 'images')
self.contbeamdir = os.path.join(self.contworkdir, 'beams')
self.contmosaicdir = os.path.join(self.contworkdir, 'mosaic')
self.polworkdir = os.path.join(self.basedir, self.obsid, self.mossubdir, self.mospoldir)
self.polimagedir = os.path.join(self.polworkdir, 'images')
self.polbeamdir = os.path.join(self.polworkdir, 'beams')
self.polmosaicdir = os.path.join(self.polworkdir, 'mosaic')
def gen_contdirs(self):
"""
Function to generate the necessary continuum directories
"""
if os.path.isdir(self.contworkdir):
pass
else:
os.makedirs(self.contworkdir)
if os.path.isdir(self.contimagedir):
pass
else:
os.makedirs(self.contimagedir)
if os.path.isdir(self.contbeamdir):
pass
else:
os.makedirs(self.contbeamdir)
if os.path.isdir(self.contmosaicdir):
pass
else:
os.makedirs(self.contmosaicdir)
def copy_contimages(self):
"""
Function to copy the continuum images to the working directory
"""
if self.cont_mode == 'all':
# copy all the images from the continuum directory
print('Copying images for all available beams')
for image in range(40):
os.system('cp ' + os.path.join(self.basedir, self.obsid) + '/' + str(image).zfill(2) + '/continuum/image_mf_*.fits ' + self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
elif self.cont_mode == 'qa':
# Load the qa-continuum file and only copy the images with good quality
c_arr = np.full(40, True)
if os.path.isfile(os.path.join(self.qacontdir, self.obsid, 'dynamicRange.dat')):
data = ascii.read(os.path.join(self.qacontdir, self.obsid, 'dynamicRange.dat'))
c_arr[np.where(data['col2'] == 'X')] = False
for image in range(40):
if c_arr[image]:
os.system('cp ' + os.path.join(self.basedir, self.obsid) + '/' + str(image).zfill(2) + '/continuum/image_mf_*.fits ' + self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
else:
print('Image for beam ' + str(image).zfill(2) + ' not available or validated as bad!')
else:
print('No continuum quality assurance available for observation id ' + str(self.obsid) + '. Copying all available images.')
for image in range(40):
os.system('cp ' + os.path.join(self.basedir, self.obsid) + '/' + str(image).zfill(2) + '/continuum/image_mf_*.fits ' + self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
elif self.cont_mode == 'param':
# Copy all images fulfilling the criteria given for the continuum mosaic
print('Copying all images with a synthesised beam with a maximum size of bmaj=' + str(self.cont_bmaj) + ' and bmin=' + str(self.cont_bmin) + ' and a maximum image rms of ' + str(self.cont_rms))
for image in range(40):
os.system('cp ' + os.path.join(self.basedir, self.obsid) + '/' + str(image).zfill(2) + '/continuum/image_mf_*.fits ' + self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
if os.path.isfile(self.contimagedir + '/I' + str(image).zfill(2) + '.fits'):
bmaj, bmin = fm.get_beam(self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
rms = fm.get_rms(self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
if (bmaj*3600.0 > self.cont_bmaj) or (bmin*3600.0 > self.cont_bmin) or (rms > self.cont_rmsclip):
print('Total power image of Beam ' + str(image).zfill(2) + ' exceeds the specified parameters and is not used!')
os.remove(self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
else:
pass
else:
print('Image for Beam ' + str(image).zfill(2) + ' is not available!')
elif (type(self.cont_mode) == list):
# Copy only the beams given as a list
for image in self.cont_mode:
os.system('cp ' + os.path.join(self.basedir, self.obsid) + '/' + str(image).zfill(2) + '/continuum/image_mf_*.fits ' + self.contimagedir + '/I' + str(image).zfill(2) + '.fits')
if os.path.isfile(self.contimagedir + '/I00.fits'):
if self.cont_use00:
print('Using Beam 00 for mosaicking!')
else:
print('Not using Beam 00 for mosaicking!')
os.remove(self.contimagedir + '/I00.fits')
else:
pass
def copy_contbeams(self):
"""
Find the right beam models in time and frequency for the appropriate beams and copy them over to the working directory
"""
if self.cont_pbtype == 'drift':
# Get the right directory with the minimum difference in time with regard to the observation
beamtimes = sorted(glob.glob(self.beamsrcdir + '*'))
beamtimes_arr = [float(bt.split('/')[-1][:6]) for bt in beamtimes]
bt_array = np.unique(beamtimes_arr)
obstime = float(self.obsid[:6])
deltat = np.abs(bt_array - obstime)
loc_min = np.argmin(deltat)
rightbeamdir = beamtimes[loc_min]
# Get the frequencies of the beam models
channs = sorted(glob.glob(os.path.join(rightbeamdir, 'beam_models/chann_[0-9]')))
freqs = np.full(len(channs), np.nan)
for b, beam in enumerate(channs):
hdul = pyfits.open(os.path.join(beam, rightbeamdir.split('/')[-1] + '_00_I_model.fits'))
freqs[b] = hdul[0].header['CRVAL3']
hdul.close()
# Copy the beam models with the right frequency over to the working directory and regrid them to the image size
for beam in range(40):
if os.path.isfile(self.contimagedir + '/I' + str(beam).zfill(2) + '.fits'):
hdulist = pyfits.open(self.contimagedir + '/I' + str(beam).zfill(2) + '.fits')
freq = hdulist[0].header['CRVAL3']
nchann = np.argmin(np.abs(freqs - freq)) + 1
os.system('cp ' + os.path.join(rightbeamdir, 'beam_models/chann_' + str(nchann) + '/') + rightbeamdir.split('/')[-1] + '_' + str(beam).zfill(2) + '_I_model.fits ' + self.contbeamdir + '/B' + str(beam).zfill(2) + '.fits')
elif self.cont_pbtype == 'gaussian':
for beam in range(40):
if os.path.isfile(self.contimagedir + '/I' + str(beam).zfill(2) + '.fits'):
# Get the frequency from the image
hdu_cont = pyfits.open(self.contimagedir + '/I' + str(beam).zfill(2) + '.fits')
freq = hdu_cont[0].header['CRVAL3']
# Get the cellsize from the beam images and recalculate it based on the frequency of the image
hdu_beam = pyfits.open(self.beamsrcdir + str(beam).zfill(2) + '_gp_avg_orig.fits')
hdu_beam_hdr = hdu_beam[0].header
hdu_beam_data = hdu_beam[0].data
cs1 = hdu_beam_hdr['CDELT1']
cs2 = hdu_beam_hdr['CDELT2']
new_cs1 = cs1 * (1.36063551903e09 / freq)
new_cs2 = cs2 * (1.36063551903e09 / freq)
hdu_beam_hdr['CDELT1'] = new_cs1
hdu_beam_hdr['CDELT2'] = new_cs2
# Write the new not regridded beam to a temporary file
pyfits.writeto(self.contbeamdir + '/B' + str(beam).zfill(2) + '.fits', data=hdu_beam_data, header=hdu_beam_hdr, overwrite=True)
else:
print('Mode ' + str(self.cont_pbtype) + ' is not supported. Exiting script!')
sys.exit()
def get_contfiles(self):
"""
Get a list of the images and pbimages in the continuum working directory
"""
images = sorted(glob.glob(self.contimagedir + '/I[0-9][0-9].fits'))
pbimages = sorted(glob.glob(self.contbeamdir + '/B[0-9][0-9].fits'))
return images, pbimages
def clean_contmosaic_tmp_data(self):
os.system('rm -rf ' + self.contimagedir + '/*_reconv_tmp.fits')
os.system('rm -rf ' + self.contimagedir + '/*_reconv_tmp_pbcorr.fits')
os.system('rm -rf ' + self.contimagedir + '/*_mos.fits')
os.system('rm -rf ' + self.contimagedir + '/*_reconv_tmp_uncorr.fits')
os.system('rm -rf ' + self.contimagedir + '/*_uncorr.fits')
def gen_poldirs(self):
"""
Function to generate the necessary polarisation directories
"""
if os.path.isdir(self.polworkdir):
pass
else:
os.makedirs(self.polworkdir)
if os.path.isdir(self.polimagedir):
pass
else:
os.makedirs(self.polimagedir)
if os.path.isdir(self.polbeamdir):
pass
else:
os.makedirs(self.polbeamdir)
if os.path.isdir(self.polmosaicdir):
pass
else:
os.makedirs(self.polmosaicdir)
def copy_polimages(self, veri):
"""
Function to copy the polarisation images of a specific subband to the working directory
"""
for b in range(40):
for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
if veri[b, sb]:
qcube = pyfits.open(os.path.join(self.basedir, self.obsid, str(b).zfill(2), 'polarisation/Qcube.fits'))
ucube = pyfits.open(os.path.join(self.basedir, self.obsid, str(b).zfill(2), 'polarisation/Ucube.fits'))
qhdu = qcube[0]
uhdu = ucube[0]
qhdr = qhdu.header
uhdr = uhdu.header
qplane = qhdu.data[sb,:,:]
uplane = uhdu.data[sb,:,:]
newqfreq = qhdr['CRVAL3'] + float(sb) * qhdr['CDELT3']
newufreq = uhdr['CRVAL3'] + float(sb) * uhdr['CDELT3']
qhdr.update(NAXIS3=1, CRVAL3=newqfreq)
uhdr.update(NAXIS3=1, CRVAL3=newufreq)
# Get the synthesised beam parameters and put them into the header
qbmaj = get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 0, 0][sb]
qbmin = get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 1, 0][sb]
qbpa = get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 2, 0][sb]
ubmaj = get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 0, 1][sb]
ubmin = get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 1, 1][sb]
ubpa = get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 2, 1][sb]
qhdr.update(BMAJ=qbmaj / 3600.0, BMIN=qbmin / 3600.0, BPA=qbpa)
uhdr.update(BMAJ=ubmaj / 3600.0, BMIN=ubmin / 3600.0, BPA=ubpa)
pyfits.writeto(self.polimagedir + '/Q_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits', data=qplane, header=qhdr, overwrite=True)
pyfits.writeto(self.polimagedir + '/U_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits', data=uplane, header=uhdr, overwrite=True)
qlist = glob.glob(self.polimagedir + '/Q_B00_SB*.fits')
ulist = glob.glob(self.polimagedir + '/U_B00_SB*.fits')
if len(qlist) == 0 and len(ulist) == 0:
pass
else:
if self.pol_use00:
print('Using Beam 00 for polarisation mosaicking!')
else:
print('Not using Beam 00 for polarisation mosaicking!')
for qim in qlist:
os.remove(qim)
for uim in ulist:
os.remove(uim)
def copy_polbeams(self):
"""
Find the right beam models in time and frequency for the appropriate beams and copy them over to the working directory
"""
if self.pol_pbtype == 'drift':
# Get the right directory with the minimum difference in time with regard to the observation
beamtimes = sorted(glob.glob(self.beamsrcdir + '*'))
beamtimes_arr = [float(bt.split('/')[-1][:6]) for bt in beamtimes]
bt_array = np.unique(beamtimes_arr)
obstime = float(self.obsid[:6])
deltat = np.abs(bt_array - obstime)
loc_min = np.argmin(deltat)
rightbeamdir = beamtimes[loc_min]
# Get the frequencies of the beam models
channs = sorted(glob.glob(os.path.join(rightbeamdir, 'beam_models/chann_[0-9]')))
freqs = np.full(len(channs), np.nan)
for b, beam in enumerate(channs):
hdul = pyfits.open(os.path.join(beam, rightbeamdir.split('/')[-1] + '_00_I_model.fits'))
freqs[b] = hdul[0].header['CRVAL3']
hdul.close()
# Copy the beam models with the right frequency over to the working directory
for b in range(40):
for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
if os.path.isfile(self.polimagedir + '/Q_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits'):
hdulist = pyfits.open(self.polimagedir + '/Q_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits')
freq = hdulist[0].header['CRVAL3']
nchann = np.argmin(np.abs(freqs - freq)) + 1
os.system('cp ' + os.path.join(rightbeamdir, 'beam_models/chann_' + str(nchann) + '/') + rightbeamdir.split('/')[-1] + '_' + str(b).zfill(2) + '_I_model.fits ' + self.polbeamdir + '/PB_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits')
elif self.pol_pbtype == 'gaussian':
for b in range(40):
for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
if os.path.isfile(self.polimagedir + '/Q_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits'):
# Get the frequency from the image
hdu_pol = pyfits.open(self.polimagedir + '/Q_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits')
freq = hdu_pol[0].header['CRVAL3']
# Get the cellsize from the beam images and recalculate it based on the frequency of the image
hdu_beam = pyfits.open(self.beamsrcdir + str(b).zfill(2) + '_gp_avg_orig.fits')
hdu_beam_hdr = hdu_beam[0].header
hdu_beam_data = hdu_beam[0].data
cs1 = hdu_beam_hdr['CDELT1']
cs2 = hdu_beam_hdr['CDELT2']
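# As in the continuum case: scale the beam cell size inversely with frequency relative to the (presumed) ~1.36 GHz reference of the beam model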
new_cs1 = cs1 * (1.36063551903e09 / freq)
new_cs2 = cs2 * (1.36063551903e09 / freq)
hdu_beam_hdr['CDELT1'] = new_cs1
hdu_beam_hdr['CDELT2'] = new_cs2
# Write the new, not yet regridded beam to a temporary file
pyfits.writeto(self.polbeamdir + '/PB_B' + str(b).zfill(2) + '_SB' + str(sb).zfill(2) + '.fits', data=hdu_beam_data, header=hdu_beam_hdr, overwrite=True)
def get_polfiles(self, sb):
"""
Get a list of the images and pbimages in the polarisation working directory
"""
qimages = sorted(glob.glob(self.polimagedir + '/Q_B[0-9][0-9]_SB' + str(sb).zfill(2) + '.fits'))
uimages = sorted(glob.glob(self.polimagedir + '/U_B[0-9][0-9]_SB' + str(sb).zfill(2) + '.fits'))
pbimages = sorted(glob.glob(self.polbeamdir + '/PB_B[0-9][0-9]_SB' + str(sb).zfill(2) + '.fits'))
return qimages, uimages, pbimages
def get_common_psf(self, veri, format='fits'):
"""
Common psf for the list of fits files
"""
beams = []
if format == 'fits':
bmajes = []
bmines = []
bpas = []
for f in veri:
ih = pyfits.getheader(f)
bmajes.append(ih['BMAJ'])
bmines.append(ih['BMIN'])
bpas.append(ih['BPA'])
bmajarr = np.array(bmajes)
bminarr = np.array(bmines)
bpaarr = np.array(bpas)
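# Fold the beams together pairwise: each iteration replaces entry i+1 with the common beam of entries i and i+1, so the last entry ends up holding the common beam of all images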
for i in range(0, len(bmajes) - 1):
ni = i + 1
beams = Beams((bmajarr[[i, ni]]) * u.deg, (bminarr[[i, ni]]) * u.deg, bpaarr[[i, ni]] * u.deg)
common = commonbeam.commonbeam(beams)
bmajarr[ni] = common.major/u.deg
bminarr[ni] = common.minor / u.deg
bpaarr[ni] = common.pa / u.deg
elif format == 'array':
bmajes = np.empty(0)
bmines = np.empty(0)
bpas = np.empty(0)
for b in range(40):
for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
if veri[b,sb]:
bmajes = np.append(bmajes, (get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 0, 0][sb]))
bmines = np.append(bmines, (get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 1, 0][sb]))
bpas = np.append(bpas, (get_param(self, 'polarisation_B' + str(b).zfill(2) + '_targetbeams_qu_beamparams')[:, 2, 0][sb]))
bmajarr = bmajes[~pd.isnull(bmajes)]
bminarr = bmines[~pd.isnull(bmines)]
bpaarr = bpas[~pd.isnull(bpas)]
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
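# td[0] is NaT (diff of the first element) and cannot be forward-filled; the NaN inserted at position 2 is filled with td[1] (0 days), so fillna(0) with position 0 reset to NaN reproduces the expected result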
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
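# bits 0, 2, 3 and 5 are set, so rows 1 and 4 of the masked column are null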
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcast to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# generate features
import networkx as nx
import pandas as pd
import numpy as np
from networkx.algorithms import node_classification
import time
from collections import Counter
from utils import normalize_features
def dayday_feature(data, n_class=2, label_most_common_1=19, flag_unlabel=0):
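"""
Build per-node features from the graph edge list: first- and second-order in/out degrees and edge-weight sums,
plus count and row-normalised distributions of neighbouring node labels
(with variants that exclude the most common label, index label_most_common_1).
"""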
t1 = time.time()
data = data.copy()
x = data['fea_table'].copy()
num_nodes = x.shape[0]
nodes_all = list(x.index)
df = data['edge_file'].copy()
max_weight = max(df['edge_weight'])
df.rename(columns={'edge_weight': 'weight'}, inplace=True)
degree_in_1st = np.zeros(num_nodes)
degree_out_1st = np.zeros(num_nodes)
weight_in_1st = np.zeros(num_nodes)
weight_out_1st = np.zeros(num_nodes)
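# Accumulate first-order in/out degrees and summed edge weights per node from the edge list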
for source, target, weight in df.values:
source = int(source)
target = int(target)
degree_in_1st[target] += 1
degree_out_1st[source] += 1
weight_in_1st[target] += weight
weight_out_1st[source] += weight
degree_1st_diff = degree_in_1st - degree_out_1st
weight_1st_diff = weight_in_1st - weight_out_1st
features_1 = np.concatenate([
degree_in_1st.reshape(-1, 1),
degree_out_1st.reshape(-1, 1),
weight_in_1st.reshape(-1, 1),
weight_out_1st.reshape(-1, 1),
degree_1st_diff.reshape(-1, 1),
weight_1st_diff.reshape(-1, 1)
], axis=1)
features_in_1st = pd.DataFrame({"node_index": np.arange(num_nodes), "degree_in_1st": degree_in_1st, "weight_in_1st": weight_in_1st})
df_degree_in_1st = pd.merge(left=df, right=features_in_1st, left_on="src_idx", right_on="node_index", how="left")
df_degree_in_1st_info = df_degree_in_1st.groupby('dst_idx')['degree_in_1st'].agg({
'degree_in_1st_sum': np.sum, 'degree_in_1st_mean': np.mean, 'degree_in_1st_min': np.min, 'degree_in_1st_max': np.max, 'degree_in_1st_median': np.median
})
df_weight_in_1st_info = df_degree_in_1st.groupby('dst_idx')['weight_in_1st'].agg({
'weight_in_1st_sum': np.sum, 'weight_in_1st_mean': np.mean, 'weight_in_1st_min': np.min, 'weight_in_1st_max': np.max, 'weight_in_1st_median': np.median
})
df_degree_in_2nd = pd.DataFrame({"node_index": df_degree_in_1st_info.index, "degree_in_2nd": df_degree_in_1st_info['degree_in_1st_sum']})
df_degree_in_2nd = pd.merge(left=df, right=df_degree_in_2nd, how="left", left_on="src_idx", right_on="node_index")
df_degree_in_2nd_info = df_degree_in_2nd.groupby('dst_idx')['degree_in_2nd'].agg({
'degree_in_2nd_sum': np.sum, 'degree_in_2nd_mean': np.mean, 'degree_in_2nd_min': np.min, 'degree_in_2nd_max': np.max, 'degree_in_2nd_median': np.median
})
features_2_index = df_degree_in_1st_info.index
features_2_t = np.hstack([df_degree_in_1st_info.values, df_weight_in_1st_info.values, df_degree_in_2nd_info.values])
features_2 = np.zeros((num_nodes, features_2_t.shape[1]))
for i, index in enumerate(features_2_index):
features_2[index] = features_2_t[i]
train_y = data['train_label'].copy()
df_info_in = pd.merge(left=df, right=train_y, how='left', left_on='src_idx', right_on='node_index')
if flag_unlabel == 0:
df_info_in.dropna(inplace=True)
else:
df_info_in.fillna(-1, inplace=True)
df_labels_in_count = df_info_in.pivot_table(index=["dst_idx"], columns='label', aggfunc='size', fill_value=0)
df_labels_in_precent = pd.crosstab(index=df_info_in.dst_idx, columns=df_info_in.label, normalize='index')
df_labels_in_without_most_common = df_info_in.copy()
df_labels_in_without_most_common = df_labels_in_without_most_common[df_labels_in_without_most_common.label != label_most_common_1]
df_labels_in_precent_without_most_common = pd.crosstab(index=df_labels_in_without_most_common.dst_idx, columns=df_labels_in_without_most_common.label, normalize='index')
df_labels_weight_count_in = df_info_in.pivot_table(index=['dst_idx'], columns='label', values='weight', aggfunc='sum', fill_value=0)
df_labels_weight_percent_in = pd.crosstab(index=df_info_in.dst_idx, columns=df_info_in.label, values=df_info_in.weight, aggfunc='sum', normalize='index')
df_labels_weight_percent_in_without_most_common = pd.crosstab(
index=df_labels_in_without_most_common.dst_idx, columns=df_labels_in_without_most_common.label, values=df_labels_in_without_most_common.weight,
aggfunc='sum', normalize='index')
features_3_index = list(df_labels_in_count.index)
features_3_t = np.hstack((df_labels_in_count.values, df_labels_in_precent.values, df_labels_weight_count_in.values, df_labels_weight_percent_in.values))
features_3 = np.zeros((num_nodes, features_3_t.shape[1]))
for i, index in enumerate(features_3_index):
features_3[index] = features_3_t[i]
labels_in_temp = features_3[:, :n_class]
labels_weight_in_temp = features_3[:, 2*n_class:3*n_class]
features_labels_all_in_2nd = np.zeros((num_nodes, n_class))
features_labels_weight_all_in_2nd = np.zeros((num_nodes, n_class))
for source, target, weight in df.values:
source = int(source)
target = int(target)
features_labels_all_in_2nd[source] += labels_in_temp[target]
features_labels_weight_all_in_2nd[source] += labels_weight_in_temp[target]
features_labels_all_in_2nd_percent = np.delete(features_labels_all_in_2nd, label_most_common_1, axis=1)
features_labels_all_in_2nd_percent = normalize_features(features_labels_all_in_2nd_percent)
features_out_1st = pd.DataFrame({"node_index": np.arange(num_nodes), "degree_out_1st": degree_out_1st, "weight_out_1st": weight_out_1st})
df_degree_out_1st = pd.merge(left=df, right=features_out_1st, left_on="dst_idx", right_on="node_index", how="left")
df_degree_out_1st_info = df_degree_out_1st.groupby('src_idx')['degree_out_1st'].agg({
'degree_out_1st_sum': np.sum, 'degree_out_1st_mean': np.mean, 'degree_out_1st_min': np.min, 'degree_out_1st_max': np.max, 'degree_out_1st_median': np.median
})
df_weight_out_1st_info = df_degree_out_1st.groupby('src_idx')['weight_out_1st'].agg({
'weight_out_1st_sum': np.sum, 'weight_out_1st_mean': np.mean, 'weight_out_1st_min': np.min, 'weight_out_1st_max': np.max, 'weight_out_1st_median': np.median
})
df_degree_out_2nd = pd.DataFrame({"node_index": df_degree_out_1st_info.index, "degree_out_2nd": df_degree_out_1st_info['degree_out_1st_sum']})
df_degree_out_2nd = pd.merge(left=df, right=df_degree_out_2nd, how="left", left_on="dst_idx", right_on="node_index")
df_degree_out_2nd_info = df_degree_out_2nd.groupby('src_idx')['degree_out_2nd'].agg({
'degree_out_2nd_sum': np.sum, 'degree_out_2nd_mean': np.mean, 'degree_out_2nd_min': np.min, 'degree_out_2nd_max': np.max, 'degree_out_2nd_median': np.median
})
features_4_index = df_degree_out_1st_info.index
features_4_t = np.hstack([df_degree_out_1st_info.values, df_weight_out_1st_info.values, df_degree_out_2nd_info.values])
features_4 = np.zeros((num_nodes, features_4_t.shape[1]))
for i, index in enumerate(features_4_index):
features_4[index] = features_4_t[i]
df_info_out = pd.merge(left=df, right=train_y, how='left', left_on='dst_idx', right_on='node_index')
if flag_unlabel == 0:
df_info_out.dropna(inplace=True)
else:
df_info_out.fillna(-1, inplace=True)
df_labels_out_count = df_info_out.pivot_table(index=["src_idx"], columns='label', aggfunc='size', fill_value=0)
df_labels_out_precent = pd.crosstab(index=df_info_out.src_idx, columns=df_info_out.label, normalize='index')
df_labels_out_without_most_common = df_info_out.copy()
df_labels_out_without_most_common = df_labels_out_without_most_common[df_labels_out_without_most_common.label != label_most_common_1]
df_labels_out_precent_without_most_common = pd.crosstab(index=df_labels_out_without_most_common.src_idx, columns=df_labels_out_without_most_common.label, normalize='index')
df_labels_weight_count_out = df_info_out.pivot_table(index=['src_idx'], columns='label', values='weight', aggfunc='sum', fill_value=0)
df_labels_weight_percent_out = pd.crosstab(index=df_info_out.src_idx, columns=df_info_out.label, values=df_info_out.weight, aggfunc='sum', normalize='index')
df_labels_weight_percent_out_without_most_common = pd.crosstab(
index=df_labels_out_without_most_common.src_idx, columns=df_labels_out_without_most_common.label, values=df_labels_out_without_most_common.weight,
aggfunc='sum', normalize='index')
features_5_index = list(df_labels_out_count.index)
features_5_t = np.hstack((df_labels_out_count.values, df_labels_out_precent.values, df_labels_weight_count_out.values, df_labels_weight_percent_out.values))
features_5 = np.zeros((num_nodes, features_5_t.shape[1]))
for i, index in enumerate(features_5_index):
features_5[index] = features_5_t[i]
features_merge = np.concatenate([
features_1,
features_2,
features_3,
features_4,
features_5,
features_labels_all_in_2nd,
features_labels_all_in_2nd_percent
], axis=1)
features_merge = np.unique(features_merge, axis=1)
features_merge = np.delete(features_merge, np.argwhere(np.sum(features_merge, axis=0)==0), axis=1)
return features_merge
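# A minimal usage sketch for dayday_feature (values are hypothetical; the layout of the
# `data` dict is inferred from the accesses above: 'fea_table' indexed by node id,
# 'edge_file' with columns src_idx / dst_idx / edge_weight, 'train_label' with
# node_index / label):
#
#   toy_data = {
#       'fea_table': pd.DataFrame(index=range(4)),
#       'edge_file': pd.DataFrame({'src_idx': [0, 1, 2],
#                                  'dst_idx': [1, 2, 3],
#                                  'edge_weight': [1.0, 0.5, 2.0]}),
#       'train_label': pd.DataFrame({'node_index': [0, 1], 'label': [0, 1]}),
#   }
#   feats = dayday_feature(toy_data, n_class=2, label_most_common_1=1)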
def dayday_feature_old(data, flag_unlabel=0):
t1 = time.time()
data = data.copy()
x = data['fea_table'].copy()
num_nodes = x.shape[0]
nodes_all = list(x.index)
df = data['edge_file'].copy()
max_weight = max(df['edge_weight'])
df.rename(columns={'edge_weight': 'weight'}, inplace=True)
degree_in_1st = np.zeros(num_nodes)
degree_out_1st = np.zeros(num_nodes)
weight_in_1st = np.zeros(num_nodes)
weight_out_1st = np.zeros(num_nodes)
for source, target, weight in df.values:
source = int(source)
target = int(target)
degree_in_1st[target] += 1
degree_out_1st[source] += 1
weight_in_1st[target] += weight
weight_out_1st[source] += weight
degree_1st_diff = degree_in_1st - degree_out_1st
weight_1st_diff = weight_in_1st - weight_out_1st
features_1 = np.concatenate([
degree_in_1st.reshape(-1, 1),
degree_out_1st.reshape(-1, 1),
weight_in_1st.reshape(-1, 1),
weight_out_1st.reshape(-1, 1),
degree_1st_diff.reshape(-1, 1),
weight_1st_diff.reshape(-1, 1)
], axis=1)
features_in_1st = pd.DataFrame({"node_index": np.arange(num_nodes), "degree_in_1st": degree_in_1st, "weight_in_1st": weight_in_1st})
df_degree_in_1st =
|
pd.merge(left=df, right=features_in_1st, left_on="src_idx", right_on="node_index", how="left")
|
pandas.merge
|
#!/usr/bin/env python
'''
<NAME> October 2018
Scripts for looking at and evaluating input data files for dvmdostem.
Generally data has been prepared by M. Lindgren of SNAP for the IEM project and
consists of directories of well labled .tif images, with one image for each
timestep.
This script has (or will have) a variety of routines for summarizing the data
and displaying plots that will let us look for problems, missing data, or
anomolies.
'''
import os
import sys
import subprocess
import glob
import pickle
import multiprocessing
import time
import datetime as dt
from osgeo import gdal
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
TMP_DATA = 'climatology-intermediate-data'
def timeseries_summary_stats_and_plots(base_path, secondary_path_list):
'''
Spawns one worker process per secondary path; each worker builds a multi-page
PDF of climatology summary plots (monthlies, overview and per-period averages).
'''
# Decades for projected, truncated first
fx_periods = [
(2006,2010),(2010,2020),(2020,2030),(2030,2040),(2040,2050),
(2050,2060),(2060,2070),(2070,2080),(2080,2090),(2090,2100)
]
# Decades for historic, truncated at end
hist_periods = [
(1901,1911),(1911,1921),(1921,1931),(1931,1941),(1941,1951),
(1951,1961),(1961,1971),(1971,1981),(1981,1991),(1991,2001),
(2001,2011),(2011,2015)
]
procs = []
for i in secondary_path_list:
if 'pr_total' in i.lower():
units = 'mm month-1'
elif 'tas_mean' in i.lower():
units = 'degrees C'
elif 'vap_mean' in i.lower():
units = 'hPa'
elif 'rsds_mean' in i.lower():
units = 'MJ-m2-d1'
elif 'hurs_mean' in i.lower():
units = 'percent'
else:
print("ERROR! could not determine units for {}; skipping.".format(i))
continue
if '_cru' in i.lower():
periods = hist_periods
elif '_mri' in i.lower():
periods = fx_periods
elif '_ncar' in i.lower():
periods = fx_periods
secondary_path = i
print("MAIN PROCESS! [{}] Starting worker...".format(os.getpid()))
p = multiprocessing.Process(target=worker_func, args=(base_path, secondary_path, units, periods))
procs.append(p)
p.start()
print("Done starting processes. Looping to set join on each process...")
for p in procs:
p.join()
print("DONE! Plots should be saved...")
def worker_func(base_path, secondary_path, units, periods):
'''
'''
print("worker function! pid:{}".format(os.getpid()))
print(" [{}] {}".format(os.getpid(), base_path))
print(" [{}] {}".format(os.getpid(), secondary_path))
print(" [{}] {}".format(os.getpid(), units))
monthlies_figure = get_monthlies_figure(
base_path, secondary_path,
title='\n'.join((base_path, secondary_path)),
units=units,
src='fresh',
save_intermediates=False,
madata=None
)
overview_figure, period_averages = get_overview_figure(
periods,
base_path, secondary_path,
title='\n'.join((base_path, secondary_path)),
units=units,
src='fresh', # can be: fresh, pickle, or passed
save_intermediates=False,
padata=None
)
individual_figs, _ = get_period_avg_figures(
periods,
base_path, secondary_path,
title=os.path.dirname(secondary_path),
units=units,
src='passed',
padata=period_averages
)
# Create multi-page pdf document
import matplotlib.backends.backend_pdf
ofname = "climatology_{}.pdf".format(secondary_path.split("/")[0])
print("Building PDF with many images: {}".format(ofname))
pdf = matplotlib.backends.backend_pdf.PdfPages(ofname)
pdf.savefig(monthlies_figure)
pdf.savefig(overview_figure)
for f in individual_figs:
pdf.savefig(f)
pdf.close()
print("Done saving pdf: {}".format(ofname))
def create_vrt(filelist, ofname):
'''
Creates a GDAL vrt (virtual file format) for a series of input files.
Expects each of the files in the filelist to be a single-band GeoTiff.
The files will be combined into a single .vrt file with one Band for each
of the input files. The single VRT file may then be further manipulated with
GDAL (i.e take the average over all the bands).
Parameters
----------
filelist : list of strings (paths) to files that will be combined
ofname : string for a filename that will be written
Returns
-------
None
Use Cases, Examples
-------------------
- Create a monthly or decadal summary file for a set of images representing
a timeseries (e.g. tifs that will be pre-processed and turned to netcdf files
for dvmdostem runs).
'''
basename = os.path.basename(ofname)
basename_noext, ext = os.path.splitext(basename)
temporary_filelist_file = os.path.join("/tmp/", "filelist-pid-{}-{}.txt".format(os.getpid(), basename_noext))
with open(temporary_filelist_file, 'w') as f:
f.write("\n".join(filelist))
result = subprocess.check_call([
'gdalbuildvrt',
'-overwrite',
'-separate',
ofname,
'-input_file_list', temporary_filelist_file
])
os.remove(temporary_filelist_file)
def average_over_bands(ifname, bands='all'):
'''
Given an input file (`ifname`), this function computes the average over all
the bands and returns the result. Assumes the bands are named Band1, Band2,
etc.
Parameters
----------
ifname : str
A multi-band file that can be opened and read with GDAL. Expects that all
bands have data and share the same spatial extent. Data less than or equal
to -9999 is masked out and ignored.
bands : str
One of 'all', 'first10', or 'first3'. Selects a subset of bands for faster
processing for testing and development.
Returns
-------
avg : numpy masked array
Returned array is the same shape as an individual band in the input file,
and with each pixel being the average of the pixel values in all of the
input file's bands.
'''
ds = gdal.Open(ifname)
print(" [ DESCRIPTION ]: ", ds.GetDescription())
print(" [ RASTER BAND COUNT ]: ", ds.RasterCount)
print(" [ RASTER Y SIZE ]: ", ds.RasterYSize)
print(" [ RASTER X SIZE ]: ", ds.RasterXSize)
if bands == 'all':
band_range = list(range(1, ds.RasterCount+1))
elif bands == 'first10':
band_range = list(range(1, 10+1))
elif bands == 'first3':
band_range = list(range(1, 3+1))
print(" [ AVERAGE OVER BANDS ]: {}".format(len(band_range)))
print(" [ START BAND ]: {}".format(band_range[0]))
print(" [ END BAND ]: {}".format(band_range[-1]))
# allocate a storage location
running_sum = np.ma.masked_less_equal(np.zeros((ds.RasterYSize, ds.RasterXSize)), -9999)
for band in band_range:
dsb = ds.GetRasterBand(band)
if dsb is None:
print("WARNING: could not read band {}; skipping.".format(band))
continue  # as per the example here: https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
masked_data = np.ma.masked_less_equal(dsb.ReadAsArray(), -9999)
running_sum += masked_data
print("adding band: {} band min/max: {}/{} running_sum min/max: {}/{}".format(
band,
masked_data.min(), masked_data.max(),
running_sum.min(), running_sum.max()
))
# Compute average over the bands that were summed
avg = running_sum / float(len(band_range))
# Close gdal file
ds = None
return avg
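# Typical combination of the two helpers above (paths and file pattern are hypothetical):
#
#   tifs = sorted(glob.glob('/some/dir/tas_mean_*_2006.tif'))
#   create_vrt(tifs, '/tmp/tas_2006.vrt')                      # one band per input tif
#   avg_2006 = average_over_bands('/tmp/tas_2006.vrt', bands='all')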
def read_period_averages(periods):
'''
Reads pickled period average data from the TMP_DATA directory. Expects files
to be in a further sub-directory, period-averages, and have names
like: "pa-{start}-{end}.pickle".
Parameters
----------
periods : list of tuples
Each tuple should have values (start, end) that are used to define the
period.
Returns
-------
period_averages : list
A list of (masked) numpy arrays that have been un-pickled from the TMP_DATA
directory. The pickles are expected to be the period averages built using
other routines in this script.
'''
print("Reading period average pickles into list...")
period_averages = []
for i, (start, end) in enumerate(periods):
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), 'pa-{}-{}.pickle'.format(start, end))
pa = pickle.load(open(path, 'rb'))
period_averages.append(pa)
print("Done reading period average pickles into list.")
return period_averages
def read_monthly_pickles(months=list(range(1,13))):
print("reading monthly pickle files for months {}...".format(months))
mavgs = []
for m in months:
path = os.path.join(
TMP_DATA,
'month-averages-pid{}'.format(os.getpid()),
'month-{:02d}.pickle'.format(m)
)
ma = pickle.load(open(path, 'rb'))
mavgs.append(ma)
print("Returning monthly averages list..")
return mavgs
def calculate_period_averages(periods, base_path, secondary_path, save_intermediates=False):
'''Given a stack of tif files, one file for each month, this routine will
calculate the averages for the supplied periods. Periods are expected to be
selections of years, i.e. 1901 to 1911.
Parameters
----------
periods : list of tuples
each tuple has a start and end year for the period
base_path : str
path on the file system where files are located
secondary_path : str
remainder of path on file system where files will be found. The secondary
path string is expected to be somethign like this:
"ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif"
with the one set of braces for the month one set of braces for the year.
This function will fill the braces to match any month and the years
specified in the periods tuples
save_intermediates : bool
when true, period average array will be pickled for each period. Will be
saved like so 'climatology/period-averages/pa-{}-{}.pickle'
Returns
-------
list of 2D masked numpy arrays
'''
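# Illustration of the two-stage template fill used below (file name is hypothetical):
#   secondary_path = "tas_mean_C_iem/tas_mean_{month:}_{year:}.tif"
#   secondary_path.format(month="*", year="{:04d}")  ->  "tas_mean_C_iem/tas_mean_*_{:04d}.tif"
#   ...then .format(1901)                            ->  "tas_mean_C_iem/tas_mean_*_1901.tif"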
# Ensure there is a place to put the vrt files
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
# Make the VRTs for the periods
for i, (start, end) in enumerate(periods):
print("[ period {} ] Making vrt for period {} to {} (range {})".format(i, start, end, list(range(start, end))))
filelist = []
for year in range(start, end):
final_secondary_path = secondary_path.format(month="*", year="{:04d}")
#print os.path.join(base_path, final_secondary_path.format(year))
single_year_filelist = sorted(glob.glob(os.path.join(base_path, final_secondary_path.format(year))))
#print "Length of single year filelist {}".format(len(single_year_filelist))
filelist += single_year_filelist
print("Length of full filelist: {} ".format(len(filelist)))
vrtp = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), "period-{}-{}.vrt".format(start, end))
create_vrt(filelist, vrtp)
# Calculate the period averages from the VRT files
period_averages = []
for i, (start, end) in enumerate(periods):
# Find the average over the selected range
vrtp = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), "period-{}-{}.vrt".format(start, end))
pa = average_over_bands(vrtp, bands='all')
period_averages.append(pa)
if save_intermediates:
# Make sure there is a place to put our pickles
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
print("Dumping pickle for period {} to {}".format(start, end))
pickle.dump(pa, open(os.path.join(path, "pa-{}-{}.pickle".format(start, end)), 'wb'))
# Clean up any intermediate files.
if not save_intermediates:
papath = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
for f in os.listdir(papath):
os.remove(os.path.join(papath, f))
os.rmdir(papath)
print("Returning period averages list...")
return period_averages
def calculate_monthly_averages(months, base_path, secondary_path, save_intermediates=False):
'''
'''
# Make sure there is a place to put our pickles and vrt files
intermediates_path = os.path.join(TMP_DATA, 'month-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(intermediates_path)
except OSError:
if not os.path.isdir(intermediates_path):
raise
# Build the vrt files
print("Creating monthly VRT files...")
for im, MONTH in enumerate(months[:]):
final_secondary_path = secondary_path.format(month="{:02d}", year="*").format(im+1)
filelist = sorted(glob.glob(os.path.join(base_path, final_secondary_path)))
if len(filelist) < 1:
print("ERROR! No files found in {}".format( os.path.join(base_path, final_secondary_path) ))
vrt_path = os.path.join(intermediates_path,"month-{:02d}.vrt".format(im+1))
create_vrt(filelist, vrt_path)
print("Computing monthly averages from monthly VRT files...")
# make list of expected input vrt paths
ivp_list = [os.path.join(intermediates_path,"month-{:02d}.vrt".format(im)) for im in range(1, len(months)+1)]
monthly_averages = [average_over_bands(ivp, bands='all') for ivp in ivp_list]
if save_intermediates:
print("Saving pickles...")
for im, ma in enumerate(monthly_averages):
pp = os.path.join(intermediates_path, "month-{:02d}.pickle".format(im+1))
pickle.dump(ma, open(pp, 'wb'))
print("Done saving pickles...")
# Clean up any intermediate files.
if not save_intermediates:
mapath = os.path.join(TMP_DATA, 'month-averages-pid{}'.format(os.getpid()))
for f in os.listdir(mapath):
os.remove(os.path.join(mapath, f))
os.rmdir(mapath)
print("Returning monthly_averages list...")
return monthly_averages
def get_monthlies_figure(base_path, secondary_path, title, units,
src='fresh', save_intermediates=True, madata=None ):
'''
Creates a single figure with 12 subplots, each showing the average for that
month across the timeseries.
'''
months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
if src == 'fresh':
monthly_averages = calculate_monthly_averages(months, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
monthly_averages = read_monthly_pickles(months=list(range(1,13)))
elif src == 'passed':
monthly_averages = madata
else:
print("Invalid argument for src! '{}'".format(src))
vmax = np.max([avg.max() for avg in monthly_averages])
vmin = np.min([avg.min() for avg in monthly_averages])
print("vmax: {} vmin: {}".format(vmax, vmin))
print("Creating monthlies figure...")
fig, axes = plt.subplots(figsize=(11,8.5), nrows=3, ncols=4, sharex=True, sharey=True)
imgs = []
for ax, avg, month in zip(axes.flat, monthly_averages, months):
im = ax.imshow(avg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
imgs.append(im)
ax.set_title(month)
cbar = fig.colorbar(imgs[0], ax=axes.ravel().tolist())
cbar.set_label(units)
fig.suptitle(title)
print("Done creating monthlies figure.")
return fig
def get_overview_figure(periods, base_path, secondary_path, title='',
units='', src='fresh', save_intermediates=True, padata=None):
'''
Creates and returns a matplotlib figure with one subplot per period average, all sharing a common colorbar scale.
Parameters
----------
Returns
-------
fig : matplotlib figure instance
'''
if src == 'fresh':
period_averages = calculate_period_averages(periods, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
period_averages = read_period_averages(periods)
elif src == 'passed':
period_averages = padata
else:
print("Invalid argument for src! '{}'".format(src))
print("Converting to stacked masked array...")
pa2 = np.ma.stack(period_averages)
vmax = pa2.max()
vmin = pa2.min()
print("vmax: {} vmin: {}".format(vmax, vmin))
NCOLS = 4 # fixed number of cols, may add more rows
NROWS = len(period_averages) // NCOLS
if (len(period_averages) % NCOLS) > 0:
NROWS += 1
if len(period_averages) < NCOLS:
NCOLS = len(period_averages)
NROWS = 1
overview_fig, axes = plt.subplots(nrows=NROWS, ncols=NCOLS, sharex=True, sharey=True)
overview_fig.set_size_inches((11, 8.5), forward=True)
imgs = [] # in case we need to manipulate the images all at once
for ax, avg, period in zip(axes.flat, period_averages, periods):
print("plotting image for period:", period)
# Setting vmax and vmin normalized the colorbars across all images
im = ax.imshow(avg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
ax.set_title('{} to {}'.format(period[0], period[1]))
imgs.append(im)
# set a colorbar on the first axes
cbar = overview_fig.colorbar(imgs[0], ax=axes.ravel().tolist())
cbar.set_label(units)
overview_fig.suptitle(title)
return overview_fig, period_averages
def get_period_avg_figures(periods, base_path, secondary_path,
title='', units='', src='fresh', save_intermediates=True, padata=None):
'''
Parameters
----------
Same as get_overview_figure().
Returns
-------
ind_figures : list of matplotlib figures, one per period average
padata : the period average data passed in (None unless src='passed')
'''
if src == 'fresh':
period_averages = calculate_period_averages(periods, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
period_averages = read_period_averages(periods)
elif src == 'passed':
period_averages = padata
else:
print("Invalid argument for src! '{}'".format(src))
print("Converting to stacked masked array...")
pa2 = np.ma.stack(period_averages)
vmax = pa2.max()
vmin = pa2.min()
print("vmax: {} vmin: {}".format(vmax, vmin))
ind_figures = []
for i, ((start,end), periodavg) in enumerate(zip(periods, pa2)):
fig = plt.figure()
fig.suptitle(title) #fontsize=8
im = plt.imshow(periodavg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
ax = fig.axes[0]
ax.set_title('Average, {} to {}'.format(start, end))
cbar = plt.colorbar()
cbar.set_label(units)
ind_figures.append(fig)
return ind_figures, padata
def worker_func2(f):
if f == 'file3':
time.sleep(1)
if f == 'file7':
time.sleep(5)
print("will open, read, average {}".format(f))
return f
def worker_func3(in_file_path):
'''
Computes statewide summary stats (mean/min/max/std) for a single monthly .tif
file and returns them in a dict along with the file name and date.
'''
# Deduce month and year from file name
bname = os.path.basename(in_file_path)
n, ext = os.path.splitext(bname)
parts = n.split('_')
month, year = [int(p) for p in parts[-2:]]
date = dt.date(year=year, month=month, day=1)
# Open the file, get some stats
ds = gdal.Open(in_file_path)
ds_array = ds.ReadAsArray()
ds_m = np.ma.masked_less_equal(ds_array, -9999)
data_dict = dict(
fname=bname,
date=date,
statewide_mean=ds_m.mean(),
statewide_min=ds_m.min(),
statewide_max=ds_m.max(),
statewide_std=ds_m.std()
)
return data_dict
def generate_spatial_summary_stats(base_path, secondary_path_list):
'''
Writes one SPATIAL_SUMMARY_STATS_*.csv per secondary path, containing the
statewide mean/min/max/std for every timestep file found.
'''
# This produces a bunch of csv files with statewide averages
for sec_path in secondary_path_list[0:]:
files = sorted(glob.glob(os.path.join(base_path, sec_path.format(month='*', year='*'))))
p = multiprocessing.Pool()
results = p.map(worker_func3, files[0:])
p.close()
p.join()
s_results = sorted(results, key=lambda k: k['date'])
stats_path = "SPATIAL_SUMMARY_STATS_{}.csv".format(sec_path.split('/')[0])
import pandas as pd
df = pd.DataFrame(s_results)
df.to_csv(stats_path)
def plot_timeseries_of_spatial_summary_stats():
'''
'''
# Build this automatically:
# - look for SPATIAL_SUMMARY_STATS_*
ss_file_list = [
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_iem_CRU-TS40_historical_1901_2015_fix.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_ar5_MRI-CGCM3_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_ar5_NCAR-CCSM4_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_iem_cru_TS40_1901_2015.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_iem_CRU-TS40_historical_1901_2015_fix.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_ar5_MRI-CGCM3_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_ar5_NCAR-CCSM4_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_iem_cru_TS40_1901_2015.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_iem_CRU-TS40_historical_1901_2015_fix.csv',
]
# Create multi-page pdf document
import matplotlib.backends.backend_pdf
ofname = "climatology_statewide_averages.pdf".format()
print("Saving PDF: {}".format(ofname))
pdf = matplotlib.backends.backend_pdf.PdfPages(ofname)
var_list = ['tas_mean','pr_total','rsds_mean','vap_mean','hurs_mean']
unit_list = ['celsius', 'mm month-1', 'MJ-m2-d1','hPa', 'percent']
for var, units in zip(var_list, unit_list):
# Figure out the right files to work on
var_files = [x for x in ss_file_list if var in x.lower()]
print(var_files)
print()
h_file = [x for x in var_files if 'cru' in x.lower()]
pmri_file = [x for x in var_files if 'mri' in x.lower()]
pncar_file = [x for x in var_files if 'ncar' in x.lower()]
# Filtering above should result in single item lists, unpack for convenience.
h_file = h_file[0]
pmri_file = pmri_file[0]
pncar_file = pncar_file[0]
print("var: ", var)
print("hfile: ", h_file)
print("pmri_file: ", pmri_file)
print("pncar_file: ", pncar_file)
print()
# Read data into DataFrames
hdf = pd.read_csv( h_file )
hdf.set_index( pd.to_datetime(hdf['date']), inplace=True )
pmri_df = pd.read_csv( pmri_file )
pmri_df.set_index( pd.to_datetime(pmri_df['date']), inplace=True )
pncar_df = pd.read_csv( pncar_file )
pncar_df.set_index(
|
pd.to_datetime(pncar_df['date'])
|
pandas.to_datetime
|
import os
# os.environ["OMP_NUM_THREADS"] = "16"
import logging
logging.basicConfig(filename=snakemake.log[0], level=logging.INFO)
import pandas as pd
import numpy as np
# seak imports
from seak.data_loaders import intersect_ids, EnsemblVEPLoader, VariantLoaderSnpReader, CovariatesLoaderCSV
from seak.scoretest import ScoretestNoK
from seak.lrt import LRTnoK, pv_chi2mixture, fit_chi2mixture
from pysnptools.snpreader import Bed
from util.association import BurdenLoaderHDF5
from util import Timer
class GotNone(Exception):
pass
# set up the covariatesloader
covariatesloader = CovariatesLoaderCSV(snakemake.params.phenotype,
snakemake.input.covariates_tsv,
snakemake.params.covariate_column_names,
sep='\t',
path_to_phenotypes=snakemake.input.phenotypes_tsv)
# initialize the null models
Y, X = covariatesloader.get_one_hot_covariates_and_phenotype('noK')
null_model_score = ScoretestNoK(Y, X)
null_model_lrt = LRTnoK(X, Y)
# set up function to filter variants:
def maf_filter(mac_report):
# load the MAC report, keep only observed variants with MAF below threshold
mac_report = pd.read_csv(mac_report, sep='\t', usecols=['SNP', 'MAF', 'Minor', 'alt_greater_ref', 'hiconf_reg'])
if snakemake.params.filter_highconfidence:
vids = mac_report.SNP[(mac_report.MAF < snakemake.params.max_maf) & (mac_report.Minor > 0) & ~(mac_report.alt_greater_ref.astype(bool)) & (mac_report.hiconf_reg.astype(bool))]
else:
vids = mac_report.SNP[(mac_report.MAF < snakemake.params.max_maf) & (mac_report.Minor > 0) & ~(mac_report.alt_greater_ref.astype(bool))]
# this has already been done in filter_variants.py
# load the variant annotation, keep only variants in high-confidece regions
# anno = pd.read_csv(anno_tsv, sep='\t', usecols=['Name', 'hiconf_reg'])
# vids_highconf = anno.Name[anno.hiconf_reg.astype(bool).values]
# vids = np.intersect1d(vids, vids_highconf)
return mac_report.set_index('SNP').loc[vids]
# genotype path, vep-path:
assert len(snakemake.params.ids) == len(snakemake.input.bed), 'Error: length of chromosome IDs does not match length of genotype files'
geno_vep = zip(snakemake.params.ids, snakemake.input.bed, snakemake.input.vep_tsv, snakemake.input.ensembl_vep_tsv, snakemake.input.mac_report, snakemake.input.h5_lof, snakemake.input.iid_lof, snakemake.input.gid_lof)
stats = []
simulations = []
i_gene = 0
# enter the chromosome loop:
timer = Timer()
for i, (chromosome, bed, vep_tsv, ensembl_vep_tsv, mac_report, h5_lof, iid_lof, gid_lof) in enumerate(geno_vep):
if snakemake.params.debug:
# skip most chromosomes if we are debugging...
if chromosome not in ['chr9','chr16','chr21']:
continue
# set up the ensembl vep loader for the chromosome
spliceaidf = pd.read_csv(vep_tsv,
sep='\t',
usecols=['name', 'chrom', 'end', 'gene', 'max_effect'],
index_col='name')
# get set of variants for the chromosome:
mac_report = maf_filter(mac_report)
filter_vids = mac_report.index.values
# filter by MAF
keep = intersect_ids(filter_vids, spliceaidf.index.values)
spliceaidf = spliceaidf.loc[keep]
spliceaidf.reset_index(inplace=True)
# filter by impact:
spliceaidf = spliceaidf[spliceaidf.max_effect >= snakemake.params.min_impact]
# set up the regions to loop over for the chromosome
regions = pd.read_csv(snakemake.input.regions_bed, sep='\t', header=None, usecols=[0, 1, 2, 3], dtype={0: str, 1: np.int32, 2: np.int32, 3: str})
regions.columns = ['chrom', 'start', 'end', 'name']
# discard all genes for which we don't have annotations
gene_ids = regions.name.str.split('_', expand=True) # table with two columns, ensembl-id and gene-name
regions['gene'] = gene_ids[1] # this is the gene name
regions['ensembl_id'] = gene_ids[0]
regions.set_index('gene', inplace=True)
genes = intersect_ids(np.unique(regions.index.values), np.unique(spliceaidf.gene)) # intersection of gene names
regions = regions.loc[genes].reset_index() # subsetting
regions = regions.sort_values(['chrom', 'start', 'end'])[['chrom', 'start', 'end', 'name', 'gene', 'ensembl_id']]
# check if the variants are protein LOF variants, load the protein LOF variants:
ensemblvepdf = pd.read_csv(ensembl_vep_tsv, sep='\t', usecols=['Uploaded_variation', 'Gene'])
# this column will contain the gene names:
genes = intersect_ids(np.unique(ensemblvepdf.Gene.values), regions.ensembl_id) # intersection of ensembl gene ids
ensemblvepdf = ensemblvepdf.set_index('Gene').loc[genes].reset_index()
ensemblvepdf['gene'] = gene_ids.set_index(0).loc[ensemblvepdf.Gene.values].values
# set up the merge
ensemblvepdf.drop(columns=['Gene'], inplace=True) # get rid of the ensembl ids, will use gene names instead
ensemblvepdf.rename(columns={'Uploaded_variation':'name'}, inplace=True)
ensemblvepdf['is_plof'] = 1.
ensemblvepdf = ensemblvepdf[~ensemblvepdf.duplicated()] # if multiple ensembl gene ids map to the same gene names, this prevents a crash.
# we add a column to the dataframe indicating whether the variant is already annotated as protein loss of function by the ensembl variant effect predictor
spliceaidf = pd.merge(spliceaidf, ensemblvepdf, on=['name','gene'], how='left', validate='one_to_one')
spliceaidf['is_plof'] = spliceaidf['is_plof'].fillna(0.).astype(bool)
# initialize the loader
# Note: we use "end" here because the start + 1 = end, and we need 1-based coordiantes (this would break if we had indels)
eveploader = EnsemblVEPLoader(spliceaidf['name'], spliceaidf['chrom'].astype('str') + ':' + spliceaidf['end'].astype('str'), spliceaidf['gene'], data=spliceaidf[['max_effect','is_plof']].values)
# set up the variant loader (splice variants) for the chromosome
plinkloader = VariantLoaderSnpReader(Bed(bed, count_A1=True, num_threads=4))
plinkloader.update_variants(eveploader.get_vids())
plinkloader.update_individuals(covariatesloader.get_iids())
# set up the protein LOF burden loader
bloader_lof = BurdenLoaderHDF5(h5_lof, iid_lof, gid_lof)
bloader_lof.update_individuals(covariatesloader.get_iids())
# set up the splice genotype + vep loading function
def get_splice(interval):
try:
V1 = eveploader.anno_by_interval(interval, gene=interval['name'].split('_')[1])
except KeyError:
raise GotNone
if V1.index.empty:
raise GotNone
vids = V1.index.get_level_values('vid')
V1 = V1.droplevel(['gene'])
temp_genotypes, temp_vids = plinkloader.genotypes_by_id(vids, return_pos=False)
temp_genotypes -= np.nanmean(temp_genotypes, axis=0)
G1 = np.ma.masked_invalid(temp_genotypes).filled(0.)
ncarrier = np.sum(G1 > 0.5, axis=0)
cummac = mac_report.loc[vids].Minor
# spliceAI max score
weights = V1[0].values.astype(np.float64)
is_plof = V1[1].values.astype(bool)
# "standardized" positions -> codon start positions
# pos = V1[0].values.astype(np.int32)
return G1, vids, weights, ncarrier, cummac, is_plof
# set up the protein-LOF loading function
def get_plof(interval):
try:
G2 = bloader_lof.genotypes_by_id(interval['name']).astype(np.float64)
except KeyError:
G2 = None
return G2
# set up the test-function for a single gene
def test_gene(interval, seed):
pval_dict = {}
sim_dict = {}
pval_dict['gene'] = interval['name']
sim_dict['gene'] = interval['name']
def call_score(GV, name):
pv = null_model_score.pv_alt_model(GV)
if pv < 0.:
logging.warning('negative value encountered in p-value computation for gene {}, p-value: {}, using saddle instead.'.format(interval['name'], pv))
pv = null_model_score.pv_alt_model(GV, method='saddle')
pval_dict['pv_score_' + name] = pv
def call_lrt(GV, name):
lik = null_model_lrt.altmodel(GV)
sim = null_model_lrt.pv_sim(100, seed=seed)
pval_dict['lrtstat_' + name] = lik['stat']
pval_dict['alteqnull_' + name] = float(lik['alteqnull'])
sim_dict[name] = sim['res']
# load splice variants
G1, vids, weights, ncarrier, cummac, is_plof = get_splice(interval)
# keep indicates which variants are NOT "protein LOF" variants, i.e. variants already identified by the ensembl VEP
keep = ~is_plof
# do a score burden test (max weighted), this is different than the baseline!
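# For each individual this keeps the largest sqrt(weight) among the variants they carry
# (genotype > 0.5) and zero otherwise, giving a single max-weighted burden column per sample.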
G1_burden = np.max(np.where(G1 > 0.5, np.sqrt(weights), 0.), axis=1, keepdims=True)
call_score(G1_burden, 'linwb')
# linear weighted kernel
G1 = G1.dot(np.diag(np.sqrt(weights), k=0))
# do a score test (local collapsing)
call_score(G1, 'linw')
# if gene is nominally significant:
if (pval_dict['pv_score_linwb'] < snakemake.params.sclrt_nominal_significance_cutoff) | (pval_dict['pv_score_linw'] < snakemake.params.sclrt_nominal_significance_cutoff):
# do lrt tests
call_lrt(G1_burden, 'linwb')
call_lrt(G1, 'linw')
# load plof burden
G2 = get_plof(interval)
if G2 is not None:
if np.any(keep):
# merged (single variable)
G1_burden_mrg = np.maximum(G2, G1_burden)
call_score(G1_burden_mrg, 'linwb_mrgLOF')
call_lrt(G1_burden_mrg, 'linwb_mrgLOF')
# concatenated ( >= 2 variables)
# we separate out the ones that are already part of the protein LOF variants!
G1 = np.concatenate([G1[:,keep], G2], axis=1)
call_score(G1, 'linw_cLOF')
call_lrt(G1, 'linw_cLOF')
else:
logging.info('All Splice-AI variants for gene {} where already identified by the Ensembl variant effect predictor'.format(interval['name']))
pval_dict['nCarrier'] = ncarrier.sum()
pval_dict['cumMAC'] = cummac.sum()
pval_dict['n_snp'] = len(vids)
# we add some extra metadata columns
pval_dict['n_snp_notLOF'] = np.sum(keep)
# print(keep)
# print(len(keep))
pval_dict['nCarrier_notLOF'] = ncarrier[keep].sum()
pval_dict['cumMAC_notLOF'] = cummac[keep].sum()
# 0.5 is the recommended spliceAI cutoff
pval_dict['n_greater_05'] = np.sum(weights >= 0.5)
pval_dict['n_greater_05_notLOF'] = np.sum(weights[keep] >= 0.5)
# sanity check
assert pval_dict['cumMAC'] >= pval_dict['nCarrier'], 'Error: something is broken.'
return pval_dict, sim_dict
logging.info('loaders for chromosome {} initialized in {:.1f} seconds.'.format(chromosome, timer.check()))
# run tests for all genes on the chromosome
for _, region in regions.iterrows():
try:
gene_stats, sims = test_gene(region, i_gene)
except GotNone:
continue
stats.append(gene_stats)
simulations.append(sims)
i_gene += 1
if (i_gene % 100) == 0:
logging.info('tested {} genes...'.format(i_gene))
# print(i_gene)
logging.info('all tests for chromosome {} performed in {:.2f} minutes.'.format(chromosome, timer.check() / 60.))
logging.info('genes tested so far: {}'.format(i_gene + 1))
# when all chromosomes are done:
# generate results table:
stats =
|
pd.DataFrame.from_dict(stats)
|
pandas.DataFrame.from_dict
|
# License: BSD 3 clause
"""
In this example, we simulate a unidimensional (ground truth) MHP with a
multimodal Gaussian kernel with three modes.
We estimate the parameters of this MHP using ASLSD, with a SBF Gaussian model
with ten modes.
"""
import os
import sys
# add the path of packages to system path
nb_dir = os.path.split(os.getcwd())[0]
if nb_dir not in sys.path:
sys.path.append(nb_dir)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from aslsd.basis_kernels.basis_kernel_gaussian import GaussianKernel
from aslsd.kernels.kernel import KernelModel
from aslsd.models.mhp import MHP
# Define a ground truth MHP
true_kernel = KernelModel([GaussianKernel(), GaussianKernel(),
GaussianKernel()])
true_mhp = MHP([[true_kernel]])
# Define true parameter values
true_mu = np.array([0.01])
true_omega = np.array([0.2, 0.3, 0.4])
true_beta = np.array([0.4, 0.6, 0.8])
true_delta = np.array([1., 3., 8.])
true_ker_param = [[np.zeros(9)]]
for ix in range(3):
true_ker_param[0][0][3*ix] = true_omega[ix]
true_ker_param[0][0][3*ix+1] = true_beta[ix]
true_ker_param[0][0][3*ix+2] = true_delta[ix]
# Simulate a path of the ground truth
T_f = 10**7
list_times = true_mhp.simulate(T_f, mu=true_mu, kernel_param=true_ker_param,
seed=1234, verbose=True)
# Visualize simulated data
inter_arrivals =
|
pd.Series(list_times[0][1:]-list_times[0][:-1])
|
pandas.Series
|
import pandas as pd
import os
# This file uses variable and column names given in Turkish.
# It processes blood transfusion related data.
writer = pd.ExcelWriter('tümü.xlsx', engine='xlsxwriter')
writer2 = pd.ExcelWriter('ozet.xlsx', engine='xlsxwriter')
writer3 = pd.ExcelWriter('hasta başı toplam transfüzyon sayısı.xlsx', engine='xlsxwriter')
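# Output workbooks (Turkish file names): 'tümü' = "all records", 'ozet' = "summary",
# 'hasta başı toplam transfüzyon sayısı' = "total number of transfusions per patient".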
file_list = []
for file in os.listdir("veriler"):
if file.endswith(".xls") or file.endswith(".xlsx"):
file_list.append(file)
continue
else:
continue
print(file_list)
hast_list = [os.path.splitext(s)[0] for s in file_list]  # drop the file extension
print(hast_list)
pivot = pd.DataFrame(columns=['HGB > 10', "Kanıtsız ES Sayı", "Son 24s Kanıtsız ES", 'Toplam ES Sayısı',
"PLT > 100.000", "Kanıtsız PLT Sayı", "Son 24s Kanıtsız PLT", "Toplam PLT Trans.",
"INR < 1,5", "Kanıtsız TDP Sayı", "Son 24s Kanıtsız TDP", "Top TDP Trans.",
"Toplam End. Dışı", "Toplam Kanıtsız", " Toplam Son 24s Kanıtsız", "Toplam Transfüzyon",
"Toplam Hasta Sayısı"], index=hast_list)
for adi in hast_list:
print(f'Şu an {adi} hastanesi işlenmekte...')  # "Hospital {adi} is currently being processed..."
kan_ham_tablo = pd.read_excel("veriler/" + adi + ".xlsx", dtype='object')
kan_ham_tablo['Çıkış Tarihi'] = pd.DatetimeIndex(kan_ham_tablo['Çıkış Tarihi'], dayfirst=True)
print(kan_ham_tablo.info())
hasta_sayisi = kan_ham_tablo['Dosya No'].nunique()
toplam_transfuzyon = kan_ham_tablo['Dosya No'].count()
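# The 'Geçmiş' (history) column is parsed with the regexes below; for a hypothetical cell
# such as "HGB = 8.5 01.05.2021 14:30" this would yield 'HGB Değer' = 8.5 and, further
# down, 'gecmis_tarih' = 2021-05-01 14:30 (dayfirst parsing).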
kan_ham_tablo['HGB'] = kan_ham_tablo["Geçmiş"].str.extract(r'(HGB = \d+\.\d+|HGB = \d+)', expand=False)
kan_ham_tablo['HGB Değer'] = kan_ham_tablo["HGB"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['PLT'] = kan_ham_tablo["Geçmiş"].str.extract(r'(PLT = \d+\.\d+|PLT = \d+)', expand=False)
kan_ham_tablo['PLT Değer'] = kan_ham_tablo["PLT"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['aPTT'] = kan_ham_tablo["Geçmiş"].str.extract(r'(aPTT = \d+\.\d+|aPTT = \d+)', expand=False)
kan_ham_tablo['aPTT Değer'] = kan_ham_tablo["aPTT"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['PT'] = kan_ham_tablo["Geçmiş"].str.extract(r'(PT = \d+\.\d+|PT = \d+)', expand=False)
kan_ham_tablo['PT Değer'] = kan_ham_tablo["PT"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['INR'] = kan_ham_tablo["Geçmiş"].str.extract(r'(INR = \d+\.\d+|INR = \d+)', expand=False)
kan_ham_tablo['INR Değer'] = kan_ham_tablo["INR"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['HGB Değer'] = kan_ham_tablo['HGB Değer'].astype(float)
kan_ham_tablo['PLT Değer'] = kan_ham_tablo['PLT Değer'].astype(float)
kan_ham_tablo['aPTT Değer'] = kan_ham_tablo['aPTT Değer'].astype(float)
kan_ham_tablo['PT Değer'] = kan_ham_tablo['PT Değer'].astype(float)
kan_ham_tablo['INR Değer'] = kan_ham_tablo['INR Değer'].astype(float)
kayit_icin = kan_ham_tablo.drop(["HGB", "PLT", "aPTT", "PT", "INR"], axis=1)
kayit_icin.to_excel(writer, sheet_name=adi)
pivot_hasta = pd.pivot_table(kayit_icin, values='Kan Ürünü Cinsi', index='Dosya No', aggfunc='count')
pivot_hasta = pivot_hasta.sort_values(by='Kan Ürünü Cinsi', ascending=False)
pivot_hasta.to_excel(writer3, sheet_name=adi)
kayit_icin['gecmis_tarih'] = kayit_icin["Geçmiş"].str.extract(r'(\d+\.\d+.\d+ \d+:\d+)', expand=False)
kayit_icin['gecmis_tarih'] = pd.DatetimeIndex(kayit_icin['gecmis_tarih'], dayfirst=True)
kayit_icin['tarih_fark'] = kayit_icin['Çıkış Tarihi'] - kayit_icin['gecmis_tarih']
hgb_trans_toplam = kayit_icin[kayit_icin['Kan Ürünü Cinsi'].str.contains('ritrosit')]
hgb_end_disi = hgb_trans_toplam[hgb_trans_toplam['HGB Değer'] > 10]
hgb_end_disi = len(hgb_end_disi)
hgb_no_kanit = hgb_trans_toplam[~hgb_trans_toplam["Geçmiş"].str.contains('HGB', na=False)]
hgb_no_kanit = len(hgb_no_kanit)
hgb_dolu_gecmis = hgb_trans_toplam[hgb_trans_toplam["Geçmiş"].str.contains('HGB', na=False)]
hgb_date_diff = hgb_dolu_gecmis[hgb_dolu_gecmis['tarih_fark'] > pd.Timedelta(days=1)]
print(hgb_date_diff)
hgb_no_kanit_24 = len(hgb_date_diff)
hgb_trans_toplam = len(hgb_trans_toplam)
if hgb_trans_toplam == 0:
hgb_oran = 0
else:
hgb_oran = hgb_end_disi / hgb_trans_toplam
plt_trans_toplam = kayit_icin[kayit_icin['Kan Ürünü Cinsi'].str.contains('rombosit|PLT')]
plt_end_disi = plt_trans_toplam[plt_trans_toplam['PLT Değer'] > 100]
plt_end_disi = len(plt_end_disi)
plt_no_kanit = plt_trans_toplam[~plt_trans_toplam["Geçmiş"].str.contains('PLT', na=False)]
plt_no_kanit = len(plt_no_kanit)
plt_dolu_gecmis = plt_trans_toplam[plt_trans_toplam["Geçmiş"].str.contains('PLT', na=False)]
plt_date_diff = plt_dolu_gecmis[plt_dolu_gecmis['tarih_fark'] > pd.Timedelta(days=1)]
print(plt_date_diff)
plt_no_kanit_24 = len(plt_date_diff)
plt_trans_toplam = len(plt_trans_toplam)
if plt_trans_toplam == 0:
plt_oran = 0
else:
plt_oran = plt_end_disi / plt_trans_toplam
inr_trans_toplam = kayit_icin[kayit_icin['Kan Ürünü Cinsi'].str.contains('lazma')]
inr_end_disi = inr_trans_toplam[inr_trans_toplam['INR Değer'] < 1.5]
inr_end_disi = len(inr_end_disi)
inr_no_kanit = inr_trans_toplam[~inr_trans_toplam["Geçmiş"].str.contains('INR', na=False)]
inr_no_kanit = len(inr_no_kanit)
inr_dolu_gecmis = inr_trans_toplam[inr_trans_toplam["Geçmiş"].str.contains('INR', na=False)]
inr_date_diff = inr_dolu_gecmis[inr_dolu_gecmis['tarih_fark'] >
|
pd.Timedelta(days=1)
|
pandas.Timedelta
|
import os
import numpy as np
from itertools import product
from collections import defaultdict
import pandas as pd
import json
from nlafl import common
class HeatMapValue:
IsSet = False
def set_dir_version(dir,version):
HeatMapValue.dir = dir
HeatMapValue.version = version
HeatMapValue.IsSet = True
def __init__(self,num_pop_client,remove_pop_client,poison_count,trial_ind,file_path,upsample_count=0):
if not HeatMapValue.IsSet:
raise ValueError("First set directory and version")
self.num_pop_client = num_pop_client
self.remove_pop_client = remove_pop_client
self.poison_count = poison_count
self.trial_ind = trial_ind
self.file_path = file_path
self.upsample_count = upsample_count
self.readValues()
def readValues(self,):
fullPath = os.path.expanduser(os.path.join(HeatMapValue.dir,HeatMapValue.version,self.file_path))
try:
data = np.load(fullPath,allow_pickle=True)[()]
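# Assumed result layout (inferred from the indexing below): data["pop_accs"][round] holds a
# per-round record whose element [1] is the target-population accuracy; we average rounds
# 50-54, 100-104 and 200-204.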
self.targetAcc_50 = np.mean([data["pop_accs"][49+i][1] for i in range(5)])
self.targetAcc_100 = np.mean([data["pop_accs"][99+i][1] for i in range(5)])
try:
self.targetAcc_200 = np.mean([data["pop_accs"][199+i][1] for i in range(5)])
except:
self.targetAcc_200 = np.nan
except FileNotFoundError :
# print(fullPath)
# print('notFound')
self.targetAcc_50=np.nan
self.targetAcc_100=np.nan
self.targetAcc_200=np.nan
def averageByTrialIndex(list_heatvalue):
acc50 = defaultdict(list)
acc100 = defaultdict(list)
acc200 = defaultdict(list)
for value in list_heatvalue:
acc50[(value.num_pop_client,value.remove_pop_client,value.poison_count,value.upsample_count)].append(value.targetAcc_50)
acc100[(value.num_pop_client,value.remove_pop_client,value.poison_count,value.upsample_count)].append(value.targetAcc_100)
acc200[(value.num_pop_client,value.remove_pop_client,value.poison_count,value.upsample_count)].append(value.targetAcc_200)
# acc100= {k:np.mean(v) for k,v in acc100}
# acc200= {k:np.mean(v) for k,v in acc200}
return acc50,acc100,acc200
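# Each returned dict maps (num_pop_client, remove_pop_client, poison_count, upsample_count)
# to the list of per-trial accuracies at that checkpoint; getDf() below averages these lists.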
def getDf(dir,version,mode,agg):
dir = os.path.join(dir,version)
HeatMapValue.set_dir_version(dir,version)
poisonRatios = [0,3,7]
removePopClientRatios = [0,3,7]
trialInds = [0,1,2,4,42]
numPopClients = [15]
if mode == 'c1':
cmd = os.path.join(dir , "results_upsample_multitarget_0_{numPopClient}_0_{remove_pop_client}_15_{poison_count}_{trialInd}_{agg}_10.0_-1_0_each_each.npy")
elif mode == 'c2':
cmd = os.path.join(dir , "results_upsample_multitarget_0_{numPopClient}_0_{remove_pop_client}_30_{poison_count}_{trialInd}_{agg}_10.0_-1_0_agg_each.npy" )
elif mode == 'pk':
cmd = os.path.join(dir , "results_upsample_multitarget_0_{numPopClient}_{remove_pop_client}_0_-1_{poison_count}_{trialInd}_{agg}_10.0_-1_0_each_each.npy")
filesnames = list()
for numPopClient in numPopClients:
for poisonRatio, removePopClientRatio,trial_ind in product(poisonRatios,removePopClientRatios,trialInds):
poisonCount = poisonRatio
removePopClientCount = removePopClientRatio
filePath = cmd.format(numPopClient=numPopClient,trialInd= trial_ind,remove_pop_client =removePopClientCount, poison_count= poisonCount,agg=agg )
filesnames.append(HeatMapValue(numPopClient,removePopClientCount,poisonCount,trial_ind,filePath))
acc50,acc100,acc200 = HeatMapValue.averageByTrialIndex(filesnames)
df = defaultdict(list)
for k,v in acc100.items():
num_pop_client,remove_pop_client,poison_count,upsample_count = k
acc100_ = removeNanAndAverage(v)
acc50_ = removeNanAndAverage(acc50[k])
acc200_ = removeNanAndAverageSilent(acc200[k])
# df["num_pop_client"].append(num_pop_client)
df["remove_pop_client"].append(remove_pop_client)
df["poison_count"].append(poison_count)
df["acc50"].append(acc50_ )
df["acc100"].append(acc100_)
df["acc200"].append(acc200_ )
df =
|
pd.DataFrame(df)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Bootstrap - Top Gun Stochastic Modelling Class
Created on Tue Sep 8 08:17:30 2020
@author: <NAME>
"""
# %% IMPORTs CELL
# Default Imports
import numpy as np
import pandas as pd
import scipy.linalg as LA
# Plotly for charting
import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
# %% CLASS MODULE
class Bootstrap(object):
""" Portfolio Stochastic Modelling Class Modules
Currently offers empirical ONLY stochastic modelling for individual ports
as well as across a range of ports (called frontiers) as well as a range
of charts, analysis and markdown-report outputs (you need to go run the
markdown elsewhere).
INPUTS:
wgts - dataframe where indices are asset classes & cols are portfolios
mu - vector of expected returns (as pd.Series)
vol - vector of expected volatilities (as pd.Series)
hist - dataframe of historic returns (NOT index px/levels)
cor - dataframe of correlation matrix
nsims - number of Monte Carlo simulations
psims - no of periods to run simulation over (default = 260w)
f - annualisation factor (default = 52 for weekly returns data)
MAIN FUNCTIONS:
empirical() - runs empirical sims for 1 vector of weights
sim_stats() - calc descriptive stats for 1 simulation output
port_stats() - rtn, vol & marginal-contribution given inputs
emperical_frontier() - runs empirical analysis across all ports in wgts
will do stochastic sims, sim_stats, port_stats & return
a dictionary with all the outputs (stored as self.results)
correl_rmt_filtered() - allows us to build RMT filtered correl matrix
for other correl work look at correls module
CHARTING FUNCTIONS:
plot_collection_all(): runs default plots for frontier & ports
plot_collection_frontier(): runs plots to analyse across portfolios
plot_collection_port(): runs plots to analyse timeseries of simulations
NB/ for details of individual charts see below, or run a collection and
then load each plotly figure from the collection to see what it shows
REPORTING:
In all cases we produce a templated markdown script with Plotly plots
already embedded as HTML - these can be fed to report_writer or anything
which turns markdown to a static html/pdf.
markdown_master(): combines frontier report with all ports; options avail
markdown_frontier_report(): frontier analysis
markdown_port_report(): individual portfolio report
DEVELOPMENT:
- check correlation matrix PSD in class properties
Author: <NAME>
"""
## Initialise class
def __init__(self, wgts, mu, vol, # these aren't optional
alpha=None, te=None, tgts=None, # optional
hist=None, cor=None, # Need something
nsims=1000, f=52, psims=260, # standard params
**kwargs):
### ORDER OF INITIALISATION IS IMPORTANT ###
### Non-optional class inputs
self.wgts = wgts
self.mu = mu # [has @property]
self.vol = vol # [has @property]
# From required inputs we set these
self.universe = mu.index # list of asset classes [has @property]
self.port_names = wgts.columns # useful to have names of portfolios
### Optional class inputs
# alpha - set to vector of zeros of None passed [has @property]
if alpha is None:
alpha = pd.Series(np.zeros(len(mu)), index=mu.index, name='alpha')
self.alpha = alpha
# tracking error - set to vector of zeros if None passed [has @property]
if te is None:
te = pd.Series(np.zeros(len(mu)), index=mu.index, name='te')
self.te = te
# tgts set to vector of of zeros of length the numper of portfolios
if tgts is None:
tgts = pd.Series(np.zeros(len(wgts.columns)),
index=wgts.columns,
name='tgts')
self.tgts = tgts
# Historical Timeseries Data & Correlation
# ORDER IMPORTANT HERE
# if hist provided set a default correlation matrix as RMT
# if cor also provided we then override the default
# this is a little inefficient, but meh... hardly matters
if hist is not None:
self.cor = self.correl_rmt_filtered(hist.corr())
self.hist = hist
# Override default correl (from hist) if cor specifically passed
if cor is not None:
self.cor = cor # check symmetrical in properties
### STANDARD SETTINGS
self.nsims = nsims # number of simulations
self.f = f # annualisation factor
self.psims = psims # no of periods in MC simulation
self.plots = dict() # initisalise dict for plotly plots (useful later)
## Update Plotly template
colourmap = ['grey', 'teal', 'purple', 'black', 'deeppink', 'skyblue', 'lime', 'green','darkorange', 'gold', 'navy', 'darkred',]
fig = go.Figure(layout=dict(
font={'family':'Garamond', 'size':14},
plot_bgcolor= 'white',
colorway=colourmap,
showlegend=True,
legend={'orientation':'v'},
margin = {'l':75, 'r':50, 'b':25, 't':50},
xaxis= {'anchor': 'y1', 'title': '', 'hoverformat':'.1%', 'tickformat':'.0%',
'showline':True, 'linecolor': 'gray',
'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
'showgrid': True, 'gridcolor': 'whitesmoke',
},
yaxis= {'anchor': 'x1', 'title': '', 'hoverformat':'.1%', 'tickformat':'.0%',
'showline':True, 'linecolor':'gray',
'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
'showgrid': True, 'gridcolor': 'whitesmoke'
},
updatemenus= [dict(type='buttons',
active=-1, showactive = True,
direction='down',
y=0.5, x=1.1,
pad = {'l':0, 'r':0, 't':0, 'b':0},
buttons=[])],
annotations=[{'text': 'Source: STANLIB Multi-Strategy',
'xref': 'paper', 'x': 0.5, 'ax': 0,
'yref': 'paper', 'y': 0.5, 'ay': 0,
'align':'right'}],))
# Save template
pio.templates['multi_strat'] = pio.to_templated(fig).layout.template
return
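# A minimal construction sketch (all names and values are hypothetical; `weekly_rtns`
# stands for a DataFrame of periodic asset-class returns):
#
#   wgts = pd.DataFrame({'Port_A': [0.6, 0.4]}, index=['Equity', 'Bonds'])
#   mu = pd.Series([0.08, 0.05], index=wgts.index)
#   vol = pd.Series([0.15, 0.06], index=wgts.index)
#   bs = Bootstrap(wgts, mu, vol, hist=weekly_rtns, nsims=1000, f=52, psims=260)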
# %% CLASS PROPERTIES
# Expected Returns (mu) - Ideally pd.Series but MUST be a (1xN) vector
@property
def mu(self): return self.__mu
@mu.getter
def mu(self): return self.__mu
@mu.setter
def mu(self, x):
if isinstance(x, pd.Series):
x.name = 'mu'
elif len(np.shape(x)) != 1:
raise ValueError('mu input is non-vector: {} given'.format(x))
self.__mu = x
# Alpha (alpha) - Ideally pd.Series but MUST be a (1xN) vector
@property
def alpha(self): return self.__alpha
@alpha.getter
def alpha(self): return self.__alpha
@alpha.setter
def alpha(self, x):
if isinstance(x, pd.Series):
x.name = 'alpha'
elif len(np.shape(x)) != 1:
raise ValueError('alpha input is non-vector: {} given'.format(x))
self.__alpha = x
# Volatility (vol) - Ideally pd.Series but MUST be a (1xN) vector
@property
def vol(self): return self.__vol
@vol.getter
def vol(self): return self.__vol
@vol.setter
def vol(self, x):
if isinstance(x, pd.Series):
x.name = 'vol'
elif len(np.shape(x)) != 1:
raise ValueError('vol input is non-vector: {} given'.format(x))
self.__vol = x
# Tracking Error (te) - Ideally pd.Series but MUST be a (1xN) vector
@property
def te(self): return self.__te
@te.getter
def te(self): return self.__te
@te.setter
def te(self, x):
if isinstance(x, pd.Series):
x.name = 'te'
elif len(np.shape(x)) != 1:
raise ValueError('te input is non-vector: {} given'.format(x))
self.__te = x
# Correlation Matrix
# Currently just check if symmetrical
# Add test positive semi-definate
@property
def cor(self): return self.__cor
@cor.getter
def cor(self): return self.__cor
@cor.setter
def cor(self, x):
if x.shape[0] != x.shape[1]:
raise ValueError('Correl Matrix non-symmetrical: {} given'.format(x))
self.__cor = x
# nsims - number of simulations to run - needs to be an integer
@property
def nsims(self): return self.__nsims
@nsims.getter
def nsims(self): return self.__nsims
@nsims.setter
def nsims(self, x):
if not isinstance(x, int):
raise ValueError('nsims needs to be an integer: {} given'.format(x))
self.__nsims = int(x)
# psims - number of periods per MC Sim - needs to be an integer
@property
def psims(self): return self.__psims
@psims.getter
def psims(self): return self.__psims
@psims.setter
def psims(self, x):
if not isinstance(x, int):
raise ValueError('psims needs to be an integer: {} given'.format(x))
self.__psims = int(x)
# f - annualisation factor needs to be an integer
@property
def f(self): return self.__f
@f.getter
def f(self): return self.__f
@f.setter
def f(self, x):
if not isinstance(x, int):
raise ValueError('annualisation factor needs to be an integer: {} given'.format(x))
self.__f = int(x)
# %% Emperical Bootstrap
def empirical(self, **kwargs):
""" Monte-Carlo Simulation using Scaled Empirical Data
Jaco's idea: take the historical timeseries and standardise it; once
standardised we can input our own means and volatility estimates. This
will maintain higher moments (skew & kurtosis) from the original ts
but allow us to use our own forward estimates.
Note a serious problem of this approach is the length of the historical
data. Correlation is essentially taken by picking x random periods from
this data - as a result we are literally just recycling the same periods
over and over in a new order, making this analysis less useful for longer
simulations or where the historical period is short.
OUTPUT:
pd.DataFrame with each simulation being a row (starting at 0) and
each column being a period along the sim. Column[0] representing
time-0 is set at a portfolio value of 1
INPUTS:
w = vector of port wgts ideally pd.Series()
mu = vector of exp rtns ideally pd.Series()
alpha (OPTIONAL) = vector of asset class alpha expectations
vol = vector of annualised volatilities
te (OPTIONAL) tracking error of alpha sources to asset class beta
f = int() annualisation factor (default=52)
nsims = int() number of simulations
psims = int() number of observation periods per simulation
DEVELOPMENTS:
* Correlation of Alpha sources == 0; could incorporate alpha correl matrix
* Converting hist_rtns to np.array may speed things up; rest is already np
Author: Jaco's brainpower; adapted by David
"""
## INPUTS
w = kwargs['w'] if 'w' in kwargs else self.w
mu = kwargs['mu'] if 'mu' in kwargs else self.mu
vol = kwargs['vol'] if 'vol' in kwargs else self.vol
hist = kwargs['hist'] if 'hist' in kwargs else self.hist
nsims = kwargs['nsims'] if 'nsims' in kwargs else self.nsims
f = kwargs['f'] if 'f' in kwargs else self.f
psims = kwargs['psims'] if 'psims' in kwargs else self.psims
## OPTIONAL INPUTS
alpha = np.zeros(len(w)) if 'alpha' not in kwargs else kwargs['alpha']
te = np.zeros(len(w)) if 'te' not in kwargs else kwargs['te']
# De-Annualise Returns & Vols
mu, alpha = (mu / f), (alpha / f)
vol, te = (vol / np.sqrt(f)), (te / np.sqrt(f))
# Re-scale historical return series
std_rtn = (hist - hist.mean()) / hist.std() # standardise
std_rtn = std_rtn.mul(vol, axis=1).add(mu, axis=1) # re-scale
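# each column of std_rtn now has the target (periodic) mean & vol while keeping
# the empirical shape (skew/kurtosis) of the original history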
for i in range(0, nsims):
#irtn = std_rtn.iloc[:simlength]
irtn = std_rtn.sample(psims)
ialpha = np.random.normal(alpha, te, (psims, len(w)))
irtn = irtn + ialpha
# Build simulated path & add to simulations array
path = (1 + (irtn @ w)).cumprod(axis=0)
# create sims array on 1st iteration
# add to sims stack on further iterations
if i == 0:
sims = path
else:
sims = np.vstack((sims, path))
# Convert to DataFrame - adjust columns to start at 1 not 0
# insert vec PortValue==1 at col.0; concat because pd.insert is crap
df = pd.DataFrame(sims, columns=range(1, psims+1))
v1 = pd.DataFrame(np.ones((nsims, 1)), columns=[0])
# round on the output to save space in chart memory later
return pd.concat([v1, df], axis=1).round(5)
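# Illustrative usage sketch (assumes an instance `m` of this class with wgts,
# mu, vol, hist etc. already populated; the names below are examples only):
#   sims = m.empirical(nsims=1000, psims=52 * 10)   # 1,000 ten-year weekly paths
#   stats = m.sim_stats(sims, tgt=0.07, method='annualise')
# rows of `sims` are individual simulations, columns are periods (col 0 == 1),
# so sims.iloc[:, -1] is the distribution of terminal portfolio values.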
def sim_stats(self, sims, tgt=0, method='annualise', **kwargs):
""" Descriptive Statistics for dataframe of Monte Carlo Sims
INPUTS:
sims - df with rows as sims; columns as periods along sim path
tgt - numerical return bogie (default = 0)
periods - list periods on sim path to calc (default = all > 1yr)
note: column names must be numerical as they are used in annualisation
method annualise (default) - annualises periodic return
relative - subtracts the target from the annualised return
terminal - looks at terminal value
Author: <NAME>
"""
# percentiles we want to see
pc = [0.01, 0.05, 0.1, 0.25, 0.4, 0.5, 0.6, 0.75, 0.9, 0.95, 0.99]
# periods from simulations to analyse
if 'periods' in kwargs:
periods = kwargs['periods']
else:
periods = sims.columns[self.f:]
## SUBSET & ANNUALISE
# subset sims to required dates; if len(periods) is 1 this will return
# a pandas series so need to convert back to df (to access cols later)
sims = sims.loc[:, periods]
sims = sims.to_frame() if isinstance(sims, pd.Series) else sims
anns = sims ** (self.f / sims.columns) - 1 # annualised rtns from paths
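# NB: column labels are period counts, so each path value is geometrically
# annualised as value ** (f / period) - 1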
# Alternative calc methods available
# 0. 'annualise' (default) annualised returns
# 1. relative reduces returns by target (& tgt == 0)
# 2. terminal uses raw portfolio values from sims (& tgt == 1)
if method == 'relative':
anns, tgt = (anns - tgt), 0
elif method == 'terminal':
anns, tgt = sims, 1
# Stats (not computed by pd.describe)
stats = pd.DataFrame(index=anns.columns)
stats['median'] = anns.median()
stats['skew'] = anns.skew()
stats['kurtosis'] = anns.kurtosis()
stats['target'] = tgt
stats['prob'] = anns[anns > tgt].count() / anns.count()
return pd.concat([stats.T, anns.describe(percentiles=pc)], axis=0)
def port_stats(self, w=None, mu=None, vol=None, cor=None, **kwargs):
""" Portfolio Risk & Return Stats including MCR
NB/ This function ought to be elsewhere in the package; it may therefore
get replicated and then removed in the fullness of time.
INPUT:
w - wgts df with assets in index & ports as cols
mu - pd.Series of expected returns
vol - pd.Series of volatilities
cor - np.array or pd.Dataframe correlation matrix
OUTPUT: dictionary with keys risk, rtn, mcr, tcr & pcr
REFERENCES:
[1] http://webuser.bus.umich.edu/ppasquar/shortpaper6.pdf
Author: David (whilst loitering in the Scottish sunshine)
"""
## INPUTS - from self if None provided
if w is None: w = self.wgts
if mu is None: mu = self.mu
if vol is None: vol = self.vol
if cor is None: cor = self.cor
## CALCULATIONS
rtn = w.multiply(mu, axis=0).sum() # expected return
# convert w to dataframe if series passed
# this handles the change in dimensions when a single portfolio is passed
if isinstance(w, pd.Series):
w = w.to_frame()
wa = np.array(w) # wgts to arrays for matrix algebra
vcv = np.diag(vol) @ cor @ np.diag(vol) # covariance matrix
v = np.sqrt(np.diag(wa.T @ vcv @ wa)) # portfolio volatility
# Marginal Contribution to Risk
# where MCR = (w.T * VCV) / vol
mcr = np.transpose((wa.T @ vcv) / v.reshape((w.shape[1],1)))
mcr.columns, mcr.index = w.columns, mu.index
tcr = mcr * wa # total contribution to risk
pcr = tcr / v # percentage TCR (sums to 100%)
# convert vol back to a pandas series
v = pd.Series(data=v, index=rtn.index)
# Ingest to class
self.port_rtn = rtn
self.port_vol = v
self.mcr = mcr
self.tcr = tcr
self.pcr = pcr
return dict(risk=v, rtn=rtn, mcr=mcr, tcr=tcr, pcr=pcr)
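# Sanity-check sketch (illustrative; assumes an instance `obj` of this class
# with wgts/mu/vol/cor already set):
#   ps = obj.port_stats()
#   ps['tcr'].sum()   # total contribution to risk sums to portfolio vol (per port)
#   ps['pcr'].sum()   # percentage contribution sums to 1 per portfolio
# which follows from MCR = (w.T @ VCV) / vol and TCR = w * MCR (see ref [1])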
def empirical_frontier(self, wgts=None, tgts=None, alpha=True, **kwargs):
""" Runs Stochastic Modelling on whole Frontier
Frontier here refers to any set of portfolio weights - original use case
was to run analysis on each port on an MVO efficient frontier
"""
## INPUTS
# pull wgts dataframe from self if None provided
wgts = self.wgts if wgts is None else wgts
# Return Targets can be provided, pulled from object or zeros
if tgts is None:
# if None provided grab tgts from self
tgts = self.tgts
elif tgts == 0:
# array of zeros otherwise
tgts = np.zeros(wgts.shape[1])
# Alpha
# Remember that alpha & te are set ONLY via kwargs in the empirical bootstrap
# For frontier if alpha is false create 2x series of zeros
if alpha:
alpha, te = self.alpha, self.te
else:
alpha = pd.Series(name='alpha', index=wgts.index,
data=np.zeros(wgts.shape[0]))
te = pd.Series(name='te', index=wgts.index,
data=np.zeros(wgts.shape[0]))
# Output storage dictionary
# keys as names of ports being tested, values will be dicts themselves
data = dict.fromkeys(wgts.columns)
# port_stats() works on the whole frontier anyway, so do it before iteration
portstats = self.port_stats(w=wgts) # NB/ not part of MC sim
## iterate across frontier (columns of wgts df)
for i, port in enumerate(wgts):
# Pull inputs from self - for the bootstrap
# Not technically required; useful to store so we know exactly
# which inputs went into a given model
# also append some portstats stuff (MCR, TCR, PCR) which is useful
# although not used at all in the stochastic modelling
df = pd.concat([wgts[port], self.mu, self.vol, alpha, te,
|
pd.Series(portstats['mcr'][port], name='mcr')
|
pandas.Series
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.testing as pdt
from tests.fixtures import DataTestCase
from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters
from tsfresh.utilities.dataframe_functions import impute
class RelevantFeatureExtractionDataTestCase(DataTestCase):
"""
Test case for the relevant_feature_extraction function
"""
def test_functional_equality(self):
"""
`extract_relevant_features` should be equivalent to running first `extract_features` with impute and
`select_features` afterwards.
Meaning it should produce the same relevant features and the values of these features should be identical.
:return:
"""
df, y = self.create_test_data_sample_with_target()
relevant_features = extract_relevant_features(
df,
y,
column_id="id",
column_value="val",
column_kind="kind",
column_sort="sort",
)
extracted_features = extract_features(
df,
column_id="id",
column_value="val",
column_kind="kind",
column_sort="sort",
impute_function=impute,
)
selected_features = select_features(extracted_features, y)
self.assertEqual(
set(relevant_features.columns),
set(selected_features.columns),
"Should select the same columns:\n\t{}\n\nvs.\n\n\t{}".format(
relevant_features.columns, selected_features.columns
),
)
relevant_columns = relevant_features.columns
relevant_index = relevant_features.index
self.assertTrue(
relevant_features.equals(
selected_features.loc[relevant_index][relevant_columns]
),
"Should calculate the same feature values",
)
class RelevantFeatureExtractionTestCase(TestCase):
def setUp(self):
np.random.seed(42)
y = pd.Series(np.random.binomial(1, 0.5, 20), index=range(20))
df = pd.DataFrame(index=range(100))
df["a"] = np.random.normal(0, 1, 100)
df["b"] = np.random.normal(0, 1, 100)
df["id"] = np.repeat(range(20), 5)
X = pd.DataFrame(index=range(20))
X["f1"] = np.random.normal(0, 1, 20)
X["f2"] = np.random.normal(0, 1, 20)
self.df = df
self.X = X
self.y = y
def test_extracted_features_contain_X_features(self):
X = extract_relevant_features(self.df, self.y, self.X, column_id="id")
self.assertIn("f1", X.columns)
self.assertIn("f2", X.columns)
pdt.assert_series_equal(self.X["f1"], X["f1"])
pdt.assert_series_equal(self.X["f2"], X["f2"])
pdt.assert_index_equal(self.X["f1"].index, X["f1"].index)
pdt.assert_index_equal(self.X["f2"].index, X["f2"].index)
def test_extraction_null_as_column_name(self):
df1 = pd.DataFrame(
data={
0: range(10),
1: np.repeat([0, 1], 5),
2: np.repeat([0, 1, 2, 3, 4], 2),
}
)
X1 = extract_features(df1, column_id=1, column_sort=2)
self.assertEqual(len(X1), 2)
df2 = pd.DataFrame(
data={
1: range(10),
0: np.repeat([0, 1], 5),
2: np.repeat([0, 1, 2, 3, 4], 2),
}
)
X2 = extract_features(df2, column_id=0, column_sort=2)
self.assertEqual(len(X2), 2)
df3 = pd.DataFrame(
data={
0: range(10),
2: np.repeat([0, 1], 5),
1: np.repeat([0, 1, 2, 3, 4], 2),
}
)
X3 = extract_features(df3, column_id=2, column_sort=1)
self.assertEqual(len(X3), 2)
def test_raises_mismatch_index_df_and_y_df_more(self):
y = pd.Series(range(3), index=[1, 2, 3])
df_dict = {
"a": pd.DataFrame({"val": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}),
"b": pd.DataFrame({"val": [5, 6, 7, 8, 12, 13], "id": [4, 4, 3, 3, 2, 2]}),
}
self.assertRaises(
ValueError,
extract_relevant_features,
df_dict,
y,
None,
None,
None,
"id",
None,
"val",
)
def test_raises_mismatch_index_df_and_y_y_more(self):
y = pd.Series(range(4), index=[1, 2, 3, 4])
df = pd.DataFrame({"val": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]})
self.assertRaises(
ValueError,
extract_relevant_features,
df,
y,
None,
None,
None,
"id",
None,
"val",
)
def test_raises_y_not_series(self):
y = np.arange(10)
df_dict = {
"a": pd.DataFrame({"val": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}),
"b":
|
pd.DataFrame({"val": [5, 6, 7, 8, 12, 13], "id": [4, 4, 3, 3, 2, 2]})
|
pandas.DataFrame
|
import os
import pandas as pd #for data analysis
import matplotlib.pyplot as plt
import cv2
import numpy as np
import math
import pydicom as pydicom
import tensorflow as tf
import tensorflow_addons as tfa
import sklearn
from sklearn.model_selection import train_test_split
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
from tqdm import tqdm
import argparse
import gdcm
import random
import scipy.ndimage
import collections
import imblearn
import numpy
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras import utils as np_utils
from keras.utils.np_utils import to_categorical
from random import seed
from random import random
from random import randint
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from tensorflow import keras
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import preprocessing
from tensorflow.keras import models
## ---------- Data set up functions, balancing data, and spliting dataset ------------ ######
#this function extracts the pixel_data and resizes it to a user defined size (default is 50), and the associated vector for classification is generated
#NOTE: Slight changes are made to all of the code to make further improvements using better libraries
def getImagesAndLabels(imageArray, labelArray, img_px_size=50, visualize=False):
np.random.seed = 1
images = []
labels = []
uids = []
idx = 0
print("getting images and labels")
for file, mortality in tqdm(zip(imageArray.iteritems(), labelArray.iteritems()),total = len(imageArray)):
uid = file[1]
label=mortality[1]
path = uid
image = pydicom.read_file(path)
if image.Modality != "SR":
if "PixelData" in image:
idx += 1
resized_image = cv2.resize(np.array(image.pixel_array),(img_px_size,img_px_size))
value = randint(1, 10)
factor = random()
if value == 3:
fig, ax = plt.subplots(1,2)
ax[0].imshow(resized_image)
resized_image = np.fliplr(resized_image)
ax[1].imshow(resized_image)
#NOTE: within a docker container you will not be able to see these visualizations
#uncomment this line if you would like to see what the image looks like when flipped across the y axis
# plt.show()
#this set of code is commented out as visualization is not possible within a docker container, but if you run this separately or within jupyter you are able to visualize every 15th image; change 15 to whatever number you like if you want to visualize more or fewer images
if visualize:
#every 15th image is "visualized"; changing the 15 will allow you to view every xth image
if idx%15==0:
fig = plt.figure()
plt.imshow(resized_image)
# plt.show()
images.append(resized_image)
labels.append(label)
uids.append(uid)
print("total subjects avilable: ", idx)
print("lenth of images", len(images))
return images, labels, uids
#this function will balance the data; however, compared to the TrainModel-Container, after gaining further understanding, test data is not balanced in order to mimic real-world work. Credit for help understanding this: Sotiras, A., Assistant Professor of Radiology @WASHU
#as the dataset was imbalanced, balancing techniques were applied; in this case the number of dicom files for each class is counted and then balanced according to the user's preference - it can either be undersampled or oversampled
def balanceData(imageArray, labelArray, underSample = False,):
# print(imageArray, labelArray)
concatinatedArrray = pd.concat([imageArray, labelArray], axis=1)
count_class_0, count_class_1 = concatinatedArrray.mortality.value_counts()
df_class_0 = concatinatedArrray[concatinatedArrray['mortality'] == 0]
df_class_1 = concatinatedArrray[concatinatedArrray['mortality'] == 1]
print("alive", len(df_class_0), "dead", len(df_class_1))
# print("before balancing")
concatinatedArrray.mortality.value_counts().plot(kind='bar', title='before balancing');
#undersampling of the data is done if the user chooses to undersample
if underSample:
df_class_0_under = df_class_0.sample(count_class_1)
df_test_under =
|
pd.concat([df_class_0_under, df_class_1], axis=0)
|
pandas.concat
|
import numpy as np
import pandas as pd
from collections import OrderedDict
from .utils import is_list, to_list, is_fitted
class Attributes:
"""
The Attributes class handles checking and setting the attributes
for the InterpretToolkit, GlobalInterpret, and LocalInterpret classes.
Attributes is a base class to be inherited by those classes and
should never be instantiated
"""
def set_estimator_attribute(self, estimator_objs, estimator_names):
"""
Checks the type of the estimators and estimator_names attributes.
If a list or not a dict, then the estimator argument
is converted to a dict for processing.
Parameters
----------
estimators : object, list of objects
A fitted estimator object or list thereof implementing `predict` or
`predict_proba`.
Multioutput-multiclass classifiers are not supported.
estimator_names : string, list
Names of the estimators (for internal and plotting purposes)
"""
estimator_is_none = estimator_objs is None
# Convert the estimator_objs to a list, if it is not already.
if not is_list(estimator_objs):
estimator_objs = to_list(estimator_objs)
# Convert the name of the estimator_objs to a list,
# if is not already.
if not is_list(estimator_names):
estimator_names = to_list(estimator_names)
# Check that the estimator_objs and estimator_names are the same size.
if not estimator_is_none:
assert len(estimator_objs) == len(
estimator_names
), "Number of estimator objects is not equal to the number of estimator names given!"
# Check that the estimator objects have been fit!
if not estimator_is_none:
if not all([is_fitted(m) for m in estimator_objs]):
raise ValueError(
"One or more of the estimators given has NOT been fit!"
)
# Create a dictionary from the estimator_objs and estimator_names.
# Then set the attributes.
self.estimators = OrderedDict(
[(name, obj) for name, obj in zip(estimator_names, estimator_objs)]
)
self.estimator_names = estimator_names
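# e.g. set_estimator_attribute(fitted_clf, 'RF') (a single fitted estimator and
# name get wrapped in lists) leaves self.estimators == OrderedDict([('RF', fitted_clf)])
# -- illustrative only, `fitted_clf` being any fitted sklearn-style estimator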
def set_y_attribute(self, y):
"""
Checks the type of the y attribute.
"""
# check that y are assigned correctly
if y is None:
raise ValueError("y is required!")
if is_list(y):
self.y = np.array(y)
elif isinstance(y, np.ndarray):
self.y = y
elif isinstance(y, (pd.DataFrame, pd.Series)):
self.y = y.values
else:
if y is not None:
raise TypeError("y must be an numpy array or pandas.DataFrame.")
else:
self.y = None
def set_X_attribute(self, X, feature_names=None):
"""
Check the type of the X attribute.
"""
# make sure data is the form of a pandas dataframe regardless of input type
if X is None:
raise ValueError("X is required!")
if isinstance(X, np.ndarray):
if feature_names is None:
raise Exception("Feature names must be specified if using NumPy array.")
else:
self.X =
|
pd.DataFrame(data=X, columns=feature_names)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import os
import datetime
def process_diagnostics(save=0):
df_ms1 = pd.read_csv('..\\data\\raw\\transfer_2018-03-08\\diagnostic\\2018-03-02 - MS1 - Database Merge.csv')
df_ms2 =
|
pd.read_csv('..\\data\\raw\\transfer_2018-03-08\\diagnostic\\2018-03-05 - MS2 - Database Merge.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
"""
import os
from datetime import datetime
from oemof.tabular.datapackage import building
import pandas as pd
def eGo_offshore_wind_profiles(
buses,
weather_year,
scenario_year,
datapackage_dir,
raw_data_path,
correction_factor=0.8,
):
"""
Parameter
---------
buses: array like
List with buses represented by iso country code
weather_year: integer or string
Year to select from raw data source
scenario_year: integer or string
Year to use for timeindex in tabular resource
datapackage_dir: string
Directory for tabular resource
raw_data_path: string
"""
filepath = building.download_data(
"https://github.com/znes/FlEnS/archive/master.zip",
unzip_file="FlEnS-master/open_eGo/NEP_2035/nep_2035_seq.csv",
directory=raw_data_path,
)
wind = pd.read_csv(
filepath, parse_dates=True, index_col=0, header=[0, 1, 2, 3, 4]
)
wind.columns = wind.columns.droplevel([0, 2, 3, 4])
wind.reset_index(inplace=True)
sequences_df = pd.DataFrame()
# use vernetzen data
filepath_2050 = building.download_data(
"https://github.com/znes/FlEnS/archive/master.zip",
unzip_file="FlEnS-master/Socio-ecologic/2050_seq.csv",
directory=raw_data_path,
)
wind_2050 = pd.read_csv(
filepath_2050, parse_dates=True, index_col=0, header=[0, 1, 2, 3, 4]
)
wind_2050.columns = wind_2050.columns.droplevel([0, 2, 3, 4])
wind_2050["DE_wind_offshore"] = (
wind_2050["DEdr19_wind_offshore"] * 0.2
+ wind_2050["DEdr20_wind_offshore"] * 0.4
+ wind_2050["DEdr21_wind_offshore"] * 0.4
)
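# the national DE offshore profile is a 0.2/0.4/0.4 weighted average of the
# three German offshore regions (DEdr19/20/21)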
wind_2050.reset_index(inplace=True)
wind_2050["DE_wind_onshore"] = wind["DE_wind_onshore"]
wind = wind_2050
for c in buses:
if c + "_wind_offshore" in wind.columns:
sequences_df[c + "-offshore-profile"] = (
wind[c + "_wind_offshore"] * correction_factor
) # correction factor
sequences_df.index = building.timeindex(year=str(scenario_year))
building.write_sequences(
"volatile_profile.csv",
sequences_df,
directory=os.path.join(datapackage_dir, "data", "sequences"),
)
def ninja_pv_profiles(
buses, weather_year, scenario_year, datapackage_dir, raw_data_path
):
"""
Parameter
---------
buses: array like
List with buses represented by iso country code
weather_year: integer or string
Year to select from raw data source
scenario_year: integer or string
Year to use for timeindex in tabular resource
datapackage_dir: string
Directory for tabular resource
raw_data_path: string
Path where raw data file `ninja_pv_europe_v1.1_merra2.csv`
is located
"""
filepath = building.download_data(
"https://www.renewables.ninja/static/downloads/ninja_europe_pv_v1.1.zip",
unzip_file="ninja_pv_europe_v1.1_merra2.csv",
directory=raw_data_path,
)
year = str(weather_year)
countries = buses
raw_data =
|
pd.read_csv(filepath, index_col=[0], parse_dates=True)
|
pandas.read_csv
|
import datetime as dt
import pandas as pd
from bs4 import BeautifulSoup
import re
import requests
import time
today = dt.date.today()
zenhan = str.maketrans("1234567890","1234567890","")
token = "***<PASSWORD>***"
auth = {"Authorization": token}
query = "unit_id:133089874031904245 全裸 OR 下半身露出 "
limit = "50"
url = "https://api.nordot.jp/v1.0/search/contentsholder/posts.list"
csvDir = "(Directory)"
cur = ""
# search from the first day of the month
today = today.replace(day=1)
today_iso = dt.datetime.combine(today, dt.time(0,0,0)).isoformat()+"+09:00"
created_at_d1 = "created_at:>=" + today_iso
query += created_at_d1
parameter = {"query": query, "limit": limit}
def apiGet(page, cursor=""):
para = parameter
if (page >= 2) and (len(cursor) != 0):
para["cursor"] = cursor
r = requests.get(url, params = para, headers = auth)
j = r.json()
if j["paging"]["has_next"] == 1:
c = j["paging"]["next_cursor"]
else:
c = ""
return j, c
def parse(req):
posts = []
for post in req["posts"]:
entry = {}
aid = post["id"]
title = post["title"].translate(zenhan)
desc = post["description"].translate(zenhan)
pub_at = dt.datetime.strptime(post["published_at"], "%Y-%m-%dT%H:%M:%S+00:00")
pub_at += dt.timedelta(hours=9) #Japan
if len(desc) == 0:
r = requests.get(url="https://nordot.app/"+str(aid))
s = BeautifulSoup(r.text, "html.parser")
desc = s.find(class_="ma__p").text.translate(zenhan)
time.sleep(1)
# time of the exposure
if "ごろ" not in desc:
naked = "AM 12:00"
elif "分ごろ" not in desc:
naked = re.sub(r".+(午[前後]1?[0-9])時ごろ.+", r"\1:00", desc)
else:
naked = re.sub(r".+(午[前後]1?[0-9])時([0-9]{1,2})分ごろ.+", r"\1:\2", desc)
naked = naked.replace("午前", "AM ")
naked = naked.replace("午後", "PM ")
naked = re.sub(r"M 0:", "M 12:", naked)
ntime = dt.datetime.strptime(naked, "%p %I:%M")
# prefecture where the exposure occurred
place = re.match(r"^((.{1,3}?))", title).group(1)
# date of the exposure
tsuki = re.match(r".+、([0-9]{1,2})月.+", desc)
nichi = re.match(r".+((?<![0-9])([0-9])(?![0-9])|[12][0-9]|3[01])日.+", desc)
if tsuki != None:
ndate = dt.date(pub_at.year, int(tsuki.group(1)), int(nichi.group(1)))
else:
ndate = dt.date(pub_at.year, pub_at.month, int(nichi.group(1)))
if dt.date.today() - ndate < dt.timedelta(0):
ndate = ndate.replace(year=pub_at.year - 1)
# full nudity or not
if "全裸" in title:
zenra = "全裸"
else:
zenra = "下半身露出"
# store the incident details
entry["article_id"] = aid
entry["naked_at"] = dt.datetime(
ndate.year, ndate.month, ndate.day,
ntime.hour, ntime.minute, ntime.second
)
entry["created_at"] = pub_at
entry["zenra"] = zenra
entry["place"] = place
posts.append(entry)
return posts
def json2DF(jsonInput):
df =
|
pd.json_normalize(jsonInput)
|
pandas.json_normalize
|
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools run PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
'their dataset and whether they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
df = df.fillna(0)
elif 'txt' in filename or 'tsv' in filename:
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_eigen_scree_plot(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
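# Rounding helpers used below to report the colour-scale range of the loadings heatmap.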
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
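# PC-feature loadings heatmap callback: loadings are computed as
# components_.T * sqrt(explained_variance_); for standardised (correlation-matrix) PCA this is
# approximately the correlation between each original feature and each principal component.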
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_pc_feature_heatmap(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of each principal component
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
# loading of each feature in principle components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
        z=data, x=list(data.columns), y=list(data.index),
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the loading of a feature on a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
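# Feature-feature heatmap callback: squares the pairwise Pearson correlation matrix to show
# R² (coefficient of determination) between every pair of numeric features.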
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_feature_heatmap(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
        correlation_dff = dff.corr(method='pearson')
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
        correlation_dff_outlier = outlier_dff.corr(method='pearson')
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the squared Pearson correlation (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
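# Feature selector callback: the 'Custom' choice exposes every numeric column as a dropdown
# option; 'All' returns an empty option list so no per-feature selection is made.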
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
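# Colour-scale dropdown callback: only populated in Biplot mode with a colour target selected;
# candidate columns are the user-chosen features, filtered by z-score when outliers are removed.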
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
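# Marker-size dropdown callback: mirrors the colour-scale logic above for the size encoding.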
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# Biplot / loadings plot callback: projects observations onto PC1-PC2 and overlays the feature
# loading vectors, for all numeric features or a custom subset, with optional z-score outlier
# removal and a choice of correlation (standardised) or covariance (unscaled) matrix.
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
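        # Each loading is drawn as a two-point line: its (PC1, PC2) coordinates are stacked on a
        # zero row sharing the same 'line_group' label, so every feature traces back to the origin.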
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
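        # Scores layer: one marker per observation, with hover text taken from the identifier column.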
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
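# Marker-size range callback: reports the min/max of the selected size variable (after optional
# outlier removal) so the marker-area scaling in the biplot can be interpreted.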
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
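# Cos2 plot callback: cos2 = PC1_loading**2 + PC2_loading**2 measures how well each feature is
# represented in the PC1-PC2 plane; loading vectors are drawn on a light-to-dark blue ramp
# ordered by cos2.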
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# pick the dataset and explained-variance vector for the selected outlier / matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
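# one shade per distinct cos2 value; because traces are added in ascending cos2 order,
# darker blues mark variables that are better represented in the PC1-PC2 plane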
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
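# trace2_all is an invisible two-point scatter (opacity 0, hover skipped) whose only purpose is
# to display the Cos2 colourbar alongside the coloured loading vectors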
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
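# rows are kept only when every numeric feature lies within 3 standard deviations of its column
# mean (|z-score| < 3); the same rule is applied separately to the selected target column(s) below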
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
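# NOTE: the |z| < 3 outlier filter is re-implemented inside every callback in this file. The helper
# below is only a minimal sketch of how that step could be factored out; it is not wired into any
# callback, and the name drop_outliers and the z_thresh argument are illustrative, not part of the app.
def drop_outliers(df, dff, z_thresh=3):
    """Illustrative sketch only (not used by the Dash callbacks): return the numeric rows whose
    features all lie within z_thresh standard deviations, plus the matching identifier column."""
    keep = (np.abs(scipy.stats.zscore(dff)) < z_thresh).all(axis=1)
    return dff[keep], df[keep].iloc[:, 0]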
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
# keep PC1, PC2 and the summed contribution (used for the colour scale) in a separate dataframe
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH OUTLIERS REMOVED
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
# keep PC1, PC2 and the summed contribution (used for the colour scale) in a separate dataframe
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
# keep PC1, PC2 and the summed contribution (used for the colour scale) in a separate dataframe
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
# pick the dataset and explained-variance vector for the selected outlier / matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
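# Minimal sketch (illustrative only, not used by the callbacks) of the contribution computation
# repeated above: per component, contrib = 100 * loading**2 / sum(loading**2), summed over PC1 and PC2.
def _contrib_sketch(loadings_pc1_pc2):
    # loadings_pc1_pc2: array-like of shape (n_features, 2) holding the PC1 and PC2 loadings
    cos2 = np.asarray(loadings_pc1_pc2, dtype=float) ** 2
    return (100 * cos2 / cos2.sum(axis=0)).sum(axis=1)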
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(all_custom, outlier, matrix_type):
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH OUTLIERS REMOVED
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
csv_string = dat.to_csv(index=False, encoding='utf-8')
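# the percent-encoded prefix %EF%BB%BF below is a UTF-8 byte-order mark so that spreadsheet
# software opens the downloaded CSV with the correct encoding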
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return csv_string
@app.callback(Output('download-link-correlation', 'download'),
[Input('eigenA-outlier', 'value'),
])
def update_filename(outlier):
if outlier == 'Yes':
download = 'feature_correlation_removed_outliers_data.csv'
elif outlier == 'No':
download = 'feature_correlation_data.csv'
return download
@app.callback([Output('data-table-correlation', 'data'),
Output('data-table-correlation', 'columns'),
Output('download-link-correlation', 'href')],
[Input("eigenA-outlier", 'value'),
Input('csv-data', 'data')], )
def update_output(outlier, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff_table = correlation_dff * correlation_dff
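# r2_dff_table holds the element-wise squared Pearson correlations, i.e. the coefficient of
# determination (R^2) for every pair of features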
r2_dff_table.insert(0, 'Features', features)
data_frame = r2_dff_table
if outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier_table = correlation_dff_outlier * correlation_dff_outlier
r2_dff_outlier_table.insert(0, 'Features', features_outlier)
data_frame = r2_dff_outlier_table
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-eigenA', 'download'),
[Input("matrix-type-data-table", 'value'),
Input('eigenA-outlier', 'value')])
def update_filename(matrix_type, outlier):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-eigenA', 'data'),
Output('data-table-eigenA', 'columns'),
Output('download-link-eigenA', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
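# invert the cumulative explained-variance curve at 70% to estimate how many components are
# needed to explain 70% of the variance; math.ceil rounds up to a whole number of PCs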
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
Var_dfff = pd.concat([(Var_cumsum * 100)], axis=1)
Eigen_Analysis = pd.concat([PC_df.T, Eigen_df.T, Var_df.T, Var_dfff.T], axis=0)
Eigen_Analysis = Eigen_Analysis.rename(columns=Eigen_Analysis.iloc[0])
Eigen_Analysis = Eigen_Analysis.drop(Eigen_Analysis.index[0])
Eigen_Analysis.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
Var_dfff_outlier = pd.concat([Var_cumsum_outlier * 100], axis=1)
Eigen_Analysis_Outlier = pd.concat(
[PC_df_outlier.T, Eigen_df_outlier.T, Var_df_outlier.T, Var_dfff_outlier.T],
axis=0)
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.rename(columns=Eigen_Analysis_Outlier.iloc[0])
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.drop(Eigen_Analysis_Outlier.index[0])
Eigen_Analysis_Outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_covar = dff.loc[:, features].values
pca_covar = PCA(n_components=len(features))
principalComponents_covar = pca_covar.fit_transform(x_covar)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
dfff_covar = finalDf_covar
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
Var_dfff_covar = pd.concat([(Var_cumsum_covar * 100)], axis=1)
Eigen_Analysis_covar = pd.concat([PC_df_covar.T, Eigen_df_covar.T, Var_df_covar.T, Var_dfff_covar.T],
axis=0)
Eigen_Analysis_covar = Eigen_Analysis_covar.rename(columns=Eigen_Analysis_covar.iloc[0])
Eigen_Analysis_covar = Eigen_Analysis_covar.drop(Eigen_Analysis_covar.index[0])
Eigen_Analysis_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier_covar = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier_covar = outlier_dff.loc[:, ].values
pca_outlier_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier_covar)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
dfff_outlier_covar = finalDf_outlier_covar
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier_covar = np.interp(70,
Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier_covar = math.ceil(PC_interp_outlier_covar)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
Var_dfff_outlier_covar = pd.concat([Var_cumsum_outlier_covar * 100], axis=1)
Eigen_Analysis_Outlier_covar = pd.concat(
[PC_df_outlier_covar.T, Eigen_df_outlier_covar.T, Var_df_outlier_covar.T, Var_dfff_outlier_covar.T],
axis=0)
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.rename(
columns=Eigen_Analysis_Outlier_covar.iloc[0])
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.drop(Eigen_Analysis_Outlier_covar.index[0])
Eigen_Analysis_Outlier_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier_covar
elif all_custom == "Custom":
if outlier == 'No' and matrix_type == "Correlation":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# INPUT DATA WITH OUTLIERS
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
eigenvalues_scale_input = pca_scale_input.explained_variance_
Eigen_df_scale_input = pd.DataFrame(data=eigenvalues_scale_input, columns=["Eigenvaues"])
PC_df_scale_input = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_input))],
columns=['Principal Component'])
Var_df_scale_input = pd.DataFrame(data=Var_scale_input,
columns=['Cumulative Proportion of Explained Ratio'])
Var_cumsum_scale_input = Var_df_scale_input.cumsum()
Var_dfff_scale_input = pd.concat([Var_cumsum_scale_input * 100], axis=1)
Eigen_Analysis_scale_input = pd.concat([PC_df_scale_input.T, Eigen_df_scale_input.T,
Var_df_scale_input.T, Var_dfff_scale_input.T], axis=0)
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.rename(columns=Eigen_Analysis_scale_input.iloc[0])
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.drop(Eigen_Analysis_scale_input.index[0])
Eigen_Analysis_scale_input.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input
elif outlier == "Yes" and matrix_type == "Correlation":
dff_input = dff.drop(columns=dff[input])
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
dff_target = dff[input]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# INPUT DATA WITH REMOVING OUTLIERS
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
eigenvalues_scale_input_outlier = pca_scale_input_outlier.explained_variance_
Eigen_df_scale_input_outlier = pd.DataFrame(data=eigenvalues_scale_input_outlier, columns=["Eigenvaues"])
PC_df_scale_input_outlier = pd.DataFrame(
data=['PC' + str(i + 1) for i in range(len(features_input_outlier))],
columns=['Principal Component'])
Var_df_scale_input_outlier = pd.DataFrame(data=Var_scale_input_outlier,
columns=['Cumulative Proportion of Explained '
'Ratio'])
Var_cumsum_scale_input_outlier = Var_df_scale_input_outlier.cumsum()
Var_dfff_scale_input_outlier = pd.concat([Var_cumsum_scale_input_outlier * 100], axis=1)
Eigen_Analysis_scale_input_outlier = pd.concat([PC_df_scale_input_outlier.T, Eigen_df_scale_input_outlier.T,
Var_df_scale_input_outlier.T,
Var_dfff_scale_input_outlier.T], axis=0)
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.rename(
columns=Eigen_Analysis_scale_input_outlier.iloc[0])
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.drop(
Eigen_Analysis_scale_input_outlier.index[0])
Eigen_Analysis_scale_input_outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input_covar = dff_input.loc[:, features_input].values
# INPUT DATA WITH OUTLIERS
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target],
axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
eigenvalues_scale_input_covar = pca_scale_input_covar.explained_variance_
Eigen_df_scale_input_covar = pd.DataFrame(data=eigenvalues_scale_input_covar, columns=["Eigenvaues"])
PC_df_scale_input_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_input))],
columns=['Principal Component'])
Var_df_scale_input_covar = pd.DataFrame(data=Var_scale_input_covar,
columns=['Cumulative Proportion of Explained Ratio'])
Var_cumsum_scale_input_covar = Var_df_scale_input_covar.cumsum()
Var_dfff_scale_input_covar = pd.concat([Var_cumsum_scale_input_covar * 100], axis=1)
Eigen_Analysis_scale_input_covar = pd.concat([PC_df_scale_input_covar.T, Eigen_df_scale_input_covar.T,
Var_df_scale_input_covar.T, Var_dfff_scale_input_covar.T],
axis=0)
Eigen_Analysis_scale_input_covar = Eigen_Analysis_scale_input_covar.rename(
columns=Eigen_Analysis_scale_input_covar.iloc[0])
Eigen_Analysis_scale_input_covar = Eigen_Analysis_scale_input_covar.drop(
Eigen_Analysis_scale_input_covar.index[0])
Eigen_Analysis_scale_input_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
dff_target = dff[input]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
# INPUT DATA WITH REMOVING OUTLIERS
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
eigenvalues_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_
            Eigen_df_scale_input_outlier_covar = pd.DataFrame(data=eigenvalues_scale_input_outlier_covar,
                                                              columns=["Eigenvalues"])
PC_df_scale_input_outlier_covar = pd.DataFrame(
data=['PC' + str(i + 1) for i in range(len(features_input_outlier))],
columns=['Principal Component'])
Var_df_scale_input_outlier_covar = pd.DataFrame(data=Var_scale_input_outlier_covar,
columns=['Cumulative Proportion of Explained '
'Ratio'])
Var_cumsum_scale_input_outlier_covar = Var_df_scale_input_outlier_covar.cumsum()
Var_dfff_scale_input_outlier_covar = pd.concat([Var_cumsum_scale_input_outlier_covar * 100], axis=1)
Eigen_Analysis_scale_input_outlier_covar = pd.concat(
[PC_df_scale_input_outlier_covar.T, Eigen_df_scale_input_outlier_covar.T,
Var_df_scale_input_outlier_covar.T,
Var_dfff_scale_input_outlier_covar.T], axis=0)
Eigen_Analysis_scale_input_outlier_covar = Eigen_Analysis_scale_input_outlier_covar.rename(
columns=Eigen_Analysis_scale_input_outlier_covar.iloc[0])
Eigen_Analysis_scale_input_outlier_covar = Eigen_Analysis_scale_input_outlier_covar.drop(
Eigen_Analysis_scale_input_outlier_covar.index[0])
Eigen_Analysis_scale_input_outlier_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_outlier_covar
data = data_frame_EigenA.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame_EigenA.columns]
csv_string = data_frame_EigenA.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-loadings', 'download'),
[Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(outlier, matrix_type):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Loadings_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Loadings_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Loadings_correlation_matrix_data.csv'
elif outlier == 'No' and matrix_type == "Covariance":
download = 'Loadings_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-loadings', 'data'),
Output('data-table-loadings', 'columns'),
Output('download-link-loadings', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale,
columns=["PC" + str(i + 1) for i in range(len(features))])
line_group_scale_df = pd.DataFrame(data=features, columns=['Features'])
loading_scale_dataf = pd.concat([line_group_scale_df, loading_scale_df], axis=1)
data_frame = loading_scale_dataf
elif outlier == 'Yes' and matrix_type == "Correlation":
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
            # data is standardized above, so this PCA corresponds to the correlation matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale,
columns=["PC" + str(i + 1) for i in range(len(features_outlier))])
line_group_outlier_scale_df = pd.DataFrame(data=features_outlier, columns=['Features'])
loading_outlier_scale_dataf = pd.concat([line_group_outlier_scale_df, loading_outlier_scale_df], axis=1)
data_frame = loading_outlier_scale_dataf
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar,
columns=["PC" + str(i + 1) for i in range(len(features))])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['Features'])
loading_scale_dataf_covar = pd.concat([line_group_scale_df_covar, loading_scale_df_covar], axis=1)
data_frame = loading_scale_dataf_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
# uses covariance matrix
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar,
columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
            # combining principal components and target
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar,
columns=["PC" + str(i + 1) for i in
range(len(features_outlier))])
line_group_outlier_scale_df_covar = pd.DataFrame(data=features_outlier, columns=['Features'])
loading_outlier_scale_dataf_covar = pd.concat(
[line_group_outlier_scale_df_covar, loading_outlier_scale_df_covar], axis=1)
data_frame = loading_outlier_scale_dataf_covar
if all_custom == 'Custom':
if outlier == 'No' and matrix_type == "Correlation":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input,
columns=["PC" + str(i + 1) for i in range(len(features_input))])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['Features'])
loading_scale_input_dataf = pd.concat([line_group_scale_input_df, loading_scale_input_df], axis=1)
data_frame = loading_scale_input_dataf
elif outlier == 'Yes' and matrix_type == "Correlation":
dff_input = dff.drop(columns=dff[input])
dff_target = dff[input]
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier,
columns=["PC" + str(i + 1)
for i in range(len(features_input_outlier))])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['Features'])
loading_scale_input_outlier_dataf = pd.concat([line_group_scale_input_outlier_df,
loading_scale_input_outlier_df], axis=1)
data_frame = loading_scale_input_outlier_dataf
elif outlier == "No" and matrix_type == "Covariance":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# INPUT DATA WITH OUTLIERS
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target],
axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar,
columns=["PC" + str(i + 1) for i in range(len(features_input))])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['Features'])
loading_scale_input_dataf_covar = pd.concat([line_group_scale_input_df_covar, loading_scale_input_df_covar],
axis=1)
data_frame = loading_scale_input_dataf_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
dff_target = dff[input]
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar,
columns=["PC" + str(i + 1)
for i in range(len(features_input_outlier))])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['Features'])
loading_scale_input_outlier_dataf_covar = pd.concat([line_group_scale_input_outlier_df_covar,
loading_scale_input_outlier_df_covar], axis=1)
data_frame = loading_scale_input_outlier_dataf_covar
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-cos2', 'download'),
[Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(outlier, matrix_type):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Cos2_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Cos2_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Cos2_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Cos2_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-cos2', 'data'),
Output('data-table-cos2', 'columns'),
Output('download-link-cos2', 'href'), ],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == "All":
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale,
columns=["PC" + str(i + 1) for i in range(len(features))])
for i in loading_scale_df.columns:
loading_scale_df[i] = (loading_scale_df[i] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['Features'])
loading_scale_dataf = pd.concat([line_group_scale_df, loading_scale_df], axis=1)
data_frame = loading_scale_dataf
elif outlier == 'Yes' and matrix_type == "Correlation":
# ORIGINAL DATA WITH REMOVING OUTLIERS
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
import json
import pickle
import glob
import numpy as np
import pandas as pd
from tabulate import tabulate
from datetime import datetime
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
def load_json(path):
"""Load json file.
Args:
path (str): file location
Returns:
data (dict)
"""
with open(path, 'r') as f:
data = json.load(f)
return data
def to_sec(ts):
"""Format time string to seconds.
Args:
ts (string): time string.
Returns:
time (float): second format
"""
try:
return datetime.strptime(ts, '%Y-%m-%d %H:%M:%S').timestamp()
    except ValueError:
return datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f').timestamp()
def three_random_split(DATA_DIR, multi_class=False):
"""Combine results from three random seed trainings.
Args:
DATA_DIR (str): data pickle file location.
multi_class (bool): if the regression has multi-class.
Returns:
train_true (np.array): training data true labels.
train_pred (np.array): training data predicted labels.
valid_true (np.array): validation data true labels.
valid_pred (np.array): validation data predicted labels.
test_true (np.array): testing data true labels.
test_pred (np.array): testing data predicted labels.
"""
y_true = []
y_pred = []
files = sorted(glob.glob(DATA_DIR + 'best_archs_result_0_*.pickle'))
for file in files:
with open(file, 'rb') as f:
_ = pickle.load(f)
for _ in range(3):
if multi_class:
y_true.append(pickle.load(f)[np.newaxis, ...])
y_pred.append(pickle.load(f).squeeze()[np.newaxis, ...])
else:
y_true.append(pickle.load(f).ravel())
y_pred.append(pickle.load(f).ravel().squeeze())
train_true = np.vstack([y_true[i] for i in [0, 3, 6]])
train_pred = np.vstack([y_pred[i] for i in [0, 3, 6]])
valid_true = np.vstack([y_true[i] for i in [1, 4, 7]])
valid_pred = np.vstack([y_pred[i] for i in [1, 4, 7]])
test_true = np.vstack([y_true[i] for i in [2, 5, 8]])
test_pred = np.vstack([y_pred[i] for i in [2, 5, 8]])
return train_true, train_pred, valid_true, valid_pred, test_true, test_pred
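# Example usage (hypothetical results directory holding the pickled runs):
#   tr_t, tr_p, va_t, va_p, te_t, te_p = three_random_split('results/run1/', multi_class=False)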
def three_random_mean_std(DATA_DIR, multi_class=False):
"""Calculate the mean and standard deviation of three random seed trainings.
Args:
DATA_DIR (str): data pickle file location.
multi_class (bool): if the regression has multi-class.
Returns:
m (float): mean value.
s (float): standard deviation value.
"""
output = three_random_split(DATA_DIR, multi_class=multi_class)
funcs = [mean_absolute_error, mean_squared_error, r2_score]
if not multi_class:
result = []
for func in funcs:
for i in range(3):
result.append([func(output[i * 2][j], output[i * 2 + 1][j]) for j in range(len(output[0]))])
result = np.array(result)
m = result.mean(axis=1)
s = result.std(axis=1)
print(tabulate(
[['Train', f'{m[0]:0.4f}+/-{s[0]:0.4f}', f'{m[3]:0.4f}+/-{s[3]:0.4f}', f'{m[6]:0.4f}+/-{s[6]:0.4f}'],
['Valid', f'{m[1]:0.4f}+/-{s[1]:0.4f}', f'{m[4]:0.4f}+/-{s[4]:0.4f}', f'{m[7]:0.4f}+/-{s[7]:0.4f}'],
['Test', f'{m[2]:0.4f}+/-{s[2]:0.4f}', f'{m[5]:0.4f}+/-{s[5]:0.4f}', f'{m[8]:0.4f}+/-{s[8]:0.4f}']],
headers=['', 'MAE', 'MSE', 'R2']))
else:
for c in range(output[0].shape[-1]):
result = []
for func in funcs:
for i in range(3):
result.append(
[func(output[i * 2][j, :, c], output[i * 2 + 1][j, :, c]) for j in range(len(output[0]))])
result = np.array(result)
m = result.mean(axis=1)
s = result.std(axis=1)
print(tabulate(
[['Train', f'{m[0]:0.4f}+/-{s[0]:0.4f}', f'{m[3]:0.4f}+/-{s[3]:0.4f}', f'{m[6]:0.4f}+/-{s[6]:0.4f}'],
['Valid', f'{m[1]:0.4f}+/-{s[1]:0.4f}', f'{m[4]:0.4f}+/-{s[4]:0.4f}', f'{m[7]:0.4f}+/-{s[7]:0.4f}'],
['Test', f'{m[2]:0.4f}+/-{s[2]:0.4f}', f'{m[5]:0.4f}+/-{s[5]:0.4f}', f'{m[8]:0.4f}+/-{s[8]:0.4f}']],
headers=['', 'MAE', 'MSE', 'R2']))
return m, s
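# For a single-target regression, m and s each hold nine values ordered metric-major
# (MAE, MSE, R2), each evaluated on the train, valid and test splits; for multi-class
# targets the returned values correspond to the last class processed.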
def create_csv(DATA_DIR, data):
"""Create a csv file of the architecture components.
Args:
DATA_DIR (str): data file location.
data (dict): the dictionary file containing the operations for each architecture.
"""
# Task specific
state_dims = ['dim(4)', 'dim(8)', 'dim(16)', 'dim(32)']
Ts = ['repeat(1)', 'repeat(2)', 'repeat(3)', 'repeat(4)']
attn_methods = ['attn(const)', 'attn(gcn)', 'attn(gat)', 'attn(sym-gat)', 'attn(linear)', 'attn(gen-linear)',
'attn(cos)']
attn_heads = ['head(1)', 'head(2)', 'head(4)', 'head(6)']
aggr_methods = ['aggr(max)', 'aggr(mean)', 'aggr(sum)']
update_methods = ['update(gru)', 'update(mlp)']
activations = ['act(sigmoid)', 'act(tanh)', 'act(relu)', 'act(linear)', 'act(elu)', 'act(softplus)',
'act(leaky_relu)',
'act(relu6)']
out = []
for state_dim in state_dims:
for T in Ts:
for attn_method in attn_methods:
for attn_head in attn_heads:
for aggr_method in aggr_methods:
for update_method in update_methods:
for activation in activations:
out.append(
[state_dim, T, attn_method, attn_head, aggr_method, update_method, activation])
out_pool = []
for functions in ['GlobalSumPool', 'GlobalMaxPool', 'GlobalAvgPool']:
for axis in ['(feature)', '(node)']: # Pool in terms of nodes or features
out_pool.append(functions + axis)
out_pool.append('flatten')
for state_dim in [16, 32, 64]:
out_pool.append(f'AttentionPool({state_dim})')
out_pool.append('AttentionSumPool')
out_connect = ['skip', 'connect']
def get_gat(index):
return out[index]
def get_pool(index):
return out_pool[index]
def get_connect(index):
return out_connect[index]
archs = np.array(data['arch_seq'])
rewards = np.array(data['raw_rewards'])
    a = np.empty((len(archs), 0), dtype=object)
a = np.append(a, archs, axis=-1)
a = np.append(a, rewards[..., np.newaxis], axis=-1)
    b = np.empty((0, 29), dtype=object)
for i in range(len(a)):
temp = a[i, :]
b0 = [get_gat(temp[0])[i] + '[cell1]' for i in range(len(get_gat(temp[0])))]
b1 = [get_connect(temp[1]) + '[link1]']
b2 = [get_gat(temp[2])[i] + '[cell2]' for i in range(len(get_gat(temp[2])))]
b3 = [get_connect(temp[3]) + '[link2]']
b4 = [get_connect(temp[4]) + '[link3]']
b5 = [get_gat(temp[5])[i] + '[cell3]' for i in range(len(get_gat(temp[5])))]
b6 = [get_connect(temp[6]) + '[link4]']
b7 = [get_connect(temp[7]) + '[link5]']
b8 = [get_connect(temp[8]) + '[link6]']
b9 = [get_pool(temp[9])]
bout = b0 + b1 + b2 + b3 + b4 + b5 + b6 + b7 + b8 + b9 + [temp[10]]
bout = np.array(bout, dtype=object)
b = np.append(b, bout[np.newaxis, ...], axis=0)
table = pd.DataFrame(data=b)
table.to_csv(DATA_DIR + 'nas_result.csv', encoding='utf-8', index=False, header=False)
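# Each row of nas_result.csv describes one evaluated architecture: seven component
# labels for each of the three GAT cells, six skip/connect link choices, the pooling
# operation, and the architecture's reward (29 columns in total).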
def moving_average(time_list, data_list, window_size=100):
"""Calculate the moving average.
Args:
time_list (list): a list of timestamps.
data_list (list): a list of data points.
window_size (int): the window size.
Returns:
time array and data array
"""
res_list = []
times_list = []
for i in range(len(data_list) - window_size):
times_list.append(sum(time_list[i:i + window_size]) / window_size)
res_list.append(sum(data_list[i:i + window_size]) / window_size)
return np.array(times_list), np.array(res_list)
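# For example, moving_average([0, 1, 2, 3, 4], [10, 20, 30, 40, 50], window_size=2)
# returns times array([0.5, 1.5, 2.5]) and values array([15., 25., 35.]).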
def plot_reward_vs_time(data, PLOT_DIR, ylim=None, time=True, plot=False, metric='MAE'):
"""Generate plot of search trajectory.
Args:
data (dict): the data dictionary.
PLOT_DIR (str): the location to store the figure.
ylim (float): the minimum value of the y axis.
time (bool): True if want time as x axis, else want instance number.
plot (bool): if want to create a plot.
metric (str): the type of metric on y axis.
"""
start_infos = data['start_infos'][0]
try:
start_time = to_sec(data['workload']['times'][0])
except:
start_time = to_sec(start_infos['timestamp'])
times = [to_sec(ts) - start_time for ts in data['timestamps']]
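    # Rewards are plotted against minutes elapsed since the run's first timestamp
    # (or against iteration index when time=False).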
x = times
y = data['raw_rewards']
plt.figure(figsize=(5, 4))
if time:
plt.plot(np.array(x) / 60, y, 'o', markersize=3)
plt.xlabel('Time (min)')
else:
plt.plot(y, 'o', markersize=3)
plt.xlabel('Iterations')
plt.ylabel(f'Reward (-{metric})')
plt.xlim(left=0)
if ylim is not None:
plt.ylim(ylim)
plt.locator_params(axis='y', nbins=4)
plt.savefig(PLOT_DIR + 'reward.png', dpi=300, bbox_inches='tight')
plt.savefig(PLOT_DIR+'reward.svg', bbox_inches='tight')
if not plot:
plt.close();
def three_random_parity_plot(DATA_DIR, PLOT_DIR, multi_class=False, limits=None, plot=False, ticks=None):
"""Generate parity plots from three random seed trainings.
Args:
DATA_DIR (str): the location of the data file.
PLOT_DIR (str): the location to store the figure.
multi_class (bool): if it is multi-class regression.
limits (list): the y limits you want to set.
plot (bool): if want to create a plot.
ticks (list): the x axis ticks.
"""
_, _, _, _, y_true_raw, y_pred_raw = three_random_split(DATA_DIR, multi_class=multi_class)
if not multi_class:
y_true = y_true_raw.ravel()
y_pred = y_pred_raw.ravel()
scaler = StandardScaler()
y_true = scaler.fit_transform(y_true[..., np.newaxis]).squeeze()
y_pred = scaler.fit_transform(y_pred[..., np.newaxis]).squeeze()
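        # True and predicted values are standardized independently, so the parity
        # line compares their relative spread rather than absolute units.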
fig, ax = plt.subplots(figsize=(4, 4))
min_value = np.min([y_true.min(), y_pred.min()])
max_value = np.max([y_true.max(), y_pred.max()])
dist = max_value - min_value
min_value -= 0.03 * dist
max_value += 0.03 * dist
if limits is not None:
min_value, max_value = limits
ax.plot(np.linspace(min_value, max_value, 100), np.linspace(min_value, max_value, 100), 'k--', alpha=0.5)
ax.scatter(y_true.ravel(), y_pred.ravel(), s=5, alpha=0.9)
plt.xlim(min_value, max_value)
plt.ylim(min_value, max_value)
plt.xlabel("True")
plt.ylabel("Predicted")
print(min_value, max_value)
from matplotlib import ticker
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1, 1))
if ticks is not None:
plt.xticks(ticks, ticks)
plt.yticks(ticks, ticks)
else:
plt.locator_params(axis='x', nbins=5)
plt.locator_params(axis='y', nbins=5)
ax.xaxis.set_major_formatter(formatter)
ax.yaxis.set_major_formatter(formatter)
# plt.tight_layout()
plt.savefig(PLOT_DIR + "parity_plot.png", bbox_inches='tight')
plt.savefig(PLOT_DIR + "parity_plot.svg", bbox_inches='tight')
if not plot:
plt.close();
else:
for c in range(y_true_raw.shape[-1]):
y_true = y_true_raw[..., c].ravel()
y_pred = y_pred_raw[..., c].ravel()
plt.figure(figsize=(4, 4))
min_value = np.min([y_true.min(), y_pred.min()])
max_value = np.max([y_true.max(), y_pred.max()])
dist = max_value - min_value
min_value -= 0.03 * dist
max_value += 0.03 * dist
if limits is not None:
min_value, max_value = limits
plt.plot(np.linspace(min_value, max_value, 100), np.linspace(min_value, max_value, 100), 'k--', alpha=0.5)
plt.scatter(y_true.ravel(), y_pred.ravel(), s=5, alpha=0.9)
plt.xlim(min_value, max_value)
plt.ylim(min_value, max_value)
plt.xlabel("True")
plt.ylabel("Predicted")
plt.locator_params(axis='x', nbins=5)
plt.locator_params(axis='y', nbins=5)
plt.savefig(PLOT_DIR + f"parity_plot_{c}.png", bbox_inches='tight')
if not plot:
plt.close();
def feature_importance(DATA_DIR, PLOT_DIR, plot=False):
"""Generate feature importance plots.
Args:
DATA_DIR (str): the location of the data file.
PLOT_DIR (str): the location to store the figure.
plot (bool): if want to create a plot.
"""
train_data = pd.read_csv(DATA_DIR + 'nas_result.csv', header=None)
df = train_data
    df_new = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
AIDeveloper
---------
@author: maikherbig
"""
import os,sys,gc
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'#suppress warnings/info from tensorflow
if not sys.platform.startswith("win"):
from multiprocessing import freeze_support
freeze_support()
# Make sure to get the right icon file on win,linux and mac
if sys.platform=="darwin":
icon_suff = ".icns"
else:
icon_suff = ".ico"
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtWidgets, QtGui
from pyqtgraph import Qt
import aid_start
dir_root = os.path.dirname(aid_start.__file__)#ask the module for its origin
dir_settings = os.path.join(dir_root,"aid_settings.json")#dir to settings
Default_dict = aid_start.get_default_dict(dir_settings)
#try:
# splashapp = QtWidgets.QApplication(sys.argv)
# #splashapp.setWindowIcon(QtGui.QIcon("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256.ico"))
# # Create and display the splash screen
# splash_pix = os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff)
# splash_pix = QtGui.QPixmap(splash_pix)
# #splash_pix = QtGui.QPixmap("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256"+icon_suff)
# splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
# splash.setMask(splash_pix.mask())
# splash.show()
#except:
# pass
#BEFORE importing tensorflow or anything from keras: make sure the keras.json has
#certain properties
keras_json_path = os.path.expanduser('~')+os.sep+'.keras'+os.sep+'keras.json'
if not os.path.isdir(os.path.expanduser('~')+os.sep+'.keras'):
os.mkdir(os.path.expanduser('~')+os.sep+'.keras')
aid_start.banner() #show a fancy banner in console
aid_start.keras_json_check(keras_json_path)
import traceback,shutil,re,ast,io,platform
import h5py,json,time,copy,urllib,datetime
from stat import S_IREAD,S_IRGRP,S_IROTH,S_IWRITE,S_IWGRP,S_IWOTH
import tensorflow as tf
from tensorboard import program
from tensorboard import default
from tensorflow.python.client import device_lib
devices = device_lib.list_local_devices()
device_types = [devices[i].device_type for i in range(len(devices))]
#Get the number of CPU cores and GPUs
cpu_nr = os.cpu_count()
gpu_nr = device_types.count("GPU")
print("Nr. of GPUs detected: "+str(gpu_nr))
print("Found "+str(len(devices))+" device(s):")
print("------------------------")
for i in range(len(devices)):
print("Device "+str(i)+": "+devices[i].name)
print("Device type: "+devices[i].device_type)
print("Device description: "+devices[i].physical_device_desc)
print("------------------------")
#Split CPU and GPU into two lists of devices
devices_cpu = []
devices_gpu = []
for dev in devices:
if dev.device_type=="CPU":
devices_cpu.append(dev)
elif dev.device_type=="GPU":
devices_gpu.append(dev)
else:
print("Unknown device type:"+str(dev)+"\n")
import numpy as np
rand_state = np.random.RandomState(117) #to get the same random number on diff. PCs
from scipy import ndimage,misc
from sklearn import metrics,preprocessing
import PIL
import dclab
import cv2
import pandas as pd
import openpyxl,xlrd
import psutil
from keras.models import model_from_json,model_from_config,load_model,clone_model
from keras import backend as K
if 'GPU' in device_types:
keras_gpu_avail = K.tensorflow_backend._get_available_gpus()
if len(keras_gpu_avail)>0:
print("Following GPU is used:")
print(keras_gpu_avail)
print("------------------------")
else:
print("TensorFlow detected GPU, but Keras didn't")
print("------------------------")
from keras.preprocessing.image import load_img
from keras.utils import np_utils,multi_gpu_model
from keras.utils.conv_utils import convert_kernel
import keras_metrics #side package for precision, recall etc during training
global keras_metrics
import model_zoo
from keras2onnx import convert_keras
from onnx import save_model as save_onnx
import aid_img, aid_dl, aid_bin
import aid_frontend
from partial_trainability import partial_trainability
import aid_imports
VERSION = "0.2.3" #Python 3.5.6 Version
model_zoo_version = model_zoo.__version__()
print("AIDeveloper Version: "+VERSION)
print("model_zoo.py Version: "+model_zoo.__version__())
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
tooltips = aid_start.get_tooltips()
class MyPopup(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
class WorkerSignals(QtCore.QObject):
'''
Code inspired from here: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Defines the signals available from a running worker thread.
Supported signals are:
finished
No data
error
`tuple` (exctype, value, traceback.format_exc() )
result
`object` data returned from processing, anything
progress
`int` indicating % progress
history
`dict` containing keras model history.history resulting from .fit
'''
finished = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(tuple)
result = QtCore.pyqtSignal(object)
progress = QtCore.pyqtSignal(int)
history = QtCore.pyqtSignal(dict)
class Worker(QtCore.QRunnable):
'''
Code inspired/copied from: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Worker thread
    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
'''
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progress_callback'] = self.signals.progress
self.kwargs['history_callback'] = self.signals.history
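        # The wrapped function receives these signals as keyword arguments, so a
        # long-running job can emit progress percentages and the Keras history dict
        # back to the GUI thread.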
@QtCore.pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result of the processing
finally:
self.signals.finished.emit() # Done
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi()
def setupUi(self):
aid_frontend.setup_main_ui(self,gpu_nr)
def retranslateUi(self):
aid_frontend.retranslate_main_ui(self,gpu_nr,VERSION)
def dataDropped(self, l):
#If there is data stored on ram tell user that RAM needs to be refreshed!
if len(self.ram)>0:
self.statusbar.showMessage("Newly added data is not yet in RAM. Only RAM data will be used. Use ->'File'->'Data to RAM now' to update RAM",5000)
#l is a list of some filenames (.rtdc) or folders (containing .jpg, jpeg, .png)
#Iterate over l and check if it is a folder or a file (directory)
isfile = [os.path.isfile(str(url)) for url in l]
isfolder = [os.path.isdir(str(url)) for url in l]
#####################For folders with images:##########################
#where are folders?
ind_true = np.where(np.array(isfolder)==True)[0]
foldernames = list(np.array(l)[ind_true]) #select the indices that are valid
#On mac, there is a trailing / in case of folders; remove them
foldernames = [os.path.normpath(url) for url in foldernames]
basename = [os.path.basename(f) for f in foldernames]
#Look quickly inside the folders and ask the user if he wants to convert
#to .rtdc (might take a while!)
if len(foldernames)>0: #User dropped (also) folders (which may contain images)
# filecounts = []
# for i in range(len(foldernames)):
# url = foldernames[i]
# files = os.listdir(url)
# files_full = [os.path.join(url,files[i]) for i in range(len(files))]
# filecounts.append(len([f for f in files_full if os.path.isfile(f)]))
# Text = []
# for b,n in zip(basename,filecounts):
# Text.append(b+": "+str(n)+" images")
# Text = "\n".join(Text)
Text = "Images from single folders are read and saved to individual \
.rtdc files with the same name like the corresponding folder.<b>If \
you have RGB images you can either save the full RGB information, \
or do a conversion to Grayscale (saves some diskspace but information \
about color is lost). RGB is recommended since AID will automatically\
do the conversion to grayscale later if required.<b>If you have \
Grayscale images, a conversion to RGB will just copy the info to all \
channels, which allows you to use RGB-mode and Grayscale-mode lateron."
Text = Text+"\nImages from following folders will be converted:\n"+"\n".join(basename)
#Show the user a summary with all the found folders and how many files are
#contained. Ask if he want to convert
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the images of the chosen folder(s)\
be converted to .rtdc using <b>RGB</b> or <b>Grayscale</b> format? <b>\
(RGB is recommended!)</b> Either option might take some time. You can \
reuse the .rtdc file next time.</p></body></html>"
msg.setText(text)
msg.setDetailedText(Text)
msg.setWindowTitle("Format for conversion to .rtdc (RGB/Grayscale)")
msg.addButton(QtGui.QPushButton('Convert to Grayscale'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('Convert to RGB'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
#Conversion of images in folders is (almost) independent from what
#is going to be fitted (So I leave the option menu still!)
#In options: Color Mode one can still use RGB mode and export here as
#Grayscale (but this would actually not work since RGB information is lost).
#The other way around works. Therefore it is recommended to export RGB!
if retval==0:
color_mode = "Grayscale"
channels = 1
elif retval==1:
color_mode = "RGB"
channels = 3
else:
return
self.statusbar.showMessage("Color mode' "+color_mode+"' is used",5000)
url_converted = []
for i in range(len(foldernames)):
url = foldernames[i]
print("Start converting images in\n"+url)
#try:
#get a list of files inside this directory:
images,pos_x,pos_y = [],[],[]
for root, dirs, files in os.walk(url):
for file in files:
try:
path = os.path.join(root, file)
img = load_img(path,color_mode=color_mode.lower()) #This uses PIL and supports many many formats!
images.append(np.array(img)) #append nice numpy array to list
#create pos_x and pos_y
pos_x.append( int(np.round(img.width/2.0,0)) )
pos_y.append( int(np.round(img.height/2.0,0)) )
except:
pass
#Thanks to andko76 for pointing that unequal image sizes cause an error:
#https://github.com/maikherbig/AIDeveloper/issues/1
#Check that all images have the same size
# img_shape_errors = 0
# text_error = "Images have unequal dimensions:"
# img_h = [a.shape[0] for a in images]
# img_h_uni = len(np.unique(img_h))
# if img_h_uni!=1:
# text_error += "\n- found unequal heights"
# img_shape_errors=1
# img_w = [a.shape[1] for a in images]
# img_w_uni = len(np.unique(img_w))
# if img_w_uni!=1:
# text_error += "\n- found unequal widths"
# img_shape_errors=1
# img_c = [len(a.shape) for a in images]
# img_c_uni = len(np.unique(img_c))
# if img_c_uni!=1:
# text_error += "\n- found unequal numbers of channels"
# img_shape_errors=1
# #If there were issues detected, show error message
# if img_shape_errors==1:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Warning)
# msg.setText(str(text_error))
# msg.setWindowTitle("Error: Unequal image shapes")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
                #Get a list of occurring image dimensions (width and height)
img_shape = [a.shape[0] for a in images] + [a.shape[1] for a in images]
dims = np.unique(img_shape)
#Get a list of occurences of image shapes
img_shape = [str(a.shape[0])+" x "+str(a.shape[1]) for a in images]
occurences = np.unique(img_shape,return_counts=True)
#inform user if there is more than one img shape
if len(occurences[0])>1 or len(dims)>1:
text_detail = "Path: "+url
text_detail += "\nFollowing image shapes are present"
for i in range(len(occurences[0])):
text_detail+="\n- "+str(occurences[1][i])+" times: "+str(occurences[0][i])
self.popup_imgRes = QtGui.QDialog()
self.popup_imgRes_ui = aid_frontend.popup_imageLoadResize()
self.popup_imgRes_ui.setupUi(self.popup_imgRes) #open a popup to show options for image resizing (make image equally sized)
#self.popup_imgRes.setWindowModality(QtCore.Qt.WindowModal)
self.popup_imgRes.setWindowModality(QtCore.Qt.ApplicationModal)
#Insert information into textBrowser
self.popup_imgRes_ui.textBrowser_imgResize_occurences.setText(text_detail)
Image_import_dimension = Default_dict["Image_import_dimension"]
self.popup_imgRes_ui.spinBox_ingResize_h_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_h_2.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_2.setValue(Image_import_dimension)
Image_import_interpol_method = Default_dict["Image_import_interpol_method"]
index = self.popup_imgRes_ui.comboBox_resizeMethod.findText(Image_import_interpol_method, QtCore.Qt.MatchFixedString)
if index >= 0:
self.popup_imgRes_ui.comboBox_resizeMethod.setCurrentIndex(index)
#Define function for the OK button:
def popup_imgRes_ok(images,channels,pos_x,pos_y):
print("Start resizing operation")
#Get info from GUI
final_h = int(self.popup_imgRes_ui.spinBox_ingResize_h_1.value())
print("Height:"+str(final_h))
final_w = int(self.popup_imgRes_ui.spinBox_ingResize_w_1.value())
print("Width:"+str(final_w))
Default_dict["Image_import_dimension"] = final_h
pix = 1
if self.popup_imgRes_ui.radioButton_imgResize_cropPad.isChecked():#cropping and padding method
images = aid_img.image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode="cv2.BORDER_CONSTANT")
elif self.popup_imgRes_ui.radioButton_imgResize_interpolate.isChecked():
interpolation_method = str(self.popup_imgRes_ui.comboBox_resizeMethod.currentText())
Default_dict["Image_import_interpol_method"] = interpolation_method
images = aid_img.image_resize_scale(images,pos_x,pos_y,final_h,final_w,channels,interpolation_method,verbose=False)
else:
print("Invalid image resize method!")
#Save the Default_dict
aid_bin.save_aid_settings(Default_dict)
self.popup_imgRes.accept()
return images
#Define function for the Cancel button:
def popup_imgRes_cancel():
self.popup_imgRes.close()
return
self.popup_imgRes_ui.pushButton_imgResize_ok.clicked.connect(lambda: popup_imgRes_ok(images,channels,pos_x,pos_y))
self.popup_imgRes_ui.pushButton_imgResize_cancel.clicked.connect(popup_imgRes_cancel)
retval = self.popup_imgRes.exec_()
#retval is 0 if the user clicked cancel or just closed the window; in this case just exit the function
if retval==0:
return
#get new pos_x, pos_y (after cropping, the pixel value for the middle of the image is different!)
pos_x = [int(np.round(img.shape[1]/2.0,0)) for img in images]
pos_y = [int(np.round(img.shape[0]/2.0,0)) for img in images]
#Now, all images are of identical shape and can be converted to a numpy array
images = np.array((images), dtype="uint8")
pos_x = np.array((pos_x), dtype="uint8")
pos_y = np.array((pos_y), dtype="uint8")
#Save as foldername.rtdc
fname = url+".rtdc"
if os.path.isfile(fname):
#ask user if file can be overwritten
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>File:"+fname+" already exists. Should it be overwritten?</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Overwrite file?")
msg.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
if retval==0:
try:
os.remove(fname)
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
retval = msg.exec_()
elif retval==1:
pass
else:
pass
else:#file does not yet exist. Create it
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
print("Finished converting! Final dimension of image tensor is:"+str(images.shape))
#Now load the created files directly to drag/drop-region!
self.dataDropped(url_converted)
#####################For .rtdc files:##################################
#where are files?
ind_true = np.where(np.array(isfile)==True)[0]
filenames = list(np.array(l)[ind_true]) #select the indices that are valid
#check if the file can be opened and get some information
fileinfo = []
for i in range(len(filenames)):
rtdc_path = filenames[i]
try:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
features = list(rtdc_ds["events"].keys())
#Make sure that "image", "pos_x" and "pos_y" are available
if "image" in features and "pos_x" in features and "pos_y" in features:
nr_images = rtdc_ds["events"]["image"].len()
pix = rtdc_ds.attrs["imaging:pixel size"]
xtra_in_available = len(rtdc_ds.keys())>2 #Is True, only if there are more than 2 elements.
fileinfo.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"pix":pix,"xtra_in":xtra_in_available})
else:
missing = []
for feat in ["image","pos_x","pos_y"]:
if feat not in features:
missing.append(feat)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Essential feature(s) are missing in data-set")
msg.setDetailedText("Data-set: "+rtdc_path+"\nis missing "+str(missing))
msg.setWindowTitle("Missing essential features")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
except Exception as e:
print(e)
#Add the stuff to the combobox on Plot/Peak Tab
url_list = [fileinfo[iterator]["rtdc_path"] for iterator in range(len(fileinfo))]
self.comboBox_chooseRtdcFile.addItems(url_list)
self.comboBox_selectData.addItems(url_list)
if len(url_list)==0: #This fixes the issue that the prog. crashes if accidentally a tableitem is dragged and "dropped" on the table
return
width=self.comboBox_selectData.fontMetrics().boundingRect(max(url_list, key=len)).width()
self.comboBox_selectData.view().setFixedWidth(width+10)
for rowNumber in range(len(fileinfo)):#for url in l:
url = fileinfo[rowNumber]["rtdc_path"]
#add to table
rowPosition = self.table_dragdrop.rowCount()
self.table_dragdrop.insertRow(rowPosition)
columnPosition = 0
line = QtWidgets.QLabel(self.table_dragdrop)
line.setText(url)
line.setDisabled(True)
line.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, line)
# item = QtWidgets.QTableWidgetItem(url)
# item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
# print(item.textAlignment())
# item.setTextAlignment(QtCore.Qt.AlignRight) # change the alignment
# #item.setTextAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AnchorRight) # change the alignment
# self.table_dragdrop.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
spinb = QtWidgets.QSpinBox(self.table_dragdrop)
spinb.valueChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, spinb)
for columnPosition in range(2,4):
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#Place a button which allows to show a plot (scatter, histo...lets see)
btn = QtWidgets.QPushButton(self.table_dragdrop)
btn.setMinimumSize(QtCore.QSize(50, 30))
btn.setMaximumSize(QtCore.QSize(50, 30))
btn.clicked.connect(self.button_hist)
btn.setText('Plot')
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, btn)
self.table_dragdrop.resizeRowsToContents()
# columnPosition = 5
# #Place a combobox with the available features
# cb = QtWidgets.QComboBox(self.table_dragdrop)
# cb.addItems(fileinfo[rowNumber]["features"])
# cb.setMinimumSize(QtCore.QSize(70, 30))
# cb.setMaximumSize(QtCore.QSize(70, 30))
# width=cb.fontMetrics().boundingRect(max(fileinfo[rowNumber]["features"], key=len)).width()
# cb.view().setFixedWidth(width+30)
# self.table_dragdrop.setCellWidget(rowPosition, columnPosition, cb)
columnPosition = 5
#Show the number of images contained in the dataset
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, fileinfo[rowNumber]["nr_images"])
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 6
#Field for the user to define the nr. of cells/epoch
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole,100)
#item.cellChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 7
#Pixel size
item = QtWidgets.QTableWidgetItem()
pix = float(fileinfo[rowNumber]["pix"])
#print(pix)
item.setData(QtCore.Qt.EditRole,pix)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 8
#Should data be shuffled (random?)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Checked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 9
#Zooming factor
item = QtWidgets.QTableWidgetItem()
zoom = 1.0
item.setData(QtCore.Qt.EditRole,zoom)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 10
#Should xtra_data be used?
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
xtra_in_available = fileinfo[rowNumber]["xtra_in"]
if xtra_in_available:
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
else:
item.setFlags( QtCore.Qt.ItemIsUserCheckable )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
#Functions for Keras augmentation checkboxes
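#Each handler receives the Qt checkState as 'on_or_off': 0 (unchecked) disables the
#corresponding line edit/spinbox and resets it; 2 (checked) enables it and restores
#the default value from Default_dict.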
def keras_changed_rotation(self,on_or_off):
if on_or_off==0:
self.lineEdit_Rotation.setText(str(0))
self.lineEdit_Rotation.setEnabled(False)
elif on_or_off==2:
self.lineEdit_Rotation.setText(str(Default_dict ["rotation"]))
self.lineEdit_Rotation.setEnabled(True)
else:
return
def keras_changed_width_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_widthShift.setText(str(0))
self.lineEdit_widthShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_widthShift.setText(str(Default_dict ["width_shift"]))
self.lineEdit_widthShift.setEnabled(True)
else:
return
def keras_changed_height_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_heightShift.setText(str(0))
self.lineEdit_heightShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_heightShift.setText(str(Default_dict ["height_shift"]))
self.lineEdit_heightShift.setEnabled(True)
else:
return
def keras_changed_zoom(self,on_or_off):
if on_or_off==0:
self.lineEdit_zoomRange.setText(str(0))
self.lineEdit_zoomRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_zoomRange.setText(str(Default_dict ["zoom"]))
self.lineEdit_zoomRange.setEnabled(True)
else:
return
def keras_changed_shear(self,on_or_off):
if on_or_off==0:
self.lineEdit_shearRange.setText(str(0))
self.lineEdit_shearRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_shearRange.setText(str(Default_dict ["shear"]))
self.lineEdit_shearRange.setEnabled(True)
else:
return
def keras_changed_brightplus(self,on_or_off):
if on_or_off==0:
self.spinBox_PlusLower.setValue(0)
self.spinBox_PlusLower.setEnabled(False)
self.spinBox_PlusUpper.setValue(0)
self.spinBox_PlusUpper.setEnabled(False)
elif on_or_off==2:
self.spinBox_PlusLower.setValue(Default_dict ["Brightness add. lower"])
self.spinBox_PlusLower.setEnabled(True)
self.spinBox_PlusUpper.setValue(Default_dict ["Brightness add. upper"])
self.spinBox_PlusUpper.setEnabled(True)
else:
return
def keras_changed_brightmult(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_MultLower.setValue(1.0)
self.doubleSpinBox_MultLower.setEnabled(False)
self.doubleSpinBox_MultUpper.setValue(1.0)
self.doubleSpinBox_MultUpper.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_MultLower.setValue(Default_dict ["Brightness mult. lower"])
self.doubleSpinBox_MultLower.setEnabled(True)
self.doubleSpinBox_MultUpper.setValue(Default_dict ["Brightness mult. upper"])
self.doubleSpinBox_MultUpper.setEnabled(True)
else:
return
def keras_changed_noiseMean(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseMean.setValue(0.0)
self.doubleSpinBox_GaussianNoiseMean.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseMean.setValue(Default_dict ["Gaussnoise Mean"])
self.doubleSpinBox_GaussianNoiseMean.setEnabled(True)
else:
return
def keras_changed_noiseScale(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseScale.setValue(0.0)
self.doubleSpinBox_GaussianNoiseScale.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseScale.setValue(Default_dict ["Gaussnoise Scale"])
self.doubleSpinBox_GaussianNoiseScale.setEnabled(True)
else:
return
def keras_changed_contrast(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_contrastLower.setEnabled(False)
self.doubleSpinBox_contrastHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
else:
return
def keras_changed_saturation(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
else:
return
def keras_changed_hue(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_hueDelta.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
return
def expert_mode_off(self,on_or_off):
"""
Reset all values on the expert tab to the default values, excluding the metrics
metrics are defined only once when starting fitting and should not be changed
"""
if on_or_off==0: #switch off
self.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.spinBox_epochs.setValue(1)
self.checkBox_expt_loss.setChecked(False)
self.expert_loss_off(0)
self.groupBox_learningRate.setChecked(False)
self.expert_learningrate_off(0)
self.checkBox_optimizer.setChecked(False)
self.expert_optimizer_off(0)
def expert_loss_off(self,on_or_off):
if on_or_off==0: #switch off
#switch back to categorical_crossentropy
index = self.comboBox_expt_loss.findText("categorical_crossentropy", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_expt_loss.setCurrentIndex(index)
def expert_learningrate_off(self,on_or_off):
if on_or_off==0: #switch off
#which optimizer is used? (there are different default learning-rates
#for each optimizer!)
optimizer = str(self.comboBox_optimizer.currentText())
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
self.radioButton_LrCycl.setChecked(False)
self.radioButton_LrExpo.setChecked(False)
self.radioButton_LrConst.setChecked(True)
def expert_optimizer_off(self,on_or_off):
if on_or_off==0: #switch off, set the optimizer back to Adam
optimizer = "Adam"
index = self.comboBox_optimizer.findText(optimizer, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_optimizer.setCurrentIndex(index)
#also reset the learning rate to the default
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
def expert_optimizer_changed(self,optimizer_text,listindex):
# print("optimizer_text: "+str(optimizer_text))
# print("listindex: "+str(listindex))
if optimizer_text=="":
return
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
#set the learning rate to the default for this optimizer
value_current = float(item_ui.doubleSpinBox_learningRate.value())
value_wanted = Default_dict["doubleSpinBox_learningRate_"+optimizer_text]
#insert the current value in the optimizer_settings:
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value_current
item_ui.optimizer_settings["comboBox_optimizer"] = optimizer_text
try: #only works on the fitting-popup
text = str(item_ui.textBrowser_FittingInfo.toPlainText())
except:
text = "Epoch"
# print("text: "+str(text))
if value_current!=value_wanted and "Epoch" in text:#avoid that the message pops up when the window is created
item_ui.doubleSpinBox_learningRate.setValue(value_wanted)
item_ui.doubleSpinBox_expDecInitLr.setValue(value_wanted)
#Inform user
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setWindowTitle("Learning rate to default")
msg.setText("Learning rate was set to the default for "+optimizer_text)
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def expert_lr_changed(self,value,optimizer_text,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value
def update_hist1(self):
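#Plot a histogram (numpy.histogram with bins='auto') of the feature currently
#selected in comboBox_feat1 into the pyqtgraph plot 'plt1' of the popup window.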
feature = str(self.comboBox_feat1.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
# self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_hist2(self):
feature = str(self.comboBox_feat2.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_scatter(self):
feature_x = str(self.comboBox_feat1.currentText())
feature_x_values = self.rtdc_ds["events"][feature_x]
feature_y = str(self.comboBox_feat2.currentText())
feature_y_values = self.rtdc_ds["events"][feature_y]
if len(feature_x_values)==len(feature_y_values):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
#y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(feature_x_values, feature_y_values,pen=None,symbol='o',clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def button_hist(self,item):
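#Called by the 'Plot' button of a row in table_dragdrop: loads the corresponding
#.rtdc file and opens a popup with two feature comboboxes and buttons to draw
#histograms or a scatter plot of the selected features.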
buttonClicked = self.sender()
index = self.table_dragdrop.indexAt(buttonClicked.pos())
rowPosition = index.row()
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
# feature_values = rtdc_ds[feature]
#Init a popup window
self.w = MyPopup()
self.w.setWindowTitle(rtdc_path)
self.w.setObjectName(_fromUtf8("w"))
self.gridLayout_w2 = QtWidgets.QGridLayout(self.w)
self.gridLayout_w2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_w2.setObjectName(_fromUtf8("gridLayout_w2"))
self.widget = QtWidgets.QWidget(self.w)
self.widget.setMinimumSize(QtCore.QSize(0, 65))
self.widget.setMaximumSize(QtCore.QSize(16777215, 65))
self.widget.setObjectName(_fromUtf8("widget"))
self.horizontalLayout_w3 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_w3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_w3.setObjectName(_fromUtf8("horizontalLayout_w3"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout_w"))
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout_w"))
self.comboBox_feat1 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat1.setObjectName(_fromUtf8("comboBox_feat1"))
features = list(self.rtdc_ds["events"].keys())
self.comboBox_feat1.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat1)
self.comboBox_feat2 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat2.setObjectName(_fromUtf8("comboBox_feat2"))
self.comboBox_feat2.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w)
self.horizontalLayout_w2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_w2.setObjectName(_fromUtf8("horizontalLayout_w2"))
self.pushButton_Hist1 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist1.setObjectName(_fromUtf8("pushButton_Hist1"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist1)
self.pushButton_Hist2 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist2.setObjectName(_fromUtf8("pushButton_Hist2"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w2)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w)
self.verticalLayout_w2 = QtWidgets.QVBoxLayout()
self.verticalLayout_w2.setObjectName(_fromUtf8("verticalLayout_w2"))
self.pushButton_Scatter = QtWidgets.QPushButton(self.widget)
self.pushButton_Scatter.setObjectName(_fromUtf8("pushButton_Scatter"))
self.verticalLayout_w2.addWidget(self.pushButton_Scatter)
self.checkBox_ScalePix = QtWidgets.QCheckBox(self.widget)
self.checkBox_ScalePix.setObjectName(_fromUtf8("checkBox_ScalePix"))
self.verticalLayout_w2.addWidget(self.checkBox_ScalePix)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w2)
self.gridLayout_w2.addWidget(self.widget, 0, 0, 1, 1)
self.pushButton_Hist1.setText("Hist")
self.pushButton_Hist1.clicked.connect(self.update_hist1)
self.pushButton_Hist2.setText("Hist")
self.pushButton_Hist2.clicked.connect(self.update_hist2)
self.pushButton_Scatter.setText("Scatter")
self.pushButton_Scatter.clicked.connect(self.update_scatter)
self.checkBox_ScalePix.setText("Scale by pix")
self.histogram = pg.GraphicsWindow()
self.plt1 = self.histogram.addPlot()
# y,x = np.histogram(feature_values, bins='auto')
# plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150))
self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
self.w.show()
def update_historyplot_pop(self,listindex):
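#Refresh the history plot of a fitting popup: collect the metrics whose checkboxes
#are checked in tableWidget_HistoryInfo_pop, build a DataFrame from the recorded
#Histories and draw one scatter series per selected metric.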
#listindex = self.popupcounter-1 #len(self.fittingpopups_ui)-1
#After the first epoch there are checkboxes available. Check if the user checked some:
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is this metric checked?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
self.Colors = Colors
Histories = self.fittingpopups_ui[listindex].Histories
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
# if len(DF1)>0:
# DF1 = pd.concat(DF1)
# else:
# return
self.fittingpopups_ui[listindex].widget_pop.clear()
#Create fresh plot
plt1 = self.fittingpopups_ui[listindex].widget_pop.addPlot()
plt1.showGrid(x=True,y=True)
plt1.addLegend()
plt1.setLabel('bottom', 'Epoch', units='')
#Create a dict that stores plots for each metric (for real time plotting)
self.fittingpopups_ui[listindex].historyscatters = dict()
for i in range(len(selected_items)):
key = selected_items[i]
df = DF1[key]
color = self.Colors[i]
pen_rollmedi = list(color.color().getRgb())
pen_rollmedi = pg.mkColor(pen_rollmedi)
pen_rollmedi = pg.mkPen(color=pen_rollmedi,width=6)
color = list(color.color().getRgb())
color[-1] = int(0.6*color[-1])
color = tuple(color)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
#print(df)
historyscatter = plt1.plot(range(len(df)), df.values, pen=None,symbol='o',symbolPen=None,symbolBrush=brush,name=key,clear=False)
#self.fittingpopups_ui[listindex].historyscatters.append(historyscatter)
self.fittingpopups_ui[listindex].historyscatters[key]=historyscatter
def stop_fitting_pop(self,listindex):
#listindex = len(self.fittingpopups_ui)-1
epochs = self.fittingpopups_ui[listindex].epoch_counter
#Stop button on the fitting popup
#Should stop the fitting process and save the metafile
#1. Change the nr. of requested epochs to a smaller number
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(epochs-1)
#2. Check the box which will cause the new parameters to be applied at the next epoch
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(True)
def pause_fitting_pop(self,listindex):
#Just change the text on the button
if str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())==" ":
#If the text on the button was Pause, change it to Continue
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText("")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("background-color: green")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"continue.png")))
elif str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
#If the text on the button was Continue, change it to Pause
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText(" ")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"pause.png")))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("")
def saveTextWindow_pop(self,listindex):
#Get the entire content of textBrowser_FittingInfo
text = str(self.fittingpopups_ui[listindex].textBrowser_FittingInfo.toPlainText())
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Fitting info', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename
if len(filename)>0:
f = open(filename,'w')
f.write(text)
f.close()
def clearTextWindow_pop(self,listindex):
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.clear()
def showModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
def saveModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Model summary', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename
f = open(filename,'w')
f.write(text)
f.close()
#class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
def get_class_weight(self,SelectedFiles,lossW_expert,custom_check_classes=False):
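#Return a dictionary {class_index: weight} in the format expected by Keras' class_weight.
#Illustrative example (assumed numbers): for 'Balanced' weighting with 1000, 250 and 500
#events/epoch for classes 0, 1 and 2, the returned dict would be {0: 1.0, 1: 4.0, 2: 2.0}.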
t1 = time.time()
print("Getting dictionary for class_weight")
if lossW_expert=="None":
return None
elif lossW_expert=="":
return None
elif lossW_expert=="Balanced":
#Which are training files?
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
counter = {}
for class_ in classes_uni:
ind = np.where(np.array(classes)==class_)[0]
nr_events_epoch_class = np.array(nr_events_epoch)[ind]
counter[class_] = np.sum(nr_events_epoch_class)
max_val = float(max(counter.values()))
return {class_id : max_val/num_images for class_id, num_images in counter.items()}
elif lossW_expert.startswith("{"):#Custom loss weights
class_weights = eval(lossW_expert)
if custom_check_classes:#Check that each element in classes_uni is contained in class_weights.keys()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
classes_uni = np.sort(classes_uni)
class_weights_keys = np.sort([int(a) for a in class_weights.keys()])
#each element in classes_uni has to be equal to class_weights_keys
equal = np.array_equal(classes_uni,class_weights_keys)
if equal == True:
return class_weights
else:
#If the equal is false I'm really in trouble...
#run the function again, but request 'Balanced' weights. I'm not sure if this should be the default...
class_weights = self.get_class_weight(SelectedFiles,"Balanced")
return ["Balanced",class_weights]
else:
return class_weights
t2 = time.time()
dt = np.round(t2-t1,2)
print("Comp. time = "+str(dt))
def accept_lr_range(self):
lr_start = str(self.popup_lrfinder_ui.lineEdit_LrMin.text())
lr_stop = str(self.popup_lrfinder_ui.lineEdit_LrMax.text())
if len(lr_start)>0 and len(lr_stop)>0:
self.lineEdit_cycLrMin.setText(lr_start)
self.lineEdit_cycLrMax.setText(lr_stop)
else:
print("Found no values for LR range")
def accept_lr_value(self):
single_lr = self.popup_lrfinder_ui.lineEdit_singleLr.text()
if len(single_lr)>0:
lr_value = float(single_lr)
self.doubleSpinBox_learningRate.setValue(lr_value)
self.doubleSpinBox_expDecInitLr.setValue(lr_value)
else:
print("Found no value for single LR!")
def reset_lr_settings(self):
self.popup_lrfinder_ui.lineEdit_startLr.setText(_translate("Form_LrFinder", "1e-10", None))
self.popup_lrfinder_ui.lineEdit_stopLr.setText(_translate("Form_LrFinder", "0.1", None))
self.popup_lrfinder_ui.doubleSpinBox_percDataT.setProperty("value", 100.0)
self.popup_lrfinder_ui.doubleSpinBox_percDataV.setProperty("value", 100.0)
self.popup_lrfinder_ui.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.popup_lrfinder_ui.spinBox_lineWidth.setProperty("value", 6)
self.popup_lrfinder_ui.spinBox_epochs.setProperty("value", 5)
def reset_lr_value(self):
self.popup_lrfinder_ui.lineEdit_singleLr.setText("")
#Uncheck and Check the groupbox to refresh the line
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(False)
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(True)
def reset_lr_range(self):
self.popup_lrfinder_ui.lineEdit_LrMin.setText("")
self.popup_lrfinder_ui.lineEdit_LrMax.setText("")
#Uncheck and Check the groupbox to refresh the range
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(False)
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(True)
def popup_lr_finder(self):
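#Open the learning-rate-finder popup: copy the currently selected model, input size,
#color mode, loss, optimizer and batch size from the main UI into the popup and wire
#its buttons (run, accept range/value, reset) to the corresponding handlers.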
SelectedFiles = self.items_clicked()
self.popup_lrfinder = MyPopup()
self.popup_lrfinder_ui = aid_frontend.popup_lrfinder()
self.popup_lrfinder_ui.setupUi(self.popup_lrfinder) #open a popup for lr finder
#Get information about the model
#check which radiobutton is checked and copy the text from there
if self.radioButton_NewModel.isChecked():
modelname = str(self.comboBox_ModelSelection.currentText())
if modelname==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
elif self.radioButton_LoadContinueModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadRestartModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please specify a model using the radiobuttons on the 'Define Model' -tab")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
#Put information onto UI
self.popup_lrfinder_ui.lineEdit_loadModel.setText(modelname)
self.popup_lrfinder_ui.spinBox_Crop_inpImgSize.setValue(in_dim)
color_mode = self.get_color_mode()
self.popup_lrfinder_ui.comboBox_colorMode.addItem(color_mode)
loss_str = str(self.comboBox_expt_loss.currentText())
self.popup_lrfinder_ui.comboBox_expt_loss.addItem(loss_str)
optimizer_str = str(self.comboBox_optimizer.currentText())
self.popup_lrfinder_ui.comboBox_optimizer.addItem(optimizer_str)
batch_size = self.spinBox_batchSize.value()
self.popup_lrfinder_ui.spinBox_batchSize.setValue(batch_size)
#Connect action_lr_finder function to button
self.popup_lrfinder_ui.pushButton_LrFindRun.clicked.connect(lambda: self.action_initialize_model(duties="initialize_lrfind"))
self.popup_lrfinder_ui.pushButton_rangeAccept.clicked.connect(self.accept_lr_range)
self.popup_lrfinder_ui.pushButton_singleAccept.clicked.connect(self.accept_lr_value)
self.popup_lrfinder_ui.pushButton_LrReset.clicked.connect(self.reset_lr_settings)
self.popup_lrfinder_ui.pushButton_singleReset.clicked.connect(self.reset_lr_value)
self.popup_lrfinder_ui.pushButton_rangeReset.clicked.connect(self.reset_lr_range)
#Update the plot when any plotting option is changed
self.popup_lrfinder_ui.comboBox_metric.currentIndexChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.spinBox_lineWidth.valueChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.checkBox_smooth.toggled.connect(self.update_lrfind_plot)
#LR single value when groupbox is toggled
self.popup_lrfinder_ui.groupBox_singleLr.toggled.connect(self.get_lr_single)
#LR range when groupbox is toggled
self.popup_lrfinder_ui.groupBox_LrRange.toggled.connect(self.get_lr_range)
#compute the number of steps/epoch
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
def update_stepsPerEpoch():
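#steps/epoch = ceil((percentage of training data used) * (total training events) / batch size).
#Illustrative example (assumed numbers): 10000 training events, 80% of the data and a
#batch size of 32 give ceil(8000/32) = 250 steps per epoch.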
batch_size = self.popup_lrfinder_ui.spinBox_batchSize.value()
perc_data = self.popup_lrfinder_ui.doubleSpinBox_percDataT.value()
nr_events = (perc_data/100)*nr_events_train_total
stepsPerEpoch = np.ceil(nr_events / float(batch_size))
self.popup_lrfinder_ui.spinBox_stepsPerEpoch.setValue(stepsPerEpoch)
update_stepsPerEpoch()
self.popup_lrfinder_ui.spinBox_batchSize.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder_ui.doubleSpinBox_percDataT.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder.show()
def popup_clr_settings(self,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_clrsettings = MyPopup()
item_ui.popup_clrsettings_ui = aid_frontend.Ui_Clr_settings()
item_ui.popup_clrsettings_ui.setupUi(item_ui.popup_clrsettings) #open a popup for lr plotting
##Manual insertion##
item_ui.popup_clrsettings_ui.spinBox_stepSize.setProperty("value", item_ui.clr_settings["step_size"])
item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.setProperty("value", item_ui.clr_settings["gamma"])
def clr_settings_ok():
step_size = int(item_ui.popup_clrsettings_ui.spinBox_stepSize.value())
gamma = float(item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.value())
item_ui.clr_settings["step_size"] = step_size #Number of epochs to fulfill half a cycle
item_ui.clr_settings["gamma"] = gamma #gamma factor for Exponential decrease method (exp_range)
print("Settings for cyclical learning rates were changed.")
#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
def clr_settings_cancel():#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
item_ui.popup_clrsettings_ui.pushButton_ok.clicked.connect(clr_settings_ok)
item_ui.popup_clrsettings_ui.pushButton_cancel.clicked.connect(clr_settings_cancel)
item_ui.popup_clrsettings.show()
def popup_lr_plot(self,listindex):
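#Open a popup that plots the learning-rate schedule over the planned number of epochs
#for the currently selected method: constant LR, cyclical LR (via aid_dl.cyclicLR) or
#exponential decay (via aid_dl.exponentialDecay).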
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_lrplot = MyPopup()
item_ui.popup_lrplot_ui = aid_frontend.popup_lrplot()
item_ui.popup_lrplot_ui.setupUi(item_ui.popup_lrplot) #open a popup for lr plotting
#compute total number of epochs that will be fitted
spinBox_NrEpochs = item_ui.spinBox_NrEpochs.value() #my own loop
spinBox_epochs = item_ui.spinBox_epochs.value() #inside model.fit()
nr_epochs = spinBox_NrEpochs*spinBox_epochs
item_ui.popup_lrplot_ui.spinBox_totalEpochs.setValue(nr_epochs)
#Get the number of training examples
SelectedFiles = self.items_clicked()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
if nr_events_train_total==0 and item_ui.radioButton_LrConst.isChecked()==False:
#for Cyclical learning rates and Exponential learning rates, the
#number of training images is needed
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no training data. Nr. of training images is required for this plot.")
msg.setWindowTitle("Nr. of training images = 0")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text_info = ""
if item_ui.radioButton_LrConst.isChecked():
text_info+="Constant learning rate\n"
epochs_plot = np.array(range(nr_epochs))
const_lr = float(self.doubleSpinBox_learningRate.value())
learningrates = np.repeat(const_lr,nr_epochs)
elif item_ui.radioButton_LrCycl.isChecked():
text_info+="Cyclical learning rates\n"
base_lr = float(item_ui.lineEdit_cycLrMin.text())
max_lr = float(item_ui.lineEdit_cycLrMax.text())
batch_size = int(item_ui.spinBox_batchSize.value())
step_size = item_ui.clr_settings["step_size"] #number of epochs to fulfill half a cycle
step_size_ = step_size*int(np.round(nr_events_train_total / batch_size))#batch updates per half cycle
mode = str(item_ui.comboBox_cycLrMethod.currentText())
clr_iterations = nr_epochs*int(np.round(nr_events_train_total / batch_size))#total number of batch updates (lr adjustments)
nr_cycles = (clr_iterations/step_size_)/2.0#number of cycles
gamma = item_ui.clr_settings["gamma"] #gamma factor for the exp_range
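#Illustrative example (assumed numbers): 1000 training images and batch_size=32 give
#~31 batch updates per epoch; with step_size=4 epochs, step_size_=124 updates per half
#cycle; fitting 24 epochs gives clr_iterations=744 and nr_cycles=3.0.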
#Generate text to display the settings used
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="base_lr: "+str(base_lr)+"\n"
text_info+="max_lr: "+str(max_lr)+"\n"
text_info+="batch_size: "+str(batch_size)+"\n"
text_info+="mode: "+str(mode)+"\n"
text_info+="gamma: "+str(gamma)+"\n"
text_info+="Nr. of epochs to fulfill one cycle: "+str(2*step_size)+"\n"
#text_info+="Total nr. of lr adjustmend: "+str(step_size_)+"\n"
text_info+="Total nr. of lr adjustments: "+str(clr_iterations)+"\n"
text_info+="Total nr. of cycles: "+str(nr_cycles)+"\n"
#Request the learning rates from the class cyclicLR
clr_iterations = np.arange(clr_iterations)
clr_1 = aid_dl.cyclicLR(base_lr=base_lr,max_lr=max_lr,step_size=step_size_,mode=mode,gamma=gamma)
clr_1.clr_iterations=clr_iterations#pass the number of clr iterations to the class
learningrates = clr_1.clr() #compute the learning rates for each iteration
#convert clr_iterations back to "epochs"
epochs_plot = clr_iterations/int(np.round(nr_events_train_total / batch_size))
elif item_ui.radioButton_LrExpo.isChecked():
text_info+="Exponentially decreased learning rates\n"
initial_lr = float(item_ui.doubleSpinBox_expDecInitLr.value())
decay_steps = int(item_ui.spinBox_expDecSteps.value())
decay_rate = float(item_ui.doubleSpinBox_expDecRate.value())
batch_size = int(item_ui.spinBox_batchSize.value())
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="initial_lr: "+str(initial_lr)+"\n"
text_info+="decay_steps: "+str(decay_steps)+"\n"
text_info+="decay_rate: "+str(decay_rate)+"\n"
#epochs_plot = np.array(range(nr_epochs))
epochs_plot = nr_epochs * int(np.round(nr_events_train_total / batch_size))
epochs_plot = np.arange(epochs_plot)
exp_decay = aid_dl.exponentialDecay(initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
exp_decay.iterations=epochs_plot#pass the number of iterations to the class
learningrates = exp_decay.exp_decay()
epochs_plot = epochs_plot/int(np.round(nr_events_train_total / batch_size))
#learningrates = aid_dl.exponentialDecay(epochs_plot,initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
def refreshPlot():
try: # try to empty the plot
item_ui.popup_lrplot_ui.lr_plot.removeItem(item_ui.lr_line2)
except:
pass
#Get design settings
color = item_ui.popup_lrplot_ui.pushButton_color.palette().button().color()
width = int(item_ui.popup_lrplot_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor=pg.mkPen(color, width=width)
#define curve and add to plot
item_ui.lr_line2 = pg.PlotCurveItem(x=epochs_plot, y=learningrates,pen=pencolor)
item_ui.popup_lrplot_ui.lr_plot.addItem(item_ui.lr_line2)
refreshPlot()
item_ui.popup_lrplot_ui.pushButton_refreshPlot.clicked.connect(refreshPlot)
item_ui.popup_lrplot_ui.textBrowser_lrSettings.setText(text_info)
item_ui.popup_lrplot.show()
def lossWeights_activated(self,on_or_off,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
if on_or_off==False:#0 means switched OFF
item_ui.lineEdit_lossW.setText("")
item_ui.pushButton_lossW.setEnabled(False)
#this happens when the user activated the expert option "loss weights"
elif on_or_off==True:#2 means switched ON
#Activate button
item_ui.pushButton_lossW.setEnabled(True)
self.lossWeights_popup(listindex)
def lossWeights_popup(self,listindex):
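#Open a popup with one table row per class, showing total events, events/epoch and the
#fraction of the training set, plus a spinbox to define a custom loss weight per class.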
if listindex==-1:
item_ui = self
SelectedFiles = self.items_clicked()
else:
item_ui = self.fittingpopups_ui[listindex]
SelectedFiles = item_ui.SelectedFiles
item_ui.popup_lossW = MyPopup()
item_ui.popup_lossW_ui = aid_frontend.popup_lossweights()
item_ui.popup_lossW_ui.setupUi(item_ui.popup_lossW) #open a popup to show the numbers of events in each class in a table
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 5 columns: ["Class","Events tot.","Events/Epoch","Events/Epoch[%]","Loss weight"]
item_ui.popup_lossW_ui.tableWidget_lossW.setColumnCount(5)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
item_ui.popup_lossW_ui.tableWidget_lossW.setRowCount(nr_rows)
#Define the header labels of the table
header_labels = ["Class", "Events tot." ,"Events/Epoch", "Events/Epoch[%]", "Loss weight"]
item_ui.popup_lossW_ui.tableWidget_lossW.setHorizontalHeaderLabels(header_labels)
header = item_ui.popup_lossW_ui.tableWidget_lossW.horizontalHeader()
for i in range(len(header_labels)):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Fill the table
rowPosition = 0
#Training info
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
#Total nr of cells for each index
for index in np.unique(indices_train):
colPos = 0 #"Class" #put the index (class!) in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
colPos = 1 #"Events tot."
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 2 #"Events/Epoch"
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 3 #"Events/Epoch[%]"
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.round(np.sum(nr_events_epoch)/float(nr_events_train_total),2)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 4 #"Loss weights"
#for each class, create a spinbox to define the loss weight
spinb = QtWidgets.QDoubleSpinBox(item_ui.popup_lossW_ui.tableWidget_lossW)
spinb.setEnabled(False)
spinb.setMinimum(-99999)
spinb.setMaximum(99999)
spinb.setSingleStep(0.1)
spinb.setValue(1.0) #Default in Keras is "None", which means class_weight=1.0
item_ui.popup_lossW_ui.tableWidget_lossW.setCellWidget(rowPosition, colPos, spinb)
rowPosition += 1
item_ui.popup_lossW_ui.tableWidget_lossW.resizeColumnsToContents()
item_ui.popup_lossW_ui.tableWidget_lossW.resizeRowsToContents()
item_ui.popup_lossW.show()
item_ui.popup_lossW_ui.pushButton_pop_lossW_cancel.clicked.connect(lambda: self.lossW_cancel(listindex))
item_ui.popup_lossW_ui.pushButton_pop_lossW_ok.clicked.connect(lambda: self.lossW_ok(listindex))
item_ui.popup_lossW_ui.comboBox_lossW.currentIndexChanged.connect(lambda on_or_off: self.lossW_comboB(on_or_off,listindex))
def optimizer_change_settings_popup(self,listindex):
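#Open the advanced optimizer-settings popup, pre-filled from item_ui.optimizer_settings;
#'OK' writes the values back, 'Reset' restores the defaults from aid_dl.get_optimizer_settings(),
#and changing an optimizer's learning rate also updates the main UI's learning-rate fields.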
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_optim = MyPopup()
item_ui.popup_optim_ui = aid_frontend.Ui_Form_expt_optim()
item_ui.popup_optim_ui.setupUi(item_ui.popup_optim) #open a popup to show advances settings for optimizer
##Manual insertion##
optimizer_name = item_ui.optimizer_settings["comboBox_optimizer"].lower()
if optimizer_name=='sgd':
item_ui.popup_optim_ui.radioButton_sgd.setChecked(True)
elif optimizer_name=='rmsprop':
item_ui.popup_optim_ui.radioButton_rms.setChecked(True)
elif optimizer_name=='adagrad':
item_ui.popup_optim_ui.radioButton_adagrad.setChecked(True)
elif optimizer_name=='adadelta':
item_ui.popup_optim_ui.radioButton_adadelta.setChecked(True)
elif optimizer_name=='adam':
item_ui.popup_optim_ui.radioButton_adam.setChecked(True)
elif optimizer_name=='adamax':
item_ui.popup_optim_ui.radioButton_adamax.setChecked(True)
elif optimizer_name=='nadam':
item_ui.popup_optim_ui.radioButton_nadam.setChecked(True)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(item_ui.optimizer_settings["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(item_ui.optimizer_settings["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"])
def change_lr(lr):
item_ui.doubleSpinBox_learningRate.setValue(lr)
item_ui.doubleSpinBox_expDecInitLr.setValue(lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.valueChanged.connect(change_lr)
def change_optimizer(optimizer_name):
index = item_ui.comboBox_optimizer.findText(optimizer_name, QtCore.Qt.MatchFixedString)
if index >= 0:
item_ui.comboBox_optimizer.setCurrentIndex(index)
#get the learning rate for that optimizer
lr = item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_name.lower()]
change_lr(lr)
item_ui.popup_optim_ui.radioButton_adam.toggled.connect(lambda: change_optimizer("Adam"))
item_ui.popup_optim_ui.radioButton_sgd.toggled.connect(lambda: change_optimizer("SGD"))
item_ui.popup_optim_ui.radioButton_rms.toggled.connect(lambda: change_optimizer("RMSprop"))
item_ui.popup_optim_ui.radioButton_adagrad.toggled.connect(lambda: change_optimizer("Adagrad"))
item_ui.popup_optim_ui.radioButton_adadelta.toggled.connect(lambda: change_optimizer("Adadelta"))
item_ui.popup_optim_ui.radioButton_adamax.toggled.connect(lambda: change_optimizer("Adamax"))
item_ui.popup_optim_ui.radioButton_nadam.toggled.connect(lambda: change_optimizer("Nadam"))
def ok():
doubleSpinBox_lr_sgd = float(item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.value())
doubleSpinBox_sgd_momentum = float(item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.value())
checkBox_sgd_nesterov = bool(item_ui.popup_optim_ui.checkBox_sgd_nesterov.isChecked())
doubleSpinBox_lr_rmsprop = float(item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.value())
doubleSpinBox_rms_rho = float(item_ui.popup_optim_ui.doubleSpinBox_rms_rho.value())
doubleSpinBox_lr_adam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adam.value())
doubleSpinBox_adam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.value())
doubleSpinBox_adam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.value())
checkBox_adam_amsgrad = bool(item_ui.popup_optim_ui.checkBox_adam_amsgrad.isChecked())
doubleSpinBox_lr_adadelta = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.value())
doubleSpinBox_adadelta_rho = float(item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.value())
doubleSpinBox_lr_nadam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.value())
doubleSpinBox_nadam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.value())
doubleSpinBox_nadam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.value())
doubleSpinBox_lr_adagrad = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.value())
doubleSpinBox_lr_adamax = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.value())
doubleSpinBox_adamax_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.value())
doubleSpinBox_adamax_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.value())
item_ui.optimizer_settings["doubleSpinBox_lr_sgd"] = doubleSpinBox_lr_sgd
item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"] = doubleSpinBox_sgd_momentum
item_ui.optimizer_settings["checkBox_sgd_nesterov"] = checkBox_sgd_nesterov
item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"] = doubleSpinBox_lr_rmsprop
item_ui.optimizer_settings["doubleSpinBox_rms_rho"] = doubleSpinBox_rms_rho
item_ui.optimizer_settings["doubleSpinBox_lr_adam"] = doubleSpinBox_lr_adam
item_ui.optimizer_settings["doubleSpinBox_adam_beta1"] = doubleSpinBox_adam_beta1
item_ui.optimizer_settings["doubleSpinBox_adam_beta2"] = doubleSpinBox_adam_beta2
item_ui.optimizer_settings["checkBox_adam_amsgrad"] = checkBox_adam_amsgrad
item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"] = doubleSpinBox_lr_adadelta
item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"] = doubleSpinBox_adadelta_rho
item_ui.optimizer_settings["doubleSpinBox_lr_nadam"] = doubleSpinBox_lr_nadam
item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"] = doubleSpinBox_nadam_beta1
item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"] = doubleSpinBox_nadam_beta2
item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"] = doubleSpinBox_lr_adagrad
item_ui.optimizer_settings["doubleSpinBox_lr_adamax"] = doubleSpinBox_lr_adamax
item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"] = doubleSpinBox_adamax_beta1
item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"] = doubleSpinBox_adamax_beta2
#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
print("Advanced settings for optimizer were changed.")
def cancel():#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
def reset():
print("Reset optimizer settings (in UI). To accept, click OK")
optimizer_default = aid_dl.get_optimizer_settings()
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(optimizer_default["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(optimizer_default["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(optimizer_default["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(optimizer_default["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(optimizer_default["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(optimizer_default["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(optimizer_default["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(optimizer_default["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(optimizer_default["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(optimizer_default["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(optimizer_default["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(optimizer_default["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(optimizer_default["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(optimizer_default["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(optimizer_default["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(optimizer_default["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(optimizer_default["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(optimizer_default["doubleSpinBox_adamax_beta2"])
item_ui.popup_optim_ui.pushButton_ok.clicked.connect(ok)
item_ui.popup_optim_ui.pushButton_cancel.clicked.connect(cancel)
item_ui.popup_optim_ui.pushButton_reset.clicked.connect(reset)
item_ui.popup_optim.show()
def onLayoutChange(self,app):
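#Apply the stylesheet of the layout selected in the menu (Normal, Dark or DarkOrange)
#and persist the choice in Default_dict / the settings file for the next start.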
#Get the text of the triggered layout
layout_trig = (self.sender().text()).split(" layout")[0]
layout_current = Default_dict["Layout"]
if layout_trig == layout_current:
self.statusbar.showMessage(layout_current+" layout is already in use",2000)
return
elif layout_trig == "Normal":
#Change Layout in Defaultdict to "Normal", such that next start will use Normal layout
Default_dict["Layout"] = "Normal"
app.setStyleSheet("")
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "Dark":
#Change Layout in Defaultdict to "Dark", such that next start will use Dark layout
Default_dict["Layout"] = "Dark"
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "DarkOrange":
#Change Layout in Defaultdict to "DarkOrange", such that next start will use DarkOrange layout
Default_dict["Layout"] = "DarkOrange"
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
#Save the layout to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def onTooltipOnOff(self,app):
#what is the current layout?
if bool(self.actionLayout_Normal.isChecked())==True: #use normal layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
app.setStyleSheet("")
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
app.setStyleSheet("""QToolTip {
opacity: 0
}""")
elif bool(self.actionLayout_Dark.isChecked())==True: #use dark layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_dark_notooltip.txt")#dir to settings
f = open(dir_layout, "r")#I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionLayout_DarkOrange.isChecked())==True: #use darkorange layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange_notooltip.txt")#dir to settings
f = open(dir_layout, "r")
f = f.read()
app.setStyleSheet(f)
def onIconThemeChange(self):
#Get the text of the triggered icon theme
icontheme_trig = self.sender().text()
icontheme_current = Default_dict["Icon theme"]
if icontheme_trig == icontheme_current:
self.statusbar.showMessage(icontheme_current+" is already in use",2000)
return
elif icontheme_trig == "Icon theme 1":
Default_dict["Icon theme"] = "Icon theme 1"
self.statusbar.showMessage("Icon theme 1 will be used after restart",2000)
elif icontheme_trig == "Icon theme 2":
Default_dict["Icon theme"] = "Icon theme 2"
self.statusbar.showMessage("Icon theme 2 will be used after restart",2000)
#Save the new icon theme to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def items_clicked(self):
#This function checks which datasets are checked on table_dragdrop and returns the necessary data
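#Columns read below (table_dragdrop): 0=file path, 1=class index, 2=use for training,
#3=use for validation, 5=total events, 6=events/epoch, 8=shuffle, 9=zoom factor, 10=xtra_data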
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)#rtdc_ds.hash
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
return SelectedFiles
def items_available(self):
"""
Grab the information of all rows of table_dragdrop, checked and unchecked.
Does not load the rtdc datasets (saves time).
"""
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"NotSpecified","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def items_clicked_no_rtdc_ds(self):
#This function checks which datasets are checked on table_dragdrop and returns the necessary data
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def uncheck_if_zero(self,item):
#If the Nr. of epochs is changed to zero:
#uncheck the dataset for train/valid
row = item.row()
col = item.column()
#if the user changed Nr. of cells per epoch to zero
if col==6 and int(item.text())==0:
#get the checkstate of the corresponding T/V
cb_t = self.table_dragdrop.item(row, 2)
if cb_t.checkState() == QtCore.Qt.Checked:
cb_t.setCheckState(QtCore.Qt.Unchecked)
cb_v = self.table_dragdrop.item(row, 3)
if cb_v.checkState() == QtCore.Qt.Checked:
cb_v.setCheckState(QtCore.Qt.Unchecked)
def item_click(self,item):
colPosition = item.column()
rowPosition = item.row()
#if Shuffle was clicked (col=8), check if this checkbox is not deactivated
if colPosition==8:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#Shuffle was just (re-)checked: the 'Events/Epoch' cell was grayed out while shuffle was off, so make it editable again
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
if len(self.ram)>0:
self.statusbar.showMessage("Make sure to update RAM (->Edit->Data to RAM now) after changing Data-set",2000)
self.ram = dict() #clear the ram, since the data was changed
self.dataOverviewOn()
#When data is clicked, always reset the validation set (only important for 'Assess Model'-tab)
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
def dataOverviewOn(self):
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def dataOverviewOn_OnChange(self,item):
#When a value is entered in Events/Epoch and enter is hit
#there is no update of the table called
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
rowPosition = item.row()
colPosition = item.column()
if colPosition==6:#when the spinbox (Class) is used or a new number is entered in "Events/Epoch", the table is not updated automatically
#get the new value
nr_cells = self.table_dragdrop.cellWidget(rowPosition, colPosition)
if nr_cells==None:
return
else:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def update_data_overview(self,SelectedFiles):
#Check if there are custom class names (determined by user)
rows = self.tableWidget_Info.rowCount()
self.classes_custom = [] #by default assume there are no custom classes
classes_custom_bool = False
if rows>0:#if >0, then there is already a table existing
classes,self.classes_custom = [],[]
for row in range(rows):
try:
class_ = self.tableWidget_Info.item(row,0).text()
if class_.isdigit():
classes.append(class_)#get the classes
except:
pass
try:
self.classes_custom.append(self.tableWidget_Info.item(row,3).text())#get the classes
except:
pass
classes = np.unique(classes)
if len(classes)==len(self.classes_custom):#equal in length
same = [i for i, j in zip(classes, self.classes_custom) if i == j] #which items are identical?
if len(same)==0:
#apparently there are custom classes! Save them
classes_custom_bool = True
if len(SelectedFiles)==0:#reset the table
#Table1
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = 2*nr_ind+2 #add two rows for intermediate headers (Train/Valid)
self.tableWidget_Info.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
#self.tableWidget_Info.resizeColumnsToContents()
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Training info
rowPosition = 0
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Train. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
classes = np.unique(indices_train)
if len(classes)==len(self.classes_custom):
classes_custom_bool = True
else:
classes_custom_bool = False
#display information for each individual class
for index_ in range(len(classes)):
#for index in np.unique(indices_train):
index = classes[index_]
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
#Total nr of cells for each class
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
if classes_custom_bool==False:
item.setData(QtCore.Qt.EditRole,str(index))
else:
item.setData(QtCore.Qt.EditRole,self.classes_custom[index_])
self.tableWidget_Info.setItem(rowPosition, 3, item)
rowPosition += 1
#Validation info
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Val. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
rowPosition += 1
self.tableWidget_Info.resizeColumnsToContents()
self.tableWidget_Info.resizeRowsToContents()
def update_data_overview_2(self,SelectedFiles):
if len(SelectedFiles)==0:
#Table2
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
#Initiate the table with 4 columns : this will be ["Index","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 4 columns : this will be ["Index","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
self.tableWidget_Info_2.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
rowPosition = 0
#Validation info
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info_2.setItem(rowPosition, 1, item)
#Column for color
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, "")
item.setBackground(QtGui.QColor(self.colorsQt[index]))
self.tableWidget_Info_2.setItem(rowPosition, 2, item)
#Column for User specified name
item = QtWidgets.QTableWidgetItem()
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 3, item)
rowPosition += 1
self.tableWidget_Info_2.resizeColumnsToContents()
self.tableWidget_Info_2.resizeRowsToContents()
def tableWidget_Info_2_click(self,item):
if item is not None:
if item.column()==2:
tableitem = self.tableWidget_Info_2.item(item.row(), item.column())
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
def tableWidget_HistoryItems_dclick(self,item):
if item is not None:
tableitem = self.tableWidget_HistoryItems.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
self.update_historyplot()
def select_all(self,col):
"""
Check/Uncheck items on table_dragdrop
"""
apply_at_col = [2,3,8,10]
if col not in apply_at_col:
return
#otherwiese continue
rows = range(self.table_dragdrop.rowCount()) #Number of rows of the table
tableitems = [self.table_dragdrop.item(row, col) for row in rows]
checkStates = [tableitem.checkState() for tableitem in tableitems]
#Checked?
checked = [state==QtCore.Qt.Checked for state in checkStates]
if set(checked)=={True}:#all are checked!
#Uncheck all!
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Unchecked)
else:#otherwise check all
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Checked)
#If shuffle column was clicked do some extra
if col==8:
for rowPosition in rows:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#Inspect this table item. If shuffle was checked before, it will be grayed out. Invert normal cell then
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
#Finally, update the Data-Overview-Box
self.dataOverviewOn()#update the overview box
def item_dclick(self, item):
#Check/Uncheck if item is from column 2 or 3
tableitem = self.table_dragdrop.item(item.row(), item.column())
if item.column() in [2,3]:
#If the item is unchecked ->check it!
if tableitem.checkState() == QtCore.Qt.Unchecked:
tableitem.setCheckState(QtCore.Qt.Checked)
#else, the other way around
elif tableitem.checkState() == QtCore.Qt.Checked:
tableitem.setCheckState(QtCore.Qt.Unchecked)
#Show example image if item on column 0 was dclicked
if item.column() == 0:
#rtdc_path = str(item.text())
#rtdc_path = tableitem.text()
rtdc_path = self.table_dragdrop.cellWidget(item.row(), item.column()).text()
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
nr_images = rtdc_ds["events"]["image"].len()
ind = np.random.randint(0,nr_images)
img = rtdc_ds["events"]["image"][ind]
if len(img.shape)==2:
height, width = img.shape
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.label_image = QtWidgets.QLabel(self.w)
self.label_cropimage = QtWidgets.QLabel(self.w)
#zoom image such that longest side is 512
zoom_factor = np.round(float(512.0/np.max(img.shape)),0)
#Get the order, specified in Image processing->Zoom Order
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Convert to corresponding cv2 zooming method
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_zoomed = cv2.resize(img, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_zoomed.shape
if channels==3:
height, width, _ = img_zoomed.shape
if channels==1:
qi=QtGui.QImage(img_zoomed.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
qi = QtGui.QImage(img_zoomed.data,img_zoomed.shape[1], img_zoomed.shape[0], QtGui.QImage.Format_RGB888)
self.label_image.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_image, 1,1)
#get the location of the cell
rowPosition = item.row()
pix = float(self.table_dragdrop.item(rowPosition, 7).text())
#pix = rtdc_ds.config["imaging"]["pixel size"]
PIX = pix
pos_x,pos_y = rtdc_ds["events"]["pos_x"][ind]/PIX,rtdc_ds["events"]["pos_y"][ind]/PIX
cropsize = self.spinBox_imagecrop.value()
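#Example (assumed values): pos_x=120um at a pixel size of 0.34um/pix gives a centroid
#at ~353pix; with cropsize=64 the crop then spans pixels 321...385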
y1 = int(round(pos_y))-cropsize/2
x1 = int(round(pos_x))-cropsize/2
y2 = y1+cropsize
x2 = x1+cropsize
#Crop the image
img_crop = img[int(y1):int(y2),int(x1):int(x2)]
#zoom image such that the height gets the same as for non-cropped img
zoom_factor = float(img_zoomed.shape[0])/img_crop.shape[0]
if zoom_factor == np.inf:
#the crop is empty; fall back to a resize factor of 1
if self.actionVerbose.isChecked()==True:
print("Resize factor was infinite; set it to 1.")
zoom_factor = 1
#Get the order, specified in Image processing->Zoom Order
zoom_order = str(self.comboBox_zoomOrder.currentText()) #
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_crop = cv2.resize(img_crop, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_crop.shape
qi=QtGui.QImage(img_crop.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
height, width, _ = img_crop.shape
qi = QtGui.QImage(img_crop.data,width, height, QtGui.QImage.Format_RGB888)
self.label_cropimage.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_cropimage, 1,2)
self.w.show()
def get_norm_from_modelparafile(self):
#Get the normalization method from a modelparafile
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta file (*meta.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
norm = pd.read_excel(filename,sheet_name='Parameters')["Normalization"]
norm = str(norm[0])
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid normalization method was specified. "
"Likely, this version of AIDeveloper does not support that normalization method. "
"Please define a valid normalization method.")
msg.setDetailedText("Supported normalization methods are: "+"\n".join(self.norm_methods))
msg.setWindowTitle("Invalid Normalization method")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("Invalid Normalization method")
def update_plottingTab(self):
#Get current text of combobox (url to data set)
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
keys = list(rtdc_ds["events"].keys())
#find keys of image_channels
keys_0d,keys_1d,keys_2d = [],[],[]
for key in keys:
if type(rtdc_ds["events"][key])==h5py._hl.dataset.Dataset:
shape = rtdc_ds["events"][key].shape
if len(shape)==1: #zero-dimensional info (single number per cell)
keys_0d.append(key)
elif len(shape)==2: #one-dimensional info (multiple numbers per cell)
keys_1d.append(key)
elif len(shape)==3: #two-dimensional info (images)
keys_2d.append(key)
#add the traces to the 1d features
if "trace" in keys:
for key_trace in list(rtdc_ds["events"]["trace"].keys()):
keys_1d.append(key_trace+" (RTFDC)")
#Sort keys_2d: "image" first; "mask" last
keys_2d.insert(0, keys_2d.pop(keys_2d.index("image")))
if "mask" in keys_2d: #"mask" is optional and might be missing
keys_2d.append(keys_2d.pop(keys_2d.index("mask")))
#Fill those features into the comboboxes of the scatterplot
self.comboBox_featurex.addItems(keys_0d)
self.comboBox_featurey.addItems(keys_0d)
#check if masks or contours are available
cont_available = "mask" in keys or "contour" in keys
self.checkBox_contour.setEnabled(cont_available)
self.checkBox_contour.setChecked(cont_available)
#Centroid is always available (prerequisite for AIDeveloper)
self.checkBox_centroid.setEnabled(True)
self.checkBox_centroid.setChecked(True)
#Initialize option menus
self.contour_options_nr = 0
self.centroid_options_nr = 0
self.show_1d_options_nr = 0
self.show_2d_options_nr = 0
self.init_contour_options(keys_2d)
self.init_centroid_options(keys_1d)
self.init_2d_options(keys_2d)
self.init_1d_options(keys_1d)
def init_contour_options(self,keys_2d):
print("Work in progress")
# self.popup_layercontrols = MyPopup()
# self.popup_layercontrols_ui = frontend.Ui_LayerControl()
# self.popup_layercontrols_ui.setupUi(self.popup_layercontrols,keys_2d) #open a popup
def init_centroid_options(self,keys_image):
print("Work in progress")
# self.popup_centroid_options = MyPopup()
# self.popup_centroid_options_ui = aid_frontend.Ui_centroid_options()
# self.popup_centroid_options_ui.setupUi(self.popup_centroid_options,keys_image) #open a popup
def init_2d_options(self,keys_2d):
#Initialize 2d Option Menu. Range values are saved and manipulated here
self.popup_2dOptions = MyPopup()
self.popup_2dOptions_ui = aid_frontend.Ui_2dOptions()
self.popup_2dOptions_ui.setupUi(self.popup_2dOptions,keys_2d) #open a popup
def init_1d_options(self,keys_1d):
self.popup_1dOptions = MyPopup()
self.popup_1dOptions_ui = aid_frontend.Ui_1dOptions()
self.popup_1dOptions_ui.setupUi(self.popup_1dOptions,keys_1d) #open a popup
def show_contour_options(self):
self.contour_options_nr += 1
print("Work in progress")
def show_centroid_options(self):
print("Work in progress")
self.centroid_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.centroid_options_nr==1:
for iterator in range(len(self.popup_layercontrols_ui.spinBox_minChX)):
print(1)
def show_2d_options(self):
self.show_2d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_2d_options_nr==1:
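#connect each control only once: changing any slider, checkbox or colormap redraws the
#currently shown cell (put_image with ind=None falls back to the value of spinBox_cellInd)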
for iterator in range(len(self.popup_2dOptions_ui.spinBox_minChX)):
slider = self.popup_2dOptions_ui.horizontalSlider_chX[iterator]
slider.startValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
slider.endValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
comboBox = self.popup_2dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.currentIndexChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_auto_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
self.popup_2dOptions.show()
def show_1d_options(self):
self.show_1d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_1d_options_nr==1:
for iterator in range(len(self.popup_1dOptions_ui.checkBox_show_chX)):
checkBox = self.popup_1dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_line(index=b))
comboBox = self.popup_1dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.clicked.connect(lambda _, b=None: self.put_line(index=b))
self.popup_1dOptions.show()
def activate_deactivate_spinbox(self,newstate):
#get the checkstate of the Input model crop
if newstate==2:
#activate the spinbox
self.spinBox_imagecrop.setEnabled(True)
elif newstate==0:
self.spinBox_imagecrop.setEnabled(False)
def gray_or_rgb_augmentation(self,index):
#When Color-Mode is changed:
#Get the new colormode:
new_colormode = self.colorModes[index]
#when the new Color Mode is Grayscale, disable saturation and hue augmentation
if new_colormode=="Grayscale":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(False)
self.checkBox_saturation.setChecked(False)
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
self.checkBox_hue.setEnabled(False)
self.checkBox_hue.setChecked(False)
self.doubleSpinBox_hueDelta.setEnabled(False)
elif new_colormode=="RGB":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(True)
self.checkBox_saturation.setChecked(True)
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
self.checkBox_hue.setEnabled(True)
self.checkBox_hue.setChecked(True)
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
print("Invalid Color Mode")
def onClick(self,points,pointermethod):
#delete the last item if the user selected already one:
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
if pointermethod=="point":
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.feature_x))
a2 = (clicked_y)/float(np.max(self.feature_y))
#Which is the closest scatter point?
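#both coordinates are normalized by their maxima so that x and y contribute comparably
#to the Euclidean distance, regardless of their absolute scales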
dist = np.sqrt(( a1-self.scatter_x_norm )**2 + ( a2-self.scatter_y_norm )**2)
index = np.argmin(dist)
elif pointermethod=="index":
index = points
clicked_x = self.feature_x[index]
clicked_y = self.feature_y[index]
self.point_clicked = pg.ScatterPlotItem()
self.point_clicked.setData([clicked_x], [clicked_y],brush="r",symbol='o',symbolPen="w",size=15)
self.scatter_xy.addItem(self.point_clicked)
#self.scatter_xy.plot([clicked_x], [clicked_y],pen=None,symbol='o',symbolPen='w',clear=False)
self.point_was_selected_before = True
#No matter whether the user clicked or used the slider: always adjust spinbox and slider without running the onChange functions
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
self.put_image(index)
self.put_line(index)
def put_image(self,ind):
#check that the user is looking at the plotting tab
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.widget_showCell.removeItem(self.plot_contour)
except:
pass
if ind==None:
index = int(self.spinBox_cellInd.value())
else:
index = ind
rtdc_ds = self.rtdc_ds
#which channels should be displayed
channels = len(self.popup_2dOptions_ui.spinBox_minChX)
keys_2d = [self.popup_2dOptions_ui.label_layername_chX[i].text() for i in range(channels)]
#Assemble the image array that carries all channel information
if channels==1:
img = np.expand_dims(rtdc_ds["events"]["image"][index],-1)
elif channels>1:
img = np.stack( [rtdc_ds["events"][key][index] for key in keys_2d] ,axis=-1)
if len(img.shape)==2:
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
color_mode = str(self.comboBox_GrayOrRGB_2.currentText())
if color_mode=="Grayscale": #Slider allows to show individual layers: each is shown as grayscale
img = img
elif color_mode == "RGB":#User can define, which layers are shown in R,G,and B
#Retrieve the setting from self.popup_layercontrols_ui
ui_item = self.popup_2dOptions_ui
layer_names = [obj.text() for obj in ui_item.label_layername_chX]
layer_active = [obj.isChecked() for obj in ui_item.checkBox_show_chX]
layer_range = [obj.getRange() for obj in ui_item.horizontalSlider_chX]
layer_auto = [obj.isChecked() for obj in ui_item.checkBox_auto_chX]
layer_cmap = [obj.currentText() for obj in ui_item.comboBox_cmap_chX]
#Assemble the image according to the settings in self.popup_layercontrols_ui
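#For each target color (R,G,B): take all layers assigned to that color, clip their
#contrast to the chosen (or automatic) range, average them into one 8-bit plane and
#finally stack the three planes into an RGB image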
#Find activated layers for each color:
ind_active_r,ind_active_g,ind_active_b = [],[],[]
for ch in range(len(layer_cmap)):
#for color,active in zip(layer_cmap,layer_active):
if layer_cmap[ch]=="Red" and layer_active[ch]==True:
ind_active_r.append(ch)
if layer_cmap[ch]=="Green" and layer_active[ch]==True:
ind_active_g.append(ch)
if layer_cmap[ch]=="Blue" and layer_active[ch]==True:
ind_active_b.append(ch)
if len(ind_active_r)>0:
img_ch = img[:,:,np.array(ind_active_r)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_r)] #Range of all red channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_r)] #Automatic range
#Scale each red channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_r = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_r = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_g)>0:
img_ch = img[:,:,np.array(ind_active_g)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_g)] #Range of all green channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_g)] #Automatic range
#Scale each green channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_g = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_g = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_b)>0:
img_ch = img[:,:,np.array(ind_active_b)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_b)] #Range of all blue channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_b)] #Automatic range
#Scale each blue channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_b = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_b = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
#Assemble image by stacking all layers
img = np.stack([img_r,img_g,img_b],axis=-1)
#Get the levels of the previous frame
levels_init = self.widget_showCell.getLevels()
if levels_init==(0,1.0):
levels_init = (0,255)
#Get the layer index of the previous frame
index_ = self.widget_showCell.currentIndex
if color_mode=="Grayscale":
self.widget_showCell.setImage(img.T,autoRange=False,levels=levels_init,levelMode="mono")
self.widget_showCell.setCurrentIndex(index_)
elif color_mode=="RGB":
self.widget_showCell.setImage(np.swapaxes(img,0,1))
pix = rtdc_ds.attrs["imaging:pixel size"]
pos_x = rtdc_ds["events"]["pos_x"][index]/pix
pos_y = rtdc_ds["events"]["pos_y"][index]/pix
#Indicate the centroid of the cell
if self.checkBox_centroid.isChecked():
self.dot = pg.CircleROI(pos=(pos_x-2, pos_y-2), size=4, pen=QtGui.QPen(QtCore.Qt.red, 0.1), movable=False)
self.widget_showCell.getView().addItem(self.dot)
self.widget_showCell.show()
if self.checkBox_contour.isChecked():
#get the contour based on the mask
contour,_ = cv2.findContours(rtdc_ds["events"]["mask"][index], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contour = contour[0][:,0,:]
self.plot_contour = pg.PlotCurveItem(contour[:,0],contour[:,1],width=6,pen="r")
self.widget_showCell.getView().addItem(self.plot_contour)
def put_line(self,index):
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
#Fluorescence traces: clear first
try:
self.plot_fl_trace_.clear() #clear the plot
self.plot_fl_trace.clear() #clear the plot
except:
pass
if index==None:
index = int(self.spinBox_cellInd.value())
rtdc_ds = self.rtdc_ds
feature_keys = list(rtdc_ds.keys())
#which features should be displayed
features_nr = len(self.popup_1dOptions_ui.checkBox_show_chX)
keys_1d = [self.popup_1dOptions_ui.checkBox_show_chX[i].text() for i in range(features_nr)]
keys_1d_on = [self.popup_1dOptions_ui.checkBox_show_chX[i].isChecked() for i in range(features_nr)]
colors = [self.popup_1dOptions_ui.comboBox_cmap_chX[i].palette().button().color() for i in range(features_nr)]
colors = [list(c.getRgb()) for c in colors]
colors = [tuple(c) for c in colors]
ind = np.where(np.array(keys_1d_on)==True)[0]
keys_1d = list(np.array(keys_1d)[ind])
colors = list(np.array(colors)[ind])
for key_1d,color in zip(keys_1d,colors):
if key_1d.endswith(" (RTFDC)"):
key_1d = key_1d.split(" (RTFDC)")[0]
trace_flx = rtdc_ds["events"]["trace"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(trace_flx)),trace_flx,width=6,pen=pencolor,clear=False)
# if "fl1_max" in feature_keys and "fl1_pos" in feature_keys: #if also the maxima and position of the max are available: use it to put the region accordingly
# fl1_max,fl1_pos = rtdc_ds["events"]["fl1_max"][index],rtdc_ds["events"]["fl1_pos"][index]
else:
values = rtdc_ds["events"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(values)),values,width=6,pen=pencolor,clear=False)
#get the maximum of [fl1_max,fl2_max,fl3_max] and put the region to the corresponding fl-position
# ind = np.argmax(np.array([fl1_max,fl2_max,fl3_max]))
# region_pos = np.array([fl1_pos,fl2_pos,fl3_pos])[ind] #this region is already given in us. translate this back to range
# peak_height = np.array([fl1_max,fl2_max,fl3_max])[ind]
# sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
# fl_pos_ind = float((sample_rate*region_pos))/1E6 #
# #Indicate the used flx_max and flx_pos by a scatter dot
# self.peak_dot = self.plot_fl_trace.plot([float(fl_pos_ind)], [float(peak_height)],pen=None,symbol='o',symbolPen='w',clear=False)
def onScatterClick(self,event, points):
pointermethod = 'point'
if self.changedbyuser:
self.onClick(points,pointermethod)
def onIndexChange(self,index):
pointermethod = 'index'
if self.changedbyuser:
self.onClick(index,pointermethod)
#Set self.changedbyuser to False and change the spinbox and slider. changedbyuser=False prevents onClick function
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
def updateScatterPlot(self):
#If the Plot is updated, delete the dot in the cell-image
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
self.point_was_selected_before = False
#read url from current comboBox_chooseRtdcFile
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the 'Build' tab to load files first")
msg.setWindowTitle("No file selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
feature_x_name = str(self.comboBox_featurex.currentText())
feature_y_name = str(self.comboBox_featurey.currentText())
features = list(self.rtdc_ds["events"].keys())
if feature_x_name in features:
self.feature_x = self.rtdc_ds["events"][feature_x_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on x axis is not contained in data set")
msg.setWindowTitle("Invalid x feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if feature_y_name in features:
self.feature_y = self.rtdc_ds["events"][feature_y_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on y axis is not contained in data set")
msg.setWindowTitle("Invalid y feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.changedbyuser = True #variable used to prevent plotting if spinbox or slider is changed programmatically
#density estimation
kde = self.comboBox_kde.currentText()
if kde=="None":
brush = "b"
elif kde=="2d Histogram" or kde=="Gauss":
if kde=="2d Histogram":
density = aid_bin.kde_histogram(np.array(self.feature_x), np.array(self.feature_y))
elif kde=="Gauss":
density = aid_bin.kde_gauss(np.array(self.feature_x), np.array(self.feature_y))
density_min,density_max = np.min(density),np.max(density)
density = (density-density_min)/density_max
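#map the normalized density of each point to a color of the viridis colormap; these
#colors are then used as brushes for the individual scatter points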
# define colormap
brush = []
from pyqtgraph.graphicsItems.GradientEditorItem import Gradients
cmap = pg.ColorMap(*zip(*Gradients["viridis"]["ticks"]))
for k in density:
brush.append(cmap.mapToQColor(k))
#Add plot
#self.scatter = self.scatter_xy.plot(np.array(self.feature_x), np.array(self.feature_y),symbolPen=None,pen=None,symbol='o',brush=brush[100],clear=True)
#try to remove existing scatterplot
try:
self.scatter_xy.removeItem(self.scatter)
except:
print("Not cleared")
self.scatter = pg.ScatterPlotItem()
self.scatter.setData(np.array(self.feature_x), np.array(self.feature_y),brush=brush,symbolPen=None,pen=None,symbol='o',size=10)
self.scatter_xy.addItem(self.scatter)
#pen=None,symbol='o',symbolPen=None,symbolBrush=density,clear=True)
self.scatter.sigClicked.connect(self.onScatterClick) #When scatterplot is clicked, show the desired cell
#Fill histogram for x-axis; widget_histx
y,x = np.histogram(self.feature_x, bins='auto')
self.hist_x.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
#Manually clear y hist first. Only clear=True did not do the job
self.hist_y.clear()
#Fill histogram for y-axis; widget_histy
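#trick for a vertical histogram: plot the counts against the negated bin edges and
#rotate the resulting curve by -90 degrees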
y,x = np.histogram(self.feature_y, bins='auto')
curve = pg.PlotCurveItem(-1.*x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150),clear=True)
curve.rotate(-90)
self.hist_y.addItem(curve)
self.scatter_x_norm = (np.array(self.feature_x).astype(np.float32))/float(np.max(self.feature_x))
self.scatter_y_norm = (np.array(self.feature_y).astype(np.float32))/float(np.max(self.feature_y))
#Adjust the horizontalSlider_cellInd and spinBox_cellInd
self.horizontalSlider_cellInd.setSingleStep(1)
self.horizontalSlider_cellInd.setMinimum(0)
self.horizontalSlider_cellInd.setMaximum(len(self.feature_x)-1)
self.spinBox_cellInd.setMinimum(0)
self.spinBox_cellInd.setMaximum(len(self.feature_x)-1)
def selectPeakPos(self):
#Check if self.region exists
#If not, show a message and return:
if not hasattr(self, 'region'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no region defined yet")
msg.setWindowTitle("No region defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Try to get the user defined peak position
if not hasattr(self, 'new_peak'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no peak defined yet")
msg.setWindowTitle("No peak defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#how many rows are already in the table?
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+1)
rowPosition = rowcount
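#one table row per selected peak: [0] FL maximum, [1] FL peak position [us],
#[2] x position [um], [3] FL peak position [samples], [4] x position [pix]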
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_max"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
fl_pos_us = float(float(self.new_peak["fl_pos"])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"])
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
pos_x_um = float(self.new_peak["pos_x"])*float(self.rtdc_ds.attrs["imaging:pixel size"])
item.setData(QtCore.Qt.EditRole,pos_x_um)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_pos"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["pos_x"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
self.tableWidget_showSelectedPeaks.resizeColumnsToContents()
self.tableWidget_showSelectedPeaks.resizeRowsToContents()
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def selectPeakRange(self):
new_region = self.region.getRegion()
region_width = np.max(new_region) - np.min(new_region) #in [samples]
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
region_width = (float(region_width)/float(sample_rate))*1E6 #range[samples]*(1/sample_rate[1/s]) = range[s]; multiply by 1E6 to convert to us
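#Example (assumed values): a region of 50 samples at a sample rate of 312.5kHz
#corresponds to 50/312500s * 1E6 = 160us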
self.region_width = region_width
#put this in the table
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
def onPeaksPlotClick(self,event, points):
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.Pos_x))
a2 = (clicked_y)/float(np.max(self.Fl_pos))
#Which is the closest scatter point?
pos_x_norm = self.Pos_x/np.max(self.Pos_x)#normalized pos_x
fl_pos_norm = self.Fl_pos/np.max(self.Fl_pos)#normalized fl_pos
dist = np.sqrt(( a1-pos_x_norm )**2 + ( a2-fl_pos_norm )**2)
index = np.argmin(dist)
#Highlight this row
self.tableWidget_showSelectedPeaks.selectRow(index)
#Delete the highlighted rows
# try:
# self.actionRemoveSelectedPeaks_function()
# except:
# pass
def update_peak_plot(self):
#This function reads tableWidget_showSelectedPeaks and
#fits a function and
#puts fitting parameters on tableWidget_peakModelParameters
#read the data on tableWidget_showSelectedPeaks
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
Fl_pos,Pos_x = [],[]
for row in range(rowcount):
line = [float(self.tableWidget_showSelectedPeaks.item(row, col).text()) for col in [1,2]] #use the values for [us] and [um]
Fl_pos.append(line[0])
Pos_x.append(line[1])
self.Fl_pos = np.array(Fl_pos)
self.Pos_x = np.array(Pos_x)
self.selectedPeaksPlotPlot = self.selectedPeaksPlot.plot(self.Pos_x, self.Fl_pos,pen=None,symbol='o',symbolPen=None,symbolBrush='b',clear=True)
#if user clicks in the plot, show him the corresponding row in the table
self.selectedPeaksPlotPlot.sigPointsClicked.connect(self.onPeaksPlotClick)
if not hasattr(self, 'region_width'): #if there was no region_width defined yet...
#to get a reasonable initial range, use 20% of the nr. of available samples
samples_per_event = self.rtdc_ds.attrs["fluorescence:samples per event"]
self.region_width = 0.2*samples_per_event #width of the region in samples
#Convert to SI unit:
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
self.region_width = (float(self.region_width)/float(sample_rate))*1E6 #range[samples]*(1/sample_rate[1/s]) = range[s]; multiply by 1E6 to convert to us
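#Example (assumed values): 566 samples per event -> initial width of ~113 samples,
#which at a sample rate of 312.5kHz corresponds to ~362us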
#which model should be used?
if str(self.comboBox_peakDetModel.currentText()) == "Linear dependency and max in range" and len(Pos_x)>1:
slope,intercept = np.polyfit(Pos_x, Fl_pos,deg=1) #Linear FIT, y=mx+n; y=FL_pos[us] x=Pos_x[um]
xlin = np.round(np.linspace(np.min(Pos_x),np.max(Pos_x),25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
#Calculate velocity
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
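#the slope is given in [us/um]; its inverse has units [um/us], which equals [m/s]
#(1um/us = 1E-6m/1E-6s). Example (assumed value): slope=32us/um -> ~0.031m/s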
velocity = float(1.0/float(slope))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def addHighestXPctPeaks(self):
#how many x%?
x_pct = float(self.doubleSpinBox_highestXPercent.value())
#Get the fluorescence traces and maxima/positions of maxima
#->it could be that the user did not yet load the dataset:
if not hasattr(self,"rtdc_ds"):
#run the function updateScatterPlot()
self.updateScatterPlot()
trace = self.rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys())
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
for i in range(len(fl_keys)):
if "fl1_median" in fl_keys[i] and self.checkBox_fl1.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl1_max.append(trace_flx[ind])
fl1_pos.append(ind)
#Get the x% maxima
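#sort the maxima in descending order and keep only the indices of the brightest x percent.
#Example (assumed values): x_pct=5 and 10000 events -> the 500 brightest events are kept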
fl1_max = np.array(fl1_max)
fl1_pos = np.array(fl1_pos)
sorter = np.argsort(fl1_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl1_max))]
fl1_max = fl1_max[sorter]
fl1_pos = fl1_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl2_median" in fl_keys[i] and self.checkBox_fl2.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl2_max.append(trace_flx[ind])
fl2_pos.append(ind)
#Get the x% maxima
fl2_max = np.array(fl2_max)
fl2_pos = np.array(fl2_pos)
sorter = np.argsort(fl2_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl2_max))]
fl2_max = fl2_max[sorter]
fl2_pos = fl2_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl3_median" in fl_keys[i] and self.checkBox_fl3.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl3_max.append(trace_flx[ind])
fl3_pos.append(ind)
#Get the x% maxima
fl3_max = np.array(fl3_max)
fl3_pos = np.array(fl3_pos)
sorter = np.argsort(fl3_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl3_max))]
fl3_max = fl3_max[sorter]
fl3_pos = fl3_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
#Add fl1 fl2 and fl3 information
flx_max = np.array(list(fl1_max)+list(fl2_max)+list(fl3_max))
flx_pos = np.array(list(fl1_pos)+list(fl2_pos)+list(fl3_pos))
pos_x_um = np.concatenate(np.atleast_2d(np.array(pos_x)))
pix = self.rtdc_ds.attrs["imaging:pixel size"]
pos_x = pos_x_um/pix #convert from um to pix
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+len(flx_max))
for i in range(len(flx_max)):
rowPosition = rowcount+i
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_max[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
fl_pos_us = float(float(flx_pos[i])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"] )
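#peak position converted from sample index to microseconds: index*1e6/sample_rate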
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
#pos_x_um = float(pos_x[i])*float(self.rtdc_ds.config["imaging"]["pixel size"])
item.setData(QtCore.Qt.EditRole,float(pos_x_um[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_pos[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(pos_x[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def savePeakDetModel(self):
#Get tableWidget_peakModelParameters and write it to excel file
#Get filename from user:
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(filename)==0:
return
#add the suffix .xlsx
if not filename.endswith(".xlsx"):
filename = filename +".xlsx"
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
table = self.tableWidget_showSelectedPeaks
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
peaks_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
peaks_df.iloc[i, j] = table.item(i, j).text()
except:
peaks_df.iloc[i, j] = np.nan
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Model parameters and selected peaks are written to separate sheets
pd.DataFrame().to_excel(writer,sheet_name='Model') #initialize empty sheet 'Model'
model_df.to_excel(writer,sheet_name='Model') #write the model parameters
pd.DataFrame().to_excel(writer,sheet_name='Peaks') #initialize empty sheet 'Peaks'
peaks_df.to_excel(writer,sheet_name='Peaks') #write the selected peaks
writer.save()
writer.close()
def loadPeakDetModel(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
peak_model_df = pd.read_excel(filename,sheet_name='Model')
model = peak_model_df.iloc[0,1]
if model=="Linear dependency and max in range":
#set the combobox accordingly
index = self.comboBox_peakDetModel.findText(model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_peakDetModel.setCurrentIndex(index)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not find a valid model in the chosen file. Did you accidentially load a session or history file?!")
msg.setWindowTitle("No valid model found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
range_ = float(peak_model_df.iloc[1,1])
intercept = float(peak_model_df.iloc[2,1])
slope = float(peak_model_df.iloc[3,1])
velocity = float(peak_model_df.iloc[4,1])
#put the information in the table
xlin = np.round(np.linspace(0,100,25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(range_))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def applyPeakModel_and_export(self):
#On which files should the action be performed?
Files = []
if self.radioButton_exportAll.isChecked():
#Grab all items of comboBox_chooseRtdcFile
Files = [self.comboBox_chooseRtdcFile.itemText(i) for i in range(self.comboBox_chooseRtdcFile.count())]
else:
file = self.comboBox_chooseRtdcFile.currentText()
Files.append(str(file))
#Get the model from tableWidget_peakModelParameters
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
model = model_df.iloc[0,1]
if model == "Linear dependency and max in range":
range_us = float(model_df.iloc[1,1]) #[us]
intercept_us = float(model_df.iloc[2,1])
slope_us_um = float(model_df.iloc[3,1])
#velocity_m_s = float(model_df.iloc[4,1])
#Get a directory from the user!
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select directory', Default_dict["Path of last model"])
if len(folder)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid directory")
msg.setWindowTitle("Invalid directory")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
for rtdc_path in Files:
path, rtdc_file = os.path.split(rtdc_path)
savename = os.path.join(folder,rtdc_file)
#Avoid to save to an existing file:
addon = 1
while os.path.isfile(savename):
savename = savename.split(".rtdc")[0]
if addon>1:
savename = savename.split("_"+str(addon-1))[0]
savename = savename+"_"+str(addon)+".rtdc"
addon += 1
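#the loop above appends _1, _2, ... to the filename until it does not collide with an existing file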
print("Saving to : "+savename)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Convert quantities to [index]
sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
range_ = (range_us*float(sample_rate))/1E6 #range was given in us->Divide by 1E6 to get to s and then multiply by the sample rate
# #check if an rtdc_ds is already chosen:
# if not hasattr(self,'rtdc_ds'):
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("No measurement chosen yet. Use 'Update' button")
# msg.setWindowTitle("No measurement")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
trace = rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys()) #Which traces are available
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
#Iterate over the available cells
pos_x = rtdc_ds["events"]["pos_x"] #is already given in [um]
indices = range(len(pos_x))
if model == "Linear dependency and max in range":
#Use the linear model to get the estimated location of the fluorescence peaks
fl_peak_position_us = intercept_us+slope_us_um*pos_x
#Convert to index
fl_peak_position_ = (fl_peak_position_us*float(sample_rate))/1E6
#Now we have the estimated peak position of each cell. Look at the traces on these spots
def ind_to_us(x):
return x*1E6/sample_rate
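#ind_to_us converts a trace sample index to microseconds (index/sample_rate gives seconds; *1e6 gives us)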
#iterate over the cells:
for cellindex in range(len(pos_x)):
#Iterate over the available traces
for i in range(len(fl_keys)):
if "_median" in fl_keys[i]:
trace_flx = trace[fl_keys[i]][cellindex]
trace_pos = np.array(range(len(trace_flx)))
left = int(fl_peak_position_[cellindex]-range_/2.0)
right = int(fl_peak_position_[cellindex]+range_/2.0)
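#left/right span a symmetric window of ~range_ samples around the estimated peak position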
trace_flx_range = trace_flx[left:right]
trace_pos_range = trace_pos[left:right]
ind = np.argmax(trace_flx_range)
if "fl1_median" in fl_keys[i]:
fl1_max.append(trace_flx_range[ind])
fl1_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl2_median" in fl_keys[i]:
fl2_max.append(trace_flx_range[ind])
fl2_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl3_median" in fl_keys[i]:
fl3_max.append(trace_flx_range[ind])
fl3_pos.append(ind_to_us(trace_pos_range[ind]))
#Save those new fluorescence features into free spots in .rtdc file
#Those names can be found via dclab.dfn.feature_names (userdef0...userdef9)
#TODO: stop using dclab for saving
#Kept for now in case anyone still relies on this function
#get metadata of the dataset
meta = {}
# only export configuration meta data (no user-defined config)
for sec in dclab.definitions.CFG_METADATA:
if sec in ["fmt_tdms"]:
# ignored sections
continue
if sec in rtdc_ds.config:
meta[sec] = rtdc_ds.config[sec].copy()
#features = rtdc_ds._events.keys() #Get the names of the online features
compression = 'gzip'
nev = len(rtdc_ds)
#["Overwrite Fl_max and Fl_pos","Save to userdef"]
features = list(rtdc_ds["events"].keys())
if str(self.comboBox_toFlOrUserdef.currentText())=='Save to userdef':
features = features+["userdef"+str(i) for i in range(10)]
with dclab.rtdc_dataset.write_hdf5.write(path_or_h5file=savename,meta=meta, mode="append") as h5obj:
# write each feature individually
for feat in features:
# event-wise, because
# - tdms-based datasets don't allow indexing with numpy
# - there might be memory issues
if feat == "contour":
cont_list = [rtdc_ds["events"]["contour"][ii] for ii in indices]
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"contour": cont_list},
mode="append",
compression=compression)
elif feat == "userdef0":
if "fl1_median" in fl_keys:
print("writing fl1_max to userdef0")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef0": np.array(fl1_max)},
mode="append",
compression=compression)
elif feat == "userdef1":
if "fl2_median" in fl_keys:
print("writing fl2_max to userdef1")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef1": np.array(fl2_max)},
mode="append",
compression=compression)
elif feat == "userdef2":
if "fl3_median" in fl_keys:
print("writing fl3_max to userdef2")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef2": np.array(fl3_max)},
mode="append",
compression=compression)
elif feat == "userdef3":
if "fl1_pos" in features:
print("writing fl1_pos to userdef3")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef3": np.array(fl1_pos)},
mode="append",
compression=compression)
elif feat == "userdef4":
if "fl2_pos" in features:
print("writing fl2_pos to userdef4")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef4": np.array(fl2_pos)},
mode="append",
compression=compression)
elif feat == "userdef5":
if "fl3_pos" in features:
print("writing fl3_pos to userdef5")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef5": np.array(fl3_pos)},
mode="append",
compression=compression)
elif feat in ["userdef"+str(i) for i in range(5,10)]:
pass
elif feat == "fl1_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_max)},mode="append",compression=compression)
elif feat == "fl2_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_max)},mode="append",compression=compression)
elif feat == "fl3_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_max)},mode="append",compression=compression)
elif feat == "fl1_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_pos)},mode="append",compression=compression)
elif feat == "fl2_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_pos)},mode="append",compression=compression)
elif feat == "fl3_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_pos)},mode="append",compression=compression)
elif feat == "index":
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"index": np.array(indices)+1}, #ShapeOut likes to start with index=1
mode="append",
compression=compression)
elif feat in ["mask", "image"]:
# store image stacks (reduces file size and saves time)
m = 64
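#images are collected into stacks of m frames and written chunk-wise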
if feat=='mask':
im0 = rtdc_ds["events"][feat][0]
if feat=="image":
im0 = rtdc_ds["events"][feat][0]
imstack = np.zeros((m, im0.shape[0], im0.shape[1]),
dtype=im0.dtype)
jj = 0
if feat=='mask':
image_list = [rtdc_ds["events"][feat][ii] for ii in indices]
elif feat=='image':
image_list = [rtdc_ds["events"][feat][ii] for ii in indices]
for ii in range(len(image_list)):
dat = image_list[ii]
#dat = rtdc_ds[feat][ii]
imstack[jj] = dat
if (jj + 1) % m == 0:
jj = 0
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack},
mode="append",
compression=compression)
else:
jj += 1
# write rest
if jj:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack[:jj, :, :]},
mode="append",
compression=compression)
elif feat == "trace":
for tr in rtdc_ds["events"]["trace"].keys():
tr0 = rtdc_ds["events"]["trace"][tr][0]
trdat = np.zeros((nev, tr0.size), dtype=tr0.dtype)
jj = 0
trace_list = [rtdc_ds["events"]["trace"][tr][ii] for ii in indices]
for ii in range(len(trace_list)):
trdat[jj] = trace_list[ii]
jj += 1
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"trace": {tr: trdat}},
mode="append",
compression=compression)
else:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: rtdc_ds["events"][feat][indices]},mode="append")
h5obj.close()
def partialtrainability_activated(self,on_or_off):
if on_or_off==False:#0 means switched OFF
self.lineEdit_partialTrainability.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
#Also, remove the model from self!
self.model_keras = None
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")#put the filename in the lineedit
#this happens when the user activated the expert option "partial trainability"
elif on_or_off==True:#2 means switched ON
#Has the user already chosen a model?
if self.model_keras == None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
#If there is still no model...
if self.model_keras == None:# or self.model_keras_path==None: #if there is no model yet chosen
#Tell the user to initiate a model first!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch off
self.lineEdit_partialTrainability.setText("")
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
self.checkBox_partialTrainability.setChecked(False)
return
#Otherwise, there is a model on self and we can continue :)
#Collections are not supported
if type(self.model_keras)==tuple:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Partial trainability is not available for collections of models. Please specify a single model.</p></body></html>")
msg.setWindowTitle("Collections of models not supported for collections of models")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Switch on lineedit and the button
#self.lineEdit_partialTrainability.setEnabled(True)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(True)#enable the button that opens the partial-trainability popup
#Load trainability states of the model
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#show the trainability status of each layer in the lineEdit
def partialTrainability(self):
self.popup_trainability = MyPopup()
self.popup_trainability_ui = aid_frontend.popup_trainability()
self.popup_trainability_ui.setupUi(self.popup_trainability) #open a popup to show the layers in a table
#One can only activate this function when there was a model loaded already!
#self.model_keras has to exist!!!
if self.model_keras == None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
if self.model_keras == None: #if there is still no model...
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch this On in the final version
self.lineEdit_partialTrainability.setText("")
self.lineEdit_partialTrainability.setEnabled(False)#disable the lineEdit which shows the trainability status of each layer
self.pushButton_partialTrainability.setEnabled(False)
return
#Fill information about the model
if self.radioButton_NewModel.isChecked():#a new model is loaded
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("New model")
elif self.radioButton_LoadRestartModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Restart model: "+load_model_path)
elif self.radioButton_LoadContinueModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Continue model: "+load_model_path)
in_dim = self.model_keras.input_shape
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
out_dim = self.model_keras.output_shape[-1]
self.popup_trainability_ui.spinBox_pop_pTr_inpSize.setValue(int(in_dim[1]))
self.popup_trainability_ui.spinBox_pop_pTr_outpSize.setValue(int(out_dim))
if channels==1:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("Grayscale")
elif channels==3:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("RGB")
#Model summary to textBrowser_pop_pTr_modelSummary
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
self.popup_trainability_ui.textBrowser_pop_pTr_modelSummary.setText(summary)
#Work on the tableWidget_pop_pTr_layersTable
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
nr_layers = len(index) #total nr. of dense and conv layers with parameters
for rowNumber in range(nr_layers):
layerindex = index[rowNumber]
columnPosition = 0
layer = self.model_keras.layers[layerindex]
rowPosition = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.rowCount()
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.insertRow(rowPosition)
Name = layer.name
item = QtWidgets.QTableWidgetItem(Name)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
layer_type = layer.__class__.__name__
item = QtWidgets.QTableWidgetItem(layer_type)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 2
Params = layer.count_params()
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, Params)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 3
if layer_type == "Dense":
split_property = "units" #'units' are the number of nodes in dense layers
elif layer_type == "Conv2D":
split_property = "filters"
else:
print("other splitprop!")
return
layer_config = layer.get_config()
nr_units = layer_config[split_property] #units are either nodes or filters for dense and convolutional layer, respectively
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, int(nr_units))
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#for each item create a spinbopx (trainability)
spinb = QtWidgets.QDoubleSpinBox(self.popup_trainability_ui.tableWidget_pop_pTr_layersTable)
spinb.setMinimum(0)
spinb.setMaximum(1)
spinb.setSingleStep(0.1)
trainability = int(layer.trainable) #.trainable actually returns True or False. Make it integer
spinb.setValue(trainability) #this should be always 1
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setCellWidget(rowPosition, columnPosition, spinb)
self.popup_trainability.show()
#self.popup_trainability_ui.pushButton_pop_pTr_reset.clicked.connect(self.pop_pTr_reset)
self.popup_trainability_ui.pushButton_pop_pTr_update.clicked.connect(self.pop_pTr_update_2)
self.popup_trainability_ui.pushButton_pop_pTr_ok.clicked.connect(self.pop_pTr_ok)
###############Functions for the partial trainability popup################
def pop_pTr_reset(self):
#Reset the model to initial state, with partial trainability
print("Not implemented yet")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Not implemented yet.</p></body></html>")
msg.setWindowTitle("Not implemented")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def pop_pTr_update_1(self):#main worker function
#Apply the requested changes and display updated model in table
pTr_table = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable
#Read the table:
Layer_names,Layer_trainabilities = [],[]
rowCount = pTr_table.rowCount()
for row in range(rowCount):
#Layer_indices.append(str(pTr_table.item(row, 0).text()))
Layer_names.append(str(pTr_table.item(row, 0).text()))
Layer_trainabilities.append(float(pTr_table.cellWidget(row, 4).value()))
Layer_trainabilities = np.array(Layer_trainabilities)
#What are the current trainability statuses of the model
Layer_trainabilities_orig = np.array([self.model_keras.get_layer(l_name).trainable for l_name in Layer_names])
diff = abs( Layer_trainabilities - Layer_trainabilities_orig )
ind = np.where( diff>0 )[0]
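#only layers whose requested trainability differs from the current state need to be updated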
#Where do we have a trainability between 0 and 1
#ind = np.where( (Layer_trainabilities>0) & (Layer_trainabilities<1) )[0]
if len(ind)>0:
Layer_trainabilities = list(Layer_trainabilities[ind])
Layer_names = list(np.array(Layer_names)[ind])
#Update the model using user-specified trainabilities
self.model_keras = partial_trainability(self.model_keras,Layer_names,Layer_trainabilities)
#Update lineEdit_partialTrainability
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#show the trainability status of each layer in the lineEdit
else:
print("Nothing to do. All trainabilities are either 0 or 1")
def pop_pTr_update_2(self):#call pop_pTr_update_1 to do the work and then update the window
try:
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table
self.partialTrainability()#Update the popup window by calling the partialTrainability function
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def pop_pTr_ok(self):
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table; If 'Update' was used before, there will not be done work again, but the model is used as it is
#To make the model accessible, it has to be saved to a new .model file
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"AIDeveloper model file (*.model)")
filename = filename[0]
path, fname = os.path.split(filename)
if len(fname)==0:
return
#add the suffix .model
if not fname.endswith(".model"):
fname = fname +".model"
filename = os.path.join(path,fname)
self.model_keras.save(filename)
#Activate 'load and restart' and put this file
#Avoid the automatic popup
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(True)
self.lineEdit_LoadModelPath.setText(filename)#put the filename in the lineedit
#Destroy the window
self.popup_trainability = None
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(tooltips["modelsaved_success"])
msg.setWindowTitle("Sucessfully created and selected model")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def lossW_comboB(self,state_nr,listindex):
if listindex==-1:
ui_item = self.popup_lossW_ui
else:
ui_item = self.fittingpopups_ui[listindex].popup_lossW_ui
state_str = ui_item.comboBox_lossW.itemText(int(state_nr))
rows_nr = int(ui_item.tableWidget_lossW.rowCount())
if rows_nr==0:
state_str = "None"
if state_str=="None":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(1.0)
elif state_str=="Custom":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(True)
elif state_str=="Balanced":
#How many cells in total per epoch
events_epoch = [int(ui_item.tableWidget_lossW.item(rowPos,2).text()) for rowPos in range(rows_nr)]
classes = [int(ui_item.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=events_epoch[i]
max_val = float(max(counter.values()))
class_weights = {class_id : max_val/num_images for class_id, num_images in counter.items()}
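#e.g. counts {0: 1000, 1: 250} -> weights {0: 1.0, 1: 4.0}; rarer classes get proportionally larger loss weights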
class_weights = list(class_weights.values())
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(class_weights[rowPos])
def lossW_ok(self,listindex):
#This happens when the user presses the OK button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
#Which option was used on comboBox_lossW?
state_str = ui_item.popup_lossW_ui.comboBox_lossW.currentText()
if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
elif state_str=="Custom":#User left None. This actually means its off
#There are custom values
#Read the loss values on the table
rows_nr = int(ui_item.popup_lossW_ui.tableWidget_lossW.rowCount())
classes = [int(ui_item.popup_lossW_ui.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
loss_weights = [float(ui_item.popup_lossW_ui.tableWidget_lossW.cellWidget(rowPos,4).value()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=loss_weights[i]
#Put counter (its a dictionary) to lineedit
ui_item.lineEdit_lossW.setText(str(counter))
elif state_str=="Balanced":#Balanced, the values are computed later fresh, even when user changes the cell-numbers again
ui_item.lineEdit_lossW.setText("Balanced")
#Destroy the window
ui_item.popup_lossW = None
def lossW_cancel(self,listindex):
#This happens when the user presses the Cancel button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
if ui_item.lineEdit_lossW.text()=="":
#if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
ui_item.popup_lossW = None
return
#Destroy the window
ui_item.popup_lossW = None
def get_norm_from_manualselection(self):
norm = self.comboBox_w.currentText()
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
def popup_normalization(self):
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.gridLayout_w.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout"))
self.label_w = QtWidgets.QLabel(self.w)
self.label_w.setAlignment(QtCore.Qt.AlignCenter)
self.label_w.setObjectName(_fromUtf8("label_w"))
self.verticalLayout_w.addWidget(self.label_w)
self.horizontalLayout_2_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_2_w.setObjectName(_fromUtf8("horizontalLayout_2"))
self.pushButton_w = QtWidgets.QPushButton(self.w)
self.pushButton_w.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout_2_w.addWidget(self.pushButton_w)
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2_w = QtWidgets.QLabel(self.w)
self.label_2_w.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2_w.setObjectName(_fromUtf8("label_2_w"))
self.horizontalLayout_w.addWidget(self.label_2_w)
self.comboBox_w = QtWidgets.QComboBox(self.w)
self.comboBox_w.setObjectName(_fromUtf8("comboBox"))
self.comboBox_w.addItems(["Select"]+self.norm_methods)
self.comboBox_w.setMinimumSize(QtCore.QSize(200,22))
self.comboBox_w.setMaximumSize(QtCore.QSize(200, 22))
width=self.comboBox_w.fontMetrics().boundingRect(max(self.norm_methods, key=len)).width()
self.comboBox_w.view().setFixedWidth(width+10)
self.comboBox_w.currentIndexChanged.connect(self.get_norm_from_manualselection)
self.horizontalLayout_w.addWidget(self.comboBox_w)
self.horizontalLayout_2_w.addLayout(self.horizontalLayout_w)
self.verticalLayout_w.addLayout(self.horizontalLayout_2_w)
self.gridLayout_w.addLayout(self.verticalLayout_w, 0, 0, 1, 1)
self.w.setWindowTitle("Select normalization method")
self.label_w.setText("You are about to continue training a pretrained model\n"
"Please select the meta file of that model to load the normalization method\n"
"or choose the normalization method manually")
self.pushButton_w.setText("Load meta file")
self.label_2_w.setText("Manual \n"
"selection")
#one button that allows to load a meta file containing the norm-method
self.pushButton_w.clicked.connect(self.get_norm_from_modelparafile)
self.w.show()
def action_preview_model(self,enabled):#function runs when radioButton_LoadRestartModel or radioButton_LoadContinueModel was clicked
if enabled:
#if the "Load and restart" radiobutton was clicked:
if self.radioButton_LoadRestartModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model architecture', Default_dict["Path of last model"],"Architecture or model (*.arch *.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
#if the "Load and continue" radiobutton was clicked:
elif self.radioButton_LoadContinueModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model with all parameters', Default_dict["Path of last model"],"Keras model (*.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
self.lineEdit_LoadModelPath.setText(modelname) #Put the filename to the line edit
#Remember the location for next time
if len(str(modelname))>0:
Default_dict["Path of last model"] = os.path.split(modelname)[0]
aid_bin.save_aid_settings(Default_dict)
#If user wants to load and restart a model
if self.radioButton_LoadRestartModel.isChecked():
#load the model and print summary
if modelname.endswith(".arch"):
json_file = open(modelname, 'r')
model_config = json_file.read()
json_file.close()
model_config = json.loads(model_config)
#cut the .json off
modelname = modelname.split(".arch")[0]
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
text1 = "Architecture: loaded from .arch\nWeights: will be randomly initialized'\n"
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
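#e.g. "mymodel_42" -> "mymodel_meta.xlsx" (assuming the usual AIDeveloper naming scheme)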
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
#Otherwise, user wants to load and continue training a model
elif self.radioButton_LoadContinueModel.isChecked():
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
text1 = "Architecture: loaded from .model\nWeights: pretrained weights will be loaded and used when hitting button 'Initialize model!'\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
#
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: loaded Model takes: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked_no_rtdc_ds()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
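#classes are assumed to be indexed 0..N-1, so the largest index + 1 gives the number of classes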
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
#aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
text = text1+text2+text3
self.textBrowser_Info.setText(text)
if self.radioButton_LoadContinueModel.isChecked():
#"Load the parameter file of the model that should be continued and apply the same normalization"
#Make a popup: You are about to continue to train a pretrained model
#Please select the parameter file of that model to load the normalization method
#or choose the normalization method manually:
#this is important
self.popup_normalization()
def get_metrics(self,nr_classes):
Metrics = []
f1 = bool(self.checkBox_expertF1.isChecked())
if f1==True:
Metrics.append("f1_score")
precision = bool(self.checkBox_expertPrecision.isChecked())
if precision==True:
Metrics.append("precision")
recall = bool(self.checkBox_expertRecall.isChecked())
if recall==True:
Metrics.append("recall")
metrics = ['accuracy'] + Metrics
metrics = aid_dl.get_metrics_tensors(metrics,nr_classes)
return metrics
def action_set_modelpath_and_name(self):
#Get the path and filename for the new model
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"Keras Model file (*.model)")
filename = filename[0]
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if filename.endswith(".arch"):
filename = filename.split(".arch")[0]
#add the suffix .model
if not filename.endswith(".model"):
filename = filename +".model"
self.lineEdit_modelname.setText(filename)
#Write to Default_dict
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def get_dataOverview(self):
table = self.tableWidget_Info
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
tmp_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
tmp_df.iloc[i, j] = table.item(i, j).text()
except:
tmp_df.iloc[i, j] = np.nan
return tmp_df
def action_initialize_model(self,duties="initialize_train"):
"""
duties: which tasks should be performed: "initialize", "initialize_train", "initialize_lrfind"
"""
#print("duties: "+str(duties))
#Create config (define which device to use)
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
# try:
# K.clear_session()
# except:
# print("Could not clear_session (7)")
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#Initialize the model
#######################Load and restart model##########################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname
#load the model and print summary
if load_modelname.endswith(".arch"):
json_file = open(load_modelname, 'r')
model_config = json_file.read()
json_file.close()
model_keras = model_from_json(model_config)
model_config = json.loads(model_config)
text1 = "\nArchitecture: loaded from .arch\nWeights: randomly initialized\n"
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif load_modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(load_modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
model_keras = model_from_config(model_config)
text1 = "\nArchitecture: loaded from .model\nWeights: randomly initialized\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
channels = in_dim[-1] #TensorFlow: channels in last dimension
#Compile model (consider user-specific metrics)
model_metrics = self.get_metrics(out_dim)
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss and optimizer for now; expert settings follow and the model will be recompiled
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###############Load and continue training the model####################
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname+"\n"
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if load_modelname.endswith(".model"):
#Load the full model
try:
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
except:
K.clear_session() #On Linux an error could occur if another fitting ran before
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
#model_config = model_keras.config() #Load the model config (this is the architecture)
#load_modelname = load_modelname.split(".model")[0]
text1 = "Architecture: loaded from .model\nWeights: pretrained weights were loaded\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
chosen_model = str(self.comboBox_ModelSelection.currentText())
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#Check input dimensions
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
in_dim = model_keras.get_input_shape_at(0)
out_dim = model_keras.get_output_shape_at(0)[1]
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###########################New model###################################
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
text0 = load_modelname
#Create a new model!
#Get what the user wants from the dropdown menu!
chosen_model = str(self.comboBox_ModelSelection.currentText())
if chosen_model==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
SelectedFiles = self.items_clicked()
#rtdc_ds = SelectedFiles[0]["rtdc_ds"]
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
channels=1
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
channels=3
indices = [s["class"] for s in SelectedFiles]
indices_unique = np.unique(np.array(indices))
if len(indices_unique)<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Need at least two classes to fit. Please specify .rtdc files and corresponding indeces")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
out_dim = np.max(indices)+1
nr_classes = out_dim
if chosen_model=="None":
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
try:
model_keras = model_zoo.get_model(chosen_model,in_dim,channels,out_dim)
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text1 = "Architecture: created "+chosen_model+" design\nWeights: Initialized random weights\n"
if self.get_color_mode()=="Grayscale":
channels = 1
channel_text = "1 channel (Grayscale)"
elif self.get_color_mode()=="RGB":
channels = 3
channel_text = "3 channels (RGB)"
text2 = "Model Input: "+str(in_dim)+" x "+str(in_dim) + " pixel images and "+channel_text+"\n"
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
else:
#No radio-button was chosen
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the radiobuttons to define the model")
msg.setWindowTitle("No model defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If expert mode is on, apply the requested options
#This affects learning rate, trainability of layers and dropout rate
expert_mode = bool(self.groupBox_expertMode.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy() #get the current optimizer settings
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
model_metrics = self.get_metrics(nr_classes)
if "collection" in chosen_model.lower():
for m in model_keras[1]: #in a collection, model_keras[0] are the names of the models and model_keras[1] is a list of all models
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,self.get_metrics(nr_classes),nr_classes)
if not "collection" in chosen_model.lower():
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
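#Example of the expected format: typing "0.2,0.3" into the dropout lineEdit is wrapped
#to "[0.2,0.3]" and ast.literal_eval turns it into the list [0.2, 0.3]; a single value
#such as "0.5" becomes [0.5]. Anything that cannot be parsed falls back to an empty list.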
if type(model_keras)==tuple:#when user chose a Collection of models, a tuple is returned by get_model
collection = True
else:
collection = False
if collection==False: #if there is a single model:
#Original learning rate (before expert mode is switched on!)
try:
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
except:
print("Session busy. Try again in fresh session...")
#tf.reset_default_graph() #Make sure to start with a fresh session
K.clear_session()
sess = tf.Session(graph = tf.Graph(), config=config_gpu)
#K.set_session(sess)
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
#Get initial trainability states of model
self.trainable_original, self.layer_names = aid_dl.model_get_trainable_list(model_keras)
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
do_list_original = self.do_list_original
if collection==True: #if there is a collection of models:
#Original learning rate (before expert mode is switched on!)
self.learning_rate_original = [K.eval(model_keras[1][i].optimizer.lr) for i in range(len(model_keras[1]))]
#Get initial trainability states of model
trainable_layerName = [aid_dl.model_get_trainable_list(model_keras[1][i]) for i in range(len(model_keras[1]))]
self.trainable_original = [trainable_layerName[i][0] for i in range(len(trainable_layerName))]
self.layer_names = [trainable_layerName[i][1] for i in range(len(trainable_layerName))]
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = [aid_dl.get_dropout(model_keras[1][i]) for i in range(len(model_keras[1]))]#Get a list of dropout values of the current model
do_list_original = self.do_list_original
#TODO add expert mode ability for collection of models. Maybe define self.model_keras as a list in general. So, fitting a single model is just a special case
if expert_mode==True:
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
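#Example: a model with 10 trainable-state entries and train_last_layers_n=2 yields
#[False]*8 + [True]*2, i.e. only the last two layers remain trainable.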
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list=len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
return
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not understand user input at Expert->Dropout")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the rates requested by the user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Learning Rate: Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[1][0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6: #If there is a difference, change lr accordingly
K.set_value(model_keras.optimizer.lr, learning_rate_const)
text_updates += "Learning rate: "+str(lr_current)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[1][0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Optimizer: "+optimizer_expert+"\n"
#Loss function: Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
if collection==True:
if model_keras[1][0].loss!=loss_expert:
recompile = True
text_updates += "Loss function: "+loss_expert+"\n"
if recompile==True:
if collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
print("Recompiling...")
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.model_keras = model_keras #overwrite the model in self
if collection == False:
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
text_new_modelname = "Model will be saved as: "+new_modelname+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
if collection == True:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
new_modelname = os.path.split(new_modelname)
text_new_modelname = "Collection of Models will be saved into: "+new_modelname[0]+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
#Info about normalization method
norm = str(self.comboBox_Normalization.currentText())
text4 = "Input image normalization method: "+norm+"\n"
#Check if there are dropout layers:
#do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
if len(do_list_original)>0:
text4 = text4+"Found "+str(len(do_list_original)) +" dropout layers with rates: "+str(do_list_original)+"\n"
else:
text4 = text4+"Found no dropout layers\n"
if expert_mode==True:
if dropout_expert_on:
text4 = text4+text_do+"\n"
# if learning_rate_expert_on==True:
# if K.eval(model_keras.optimizer.lr) != learning_rate_const: #if the learning rate in UI is NOT equal to the lr of the model...
# text_lr = "Changed the learning rate to: "+ str(learning_rate_const)+"\n"
# text4 = text4+text_lr
text5 = "Model summary:\n"
summary = []
if collection==False:
model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model architecture: serialize to JSON
model_json = model_keras.to_json()
with open(new_modelname.split(".model")[0]+".arch", "w") as json_file:
json_file.write(model_json)
elif collection==True:
if self.groupBox_expertMode.isChecked()==True:
self.groupBox_expertMode.setChecked(False)
print("Turned off expert mode. Not implemented yet for collections of models. This does not affect user-specified metrics (precision/recall/f1)")
self.model_keras_arch_path = [new_modelname[0]+os.sep+new_modelname[1].split(".model")[0]+"_"+model_keras[0][i]+".arch" for i in range(len(model_keras[0]))]
for i in range(len(model_keras[1])):
model_keras[1][i].summary(print_fn=summary.append)
#Save the model architecture: serialize to JSON
model_json = model_keras[1][i].to_json()
with open(self.model_keras_arch_path[i], "w") as json_file:
json_file.write(model_json)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model to a variable on self
self.model_keras = model_keras
#Get the user-defined cropping size
crop = int(self.spinBox_imagecrop.value())
#Make the cropsize a bit larger since the images will later be rotated
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
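#Example: crop=32 px gives sqrt(32**2+32**2)=45.25 -> ceil(45.25/2)*2 = 46 px, which is
#large enough that even a worst-case 45 degree rotation leaves no empty corners in the
#final 32 x 32 crop.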
#Estimate RAM needed
nr_imgs = np.sum([np.array(list(SelectedFiles)[i]["nr_images"]) for i in range(len(list(SelectedFiles)))])
ram_needed = np.round(nr_imgs * aid_bin.calc_ram_need(cropsize2),2)
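#aid_bin.calc_ram_need is assumed to return the per-image memory footprint (in MB) for
#the given crop size, so the total estimate is simply nr_imgs times that value.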
if duties=="initialize":#Stop here if the model just needs to be intialized (for expert mode->partial trainability)
return
elif duties=="initialize_train":
#Tell the user if the data is stored and read from ram or not
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the model only be initialized,\
or do you want to start fitting right after? For fitting, data will\
be loaded to RAM (since Edit->Data to RAM is enabled), which will\
require "+str(ram_needed)+"MB of RAM.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Initialize model or initialize and fit model?")
msg.addButton(QtWidgets.QPushButton('Stop after model initialization'), QtWidgets.QMessageBox.RejectRole)
msg.addButton(QtWidgets.QPushButton('Start fitting'), QtWidgets.QMessageBox.ApplyRole)
retval = msg.exec_()
elif duties=="initialize_lrfind":
retval = 1
else:
print("Invalid duties: "+duties)
return
if retval==0: #first button ('Stop after model initialization') was chosen: only initialize the model
print("Closing session")
del model_keras
sess.close()
return
elif retval == 1:
if self.actionDataToRam.isChecked():
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Check if there is data already available in RAM
if len(self.ram)==0:#there is no data stored in RAM yet
print("No data on RAM. I have to load")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
else:
print("There is already some data on RAM")
new_fileinfo = {"SelectedFiles":list(SelectedFiles),"cropsize2":cropsize2,"zoom_factors":zoom_factors,"zoom_order":zoom_order,"color_mode":color_mode}
identical = aid_bin.ram_compare_data(self.ram,new_fileinfo)
if not identical:
#Load the data
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
if identical:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "Data was loaded before! Should same data be reused? If not, click 'Reload data', e.g. if you altered the Data-table."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Found data on RAM")
msg.addButton(QtWidgets.QPushButton('Reuse data'), QtWidgets.QMessageBox.YesRole)
msg.addButton(QtWidgets.QPushButton('Reload data'), QtWidgets.QMessageBox.NoRole)
retval = msg.exec_()
if retval==0:
print("Re-use data")
#Re-use same data
elif retval==1:
print("Re-load data")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
#Finally, activate the 'Fit model' button again
#self.pushButton_FitModel.setEnabled(True)
if duties=="initialize_train":
self.action_fit_model()
if duties=="initialize_lrfind":
self.action_lr_finder()
del model_keras
def action_fit_model_worker(self,progress_callback,history_callback):
if self.radioButton_cpu.isChecked():
gpu_used = False
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
gpu_used = True
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
listindex = self.popupcounter-1
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
#Take the initialized models
model_keras_path = self.model_keras_path
model_keras = [load_model(model_keras_path[i],custom_objects=aid_dl.get_custom_metrics()) for i in range(len(model_keras_path)) ]
model_architecture_names = self.model_keras[0]
print(model_architecture_names)
#self.model_keras = None
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#self.model_keras = None
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
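#multi_gpu_model returns a wrapper model; its layers[-2] is assumed to be the original
#template model, which is why the weights are copied back into it below.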
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
# model_keras_p = []
# for m in model_keras_p:
# print("Adjusting the model for Multi-GPU")
# model_keras_p.append(multi_gpu_model(m, gpus=gpu_nr)) #indicate the numbers of gpus that you have
##############Main function after hitting FIT MODEL####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if collection==False and deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#provisional loss/optimizer; the model may be recompiled below if the (expert) settings differ
elif collection==False and deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#provisional loss/optimizer; the model may be recompiled below if the (expert) settings differ
elif collection==True and deviceSelected=="Single-GPU":
#Switch off the expert tab!
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(False)
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setEnabled(False)
for m in model_keras:
m.compile(loss='categorical_crossentropy',optimizer='adam',metrics=self.get_metrics(nr_classes))#provisional loss/optimizer; the models may be recompiled below if the settings differ
elif collection==True and deviceSelected=="Multi-GPU":
print("Collection & Multi-GPU is not supported yet")
return
#Original learning rate:
#learning_rate_original = self.learning_rate_original#K.eval(model_keras.optimizer.lr)
#Original trainable states of layers with parameters
trainable_original, layer_names = self.trainable_original, self.layer_names
do_list_original = self.do_list_original
#Collect all information about the fitting routine that was user
#defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
new_model = self.radioButton_NewModel.isChecked()
chosen_model = str(self.comboBox_ModelSelection.currentText())
crop = int(self.spinBox_imagecrop.value())
color_mode = str(self.comboBox_GrayOrRGB.currentText())
loadrestart_model = self.radioButton_LoadRestartModel.isChecked()
loadcontinue_model = self.radioButton_LoadContinueModel.isChecked()
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
keras_refresh_nr_epochs = int(self.spinBox_RefreshAfterEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_refresh_nr_epochs = int(self.spinBox_RefreshAfterNrEpochs.value())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
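#Example: a kernel lineEdit containing "3,9" becomes the tuple (3, 9) and an angle
#lineEdit containing "-10,10" becomes (-10, 10); presumably the lower/upper bounds for
#the kernel size and the rotation angle of the motion blur.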
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False)
print("Expert mode was switched off. Not implemented yet for collections")
expert_mode = False
batchSize_expert = int(self.spinBox_batchSize.value())
epochs_expert = int(self.spinBox_epochs.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
learning_rate_const_on = bool(self.radioButton_LrConst.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.lineEdit_cycLrMin.text())
cycLrMax = float(self.lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.comboBox_cycLrMethod.currentText())
#clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy()
cycLrGamma = self.clr_settings["gamma"]
SelectedFiles = self.items_clicked()#to compute cycLrStepSize, the number of training images is needed
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,self.clr_settings["step_size"],batchSize_expert)
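#The cyclical-lr step size is derived from the number of training images and the batch
#size; clr_settings["step_size"] is assumed to be given in epochs, so one half-cycle
#spans that many epochs worth of iterations.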
#put clr_settings onto fittingpopup,
self.fittingpopups_ui[listindex].clr_settings = self.clr_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
#put optimizer_settings onto fittingpopup,
self.fittingpopups_ui[listindex].optimizer_settings = self.optimizer_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
learning_rate_expo_on = bool(self.radioButton_LrExpo.isChecked())
expDecInitLr = float(self.doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.spinBox_expDecSteps.value())
expDecRate = float(self.doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy()#make a copy to make sure that changes in the UI are not immediately used
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.checkBox_lossW.isChecked())
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
#SelectedFiles = self.items_clicked()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
self.fittingpopups_ui[listindex].SelectedFiles = SelectedFiles #save to self. to make it accessible for popup showing loss weights
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print("class_weight:" +str(class_weight))
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
#Get callback for the learning rate scheduling
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
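#Exactly one of the three schedules is active at a time: constant lr, cyclical lr
#(method chosen via cycLrMethod) or exponential decay. The returned Keras callback is
#assumed to adjust the optimizer lr accordingly during fitting.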
#save a dictionary with initial values
lr_dict_original = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
if collection==False:
#Create an excel file
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles_df = pd.DataFrame(SelectedFiles)
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
elif collection==True:
SelectedFiles_df = pd.DataFrame(SelectedFiles)
Writers = []
#Create excel files
for i in range(len(model_keras_path)):
writer = pd.ExcelWriter(model_keras_path[i].split(".model")[0]+'_meta.xlsx', engine='openpyxl')
Writers.append(writer)
for writer in Writers:
#Used files go to a separate sheet on the MetaFile.xlsx
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
###############################Expert Mode values##################
expert_mode_before = False #There was no expert mode used before.
if expert_mode==True:
#activate groupBox_expertMode_pop
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(True)
expert_mode_before = True
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the rates requested by the user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
#Check if model has to be compiled again
recompile = False #by default, dont recompile (happens for "Load and continue" training a model)
if new_model==True:
recompile = True
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model on self
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#read self.ram to new variable ; next clear ram. This is required for multitasking (training multiple models with maybe different data)
DATA = self.ram
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(DATA)))
#clear the ram again if desired
if not self.actionKeep_Data_in_RAM.isChecked():
self.ram = dict()
print("Removed data from self.ram. For further training sessions, data has to be reloaded.")
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles_train)):
#if Data_to_RAM was not enabled:
#if not self.actionDataToRam.isChecked():
if len(DATA)==0: #Here, the entire training set needs to be used! Not only random images!
#Replace=true: means individual cells could occur several times
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
# else: #get a similar generator, using the ram-data
# if len(DATA)==0:
# gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
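#Note: this averages the per-file means and per-file standard deviations; it equals the
#true global statistics only if all files contribute equally, otherwise it is an
#approximation. The same values are stored in the meta file and reused at inference time.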
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
Para_dict = pd.DataFrame()
def update_para_dict():
#Document changes in the meta-file
Para_dict["AIDeveloper_Version"]=VERSION,
Para_dict["model_zoo_version"]=model_zoo_version,
try:
Para_dict["OS"]=platform.platform(),
Para_dict["CPU"]=platform.processor(),
except:
Para_dict["OS"]="Unknown",
Para_dict["CPU"]="Unknown",
Para_dict["Modelname"]=new_modelname,
Para_dict["Chosen Model"]=chosen_model,
Para_dict["new_model"]=new_model,
Para_dict["loadrestart_model"]=loadrestart_model,
Para_dict["loadcontinue_model"]=loadcontinue_model,
Para_dict["Continued_Fitting_From"]=load_modelname,
Para_dict["Input image size"]=crop,
Para_dict["Color Mode"]=color_mode,
Para_dict["Zoom order"]=zoom_order,
Para_dict["Device"]=deviceSelected,
Para_dict["gpu_used"]=gpu_used,
Para_dict["gpu_memory"]=gpu_memory,
Para_dict["Output Nr. classes"]=nr_classes,
Para_dict["Normalization"]=norm,
Para_dict["Nr. epochs"]=nr_epochs,
Para_dict["Keras refresh after nr. epochs"]=keras_refresh_nr_epochs,
Para_dict["Horz. flip"]=h_flip,
Para_dict["Vert. flip"]=v_flip,
Para_dict["rotation"]=rotation,
Para_dict["width_shift"]=width_shift,
Para_dict["height_shift"]=height_shift,
Para_dict["zoom"]=zoom,
Para_dict["shear"]=shear,
Para_dict["Brightness refresh after nr. epochs"]=brightness_refresh_nr_epochs,
Para_dict["Brightness add. lower"]=brightness_add_lower,
Para_dict["Brightness add. upper"]=brightness_add_upper,
Para_dict["Brightness mult. lower"]=brightness_mult_lower,
Para_dict["Brightness mult. upper"]=brightness_mult_upper,
Para_dict["Gaussnoise Mean"]=gaussnoise_mean,
Para_dict["Gaussnoise Scale"]=gaussnoise_scale,
Para_dict["Contrast on"]=contrast_on,
Para_dict["Contrast Lower"]=contrast_lower,
Para_dict["Contrast Higher"]=contrast_higher,
Para_dict["Saturation on"]=saturation_on,
Para_dict["Saturation Lower"]=saturation_lower,
Para_dict["Saturation Higher"]=saturation_higher,
Para_dict["Hue on"]=hue_on,
Para_dict["Hue delta"]=hue_delta,
Para_dict["Average blur on"]=avgBlur_on,
Para_dict["Average blur Lower"]=avgBlur_min,
Para_dict["Average blur Higher"]=avgBlur_max,
Para_dict["Gauss blur on"]=gaussBlur_on,
Para_dict["Gauss blur Lower"]=gaussBlur_min,
Para_dict["Gauss blur Higher"]=gaussBlur_max,
Para_dict["Motion blur on"]=motionBlur_on,
Para_dict["Motion blur Kernel"]=motionBlur_kernel,
Para_dict["Motion blur Angle"]=motionBlur_angle,
Para_dict["Epoch_Started_Using_These_Settings"]=counter,
Para_dict["expert_mode"]=expert_mode,
Para_dict["batchSize_expert"]=batchSize_expert,
Para_dict["epochs_expert"]=epochs_expert,
Para_dict["learning_rate_expert_on"]=learning_rate_expert_on,
Para_dict["learning_rate_const_on"]=learning_rate_const_on,
Para_dict["learning_rate_const"]=learning_rate_const,
Para_dict["learning_rate_cycLR_on"]=learning_rate_cycLR_on,
Para_dict["cycLrMin"]=cycLrMin,
Para_dict["cycLrMax"]=cycLrMax,
Para_dict["cycLrMethod"] = cycLrMethod,
Para_dict["clr_settings"] = self.fittingpopups_ui[listindex].clr_settings,
Para_dict["learning_rate_expo_on"]=learning_rate_expo_on,
Para_dict["expDecInitLr"]=expDecInitLr,
Para_dict["expDecSteps"]=expDecSteps,
Para_dict["expDecRate"]=expDecRate,
Para_dict["loss_expert_on"]=loss_expert_on,
Para_dict["loss_expert"]=loss_expert,
Para_dict["optimizer_expert_on"]=optimizer_expert_on,
Para_dict["optimizer_expert"]=optimizer_expert,
Para_dict["optimizer_settings"]=optimizer_settings,
Para_dict["paddingMode"]=paddingMode,
Para_dict["train_last_layers"]=train_last_layers,
Para_dict["train_last_layers_n"]=train_last_layers_n,
Para_dict["train_dense_layers"]=train_dense_layers,
Para_dict["dropout_expert_on"]=dropout_expert_on,
Para_dict["dropout_expert"]=dropout_expert,
Para_dict["lossW_expert_on"]=lossW_expert_on,
Para_dict["lossW_expert"]=lossW_expert,
Para_dict["class_weight"]=class_weight,
Para_dict["metrics"]=model_metrics,
#training data cannot be changed during training
if norm == "StdScaling using mean and std of all training data":
#This needs to be saved into Para_dict since it will be required for inference
Para_dict["Mean of training data used for scaling"]=mean_trainingdata,
Para_dict["Std of training data used for scaling"]=std_trainingdata,
if collection==False:
if counter == 0:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters')
else:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters',startrow=self.fittingpopups_ui[listindex].writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH)#change to read/write
try:
self.fittingpopups_ui[listindex].writer.save()
except:
pass
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)#change to only readable
if collection==True:
for i in range(len(Writers)):
Para_dict["Chosen Model"]=model_architecture_names[i],
writer = Writers[i]
if counter==0:
Para_dict.to_excel(Writers[i],sheet_name='Parameters')
else:
Para_dict.to_excel(writer,sheet_name='Parameters',startrow=writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
try:
writer.save()
except:
pass
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #read only
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if not self.actionDataToRam.isChecked():
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else: #get a similar generator, using the ram-data
if len(DATA)==0:
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_valid = aid_img.gen_crop_img_ram(DATA,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
#Save the validation set (BEFORE normalization!)
#Write to.rtdc files
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Use a different Exporting option in ->Edit if you want to export the data")
# msg.setWindowTitle("Export is turned off!")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
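#Example: y_valid=[0,2,1] with nr_classes=3 becomes the one-hot matrix
#[[1,0,0],[0,0,1],[0,1,0]].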
xtra_valid = np.concatenate(xtra_valid)
if not bool(self.actionExport_Off.isChecked())==True:
#Save the labels
np.savetxt(new_modelname.split(".model")[0]+'_Valid_Labels.txt',y_valid.astype(int),fmt='%i')
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
#get it to theano image format (channels first)
#X_valid = X_valid.swapaxes(-1,-2).swapaxes(-2,-3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
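#Example: 46 px validation images and crop=32 give remove=int(46/2-32/2)=7, so pixels
#7..38 are kept in each spatial dimension, i.e. a centered 32 x 32 crop.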
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
####################Update the PopupFitting########################
self.fittingpopups_ui[listindex].lineEdit_modelname_pop.setText(new_modelname) #show the model name in the fitting popup
self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.setValue(crop)
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(nr_epochs)
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.addItems(self.predefined_models)
chosen_model = str(self.comboBox_ModelSelection.currentText())
index = self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.addItems(self.norm_methods)
index = self.fittingpopups_ui[listindex].comboBox_Normalization_pop.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.setCurrentIndex(index)
#padding
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
#zoom_order
self.fittingpopups_ui[listindex].comboBox_zoomOrder.setCurrentIndex(zoom_order)
#CPU setting
self.fittingpopups_ui[listindex].comboBox_cpu_pop.addItem("Default CPU")
if gpu_used==False:
self.fittingpopups_ui[listindex].radioButton_cpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
#GPU setting
if gpu_used==True:
self.fittingpopups_ui[listindex].radioButton_gpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].comboBox_gpu_pop.addItem(deviceSelected)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.setValue(keras_refresh_nr_epochs)
self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.setChecked(h_flip)
self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.setChecked(v_flip)
self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.setText(str(rotation))
self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.setText(str(width_shift))
self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.setText(str(height_shift))
self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.setText(str(zoom))
self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.setText(str(shear))
self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.setValue(brightness_refresh_nr_epochs)
self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.setValue(brightness_add_lower)
self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.setValue(brightness_add_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.setValue(brightness_mult_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.setValue(brightness_mult_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.setValue(gaussnoise_mean)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.setValue(gaussnoise_scale)
self.fittingpopups_ui[listindex].checkBox_contrast_pop.setChecked(contrast_on)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.setValue(contrast_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.setValue(contrast_higher)
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setChecked(saturation_on)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setValue(saturation_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setValue(saturation_higher)
self.fittingpopups_ui[listindex].checkBox_hue_pop.setChecked(hue_on)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setValue(hue_delta)
#Special for saturation and hue. Only enabled for RGB:
saturation_enabled = bool(self.checkBox_saturation.isEnabled())
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setEnabled(saturation_enabled)
hue_enabled = bool(self.checkBox_hue.isEnabled())
self.fittingpopups_ui[listindex].checkBox_hue_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.setChecked(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setValue(avgBlur_min)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setValue(avgBlur_max)
self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.setChecked(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setValue(gaussBlur_min)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setValue(gaussBlur_max)
self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.setChecked(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurAngle_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setEnabled(motionBlur_on)
if len(motionBlur_kernel)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0]))
if len(motionBlur_kernel)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0])+","+str(motionBlur_kernel[1]))
if len(motionBlur_angle)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0]))
if len(motionBlur_angle)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0])+","+str(motionBlur_angle[1]))
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(expert_mode)
self.fittingpopups_ui[listindex].spinBox_batchSize.setValue(batchSize_expert)
self.fittingpopups_ui[listindex].spinBox_epochs.setValue(epochs_expert)
self.fittingpopups_ui[listindex].groupBox_learningRate_pop.setChecked(learning_rate_expert_on)
self.fittingpopups_ui[listindex].radioButton_LrConst.setChecked(learning_rate_const_on)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
self.fittingpopups_ui[listindex].radioButton_LrCycl.setChecked(learning_rate_cycLR_on)
self.fittingpopups_ui[listindex].lineEdit_cycLrMin.setText(str(cycLrMin))
self.fittingpopups_ui[listindex].lineEdit_cycLrMax.setText(str(cycLrMax))
index = self.fittingpopups_ui[listindex].comboBox_cycLrMethod.findText(cycLrMethod, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_cycLrMethod.setCurrentIndex(index)
self.fittingpopups_ui[listindex].radioButton_LrExpo.setChecked(learning_rate_expo_on)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.setValue(expDecInitLr)
self.fittingpopups_ui[listindex].spinBox_expDecSteps.setValue(expDecSteps)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.setValue(expDecRate)
self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.setChecked(loss_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.findText(loss_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_optimizer_pop.setChecked(optimizer_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_optimizer.findText(optimizer_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_optimizer.setCurrentIndex(index)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.setChecked(train_last_layers)
self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.setValue(train_last_layers_n)
self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.setChecked(train_dense_layers)
self.fittingpopups_ui[listindex].checkBox_dropout_pop.setChecked(dropout_expert_on)
do_text = [str(do_i) for do_i in dropout_expert]
self.fittingpopups_ui[listindex].lineEdit_dropout_pop.setText((', '.join(do_text)))
self.fittingpopups_ui[listindex].checkBox_lossW.setChecked(lossW_expert_on)
self.fittingpopups_ui[listindex].pushButton_lossW.setEnabled(lossW_expert_on)
self.fittingpopups_ui[listindex].lineEdit_lossW.setText(str(lossW_expert))
if channels==1:
channel_text = "Grayscale"
elif channels==3:
channel_text = "RGB"
self.fittingpopups_ui[listindex].comboBox_colorMode_pop.addItems([channel_text])
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
#Dictionary defining affine image augmentation options:
aug_paras = {"v_flip":v_flip,"h_flip":h_flip,"rotation":rotation,"width_shift":width_shift,"height_shift":height_shift,"zoom":zoom,"shear":shear}
Histories,Index,Saved,Stopwatch,LearningRate = [],[],[],[],[]
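#These lists accumulate per-epoch results and are flushed to the 'History' sheet of meta.xlsx every few seconds (see spinBox_saveMetaEvery below), then reset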
if collection==True:
HISTORIES = [ [] for model in model_keras]
SAVED = [ [] for model in model_keras]
counter = 0
saving_failed = False #when saving fails, this becomes true and the user will be informed at the end of training
#Save the initial values (Epoch 1)
update_para_dict()
model_metrics_names = []
for met in model_metrics:
if type(met)==str:
model_metrics_names.append(met)
else:
metname = met.name
metlabel = met.label
if metlabel>0:
metname = metname+"_"+str(metlabel)
model_metrics_names.append(metname)
#Dictionary for records in metrics
model_metrics_records = {}
model_metrics_records["acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["val_acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["loss"] = 9E20 ##loss starts very high and approaches 0 during training
model_metrics_records["val_loss"] = 9E20 ##loss starts very high and approaches 0 during training
for key in model_metrics_names:
if 'precision' in key or 'recall' in key or 'f1_score' in key:
model_metrics_records[key] = 0 #those metrics start at zero and approach 1
model_metrics_records["val_"+key] = 0 #those metrics start at zero and approach 1
gen_train_refresh = False
time_start = time.time()
t1 = time.time() #Initialize a timer; this is used to save the meta file every few seconds
t2 = time.time() #Initialize a timer; this is used update the fitting parameters
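#Structure of the training loop below:
#- outer while: loads/crops fresh training images and applies the costly affine augmentation once per pass
#- middle while: re-uses that affine-augmented batch for keras_refresh_nr_epochs epochs
#- inner while: re-uses it again for brightness_refresh_nr_epochs epochs and only applies the cheap brightness/noise/blur augmentations before each fit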
while counter < nr_epochs: #resample nr_epochs times
#Only keep fitting if the respective window is open:
isVisible = self.fittingpopups[listindex].isVisible()
if isVisible:
############Keras image augmentation#####################
#Start the first iteration:
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(DATA)==0 or gen_train_refresh:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
gen_train_refresh = False
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Some parallelization: use nr_threads (number of CPUs)
nr_threads = 1 #Somehow for MNIST and CIFAR, processing always took longer for nr_threads>1. I tried nr_threads=2,4,8,16,24
if nr_threads == 1:
X_batch = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_batch = np.copy(y_train)
else:
#Divide the data into nr_threads batches
X_train = np.array_split(X_train,nr_threads)
y_train = np.array_split(y_train,nr_threads)
self.X_batch = [False] * nr_threads
self.y_batch = [False] * nr_threads
self.counter_aug = 0
self.Workers_augm = []
def imgaug_worker(aug_paras,progress_callback,history_callback):
i = aug_paras["i"]
self.X_batch[i] = aid_img.affine_augm(aug_paras["X_train"],v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear)
self.y_batch[i] = aug_paras["y_train"]
self.counter_aug+=1
t3_a = time.time()
for i in range(nr_threads):
aug_paras_ = copy.deepcopy(aug_paras)
aug_paras_["i"] = i
aug_paras_["X_train"]=X_train[i]#augparas contains rotation and so on. X_train and y_train are overwritten in each iteration (for each worker new X_train)
aug_paras_["y_train"]=y_train[i]
self.Workers_augm.append(Worker(imgaug_worker,aug_paras_))
self.threadpool.start(self.Workers_augm[i])
while self.counter_aug < nr_threads:
time.sleep(0.01)#Wait 0.01s, then check the counter again
t3_b = time.time()
if verbose == 1:
print("Time to perform affine augmentation_internal ="+str(t3_b-t3_a))
X_batch = np.concatenate(self.X_batch)
y_batch = np.concatenate(self.y_batch)
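#convert integer class labels to one-hot vectors of length nr_classes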
Y_batch = np_utils.to_categorical(y_batch, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
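#('remove' pixels are cut from each side so that only the central crop x crop region of the larger, augmented image remains)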
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
# if verbose == 1:
# print("Time to crop to final size="+str(t4-t3))
X_batch_orig = np.copy(X_batch) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
keras_iter_counter = 0
while keras_iter_counter < keras_refresh_nr_epochs and counter < nr_epochs:
keras_iter_counter+=1
#if t2-t1>5: #check for changed settings every 5 seconds
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Another while loop if the user wants to reuse the keras-augmented data
#several times and only apply brightness augmentation:
brightness_iter_counter = 0
while brightness_iter_counter < brightness_refresh_nr_epochs and counter < nr_epochs:
#In each iteration, start with non-augmented data
X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
X_batch = X_batch.astype(np.uint8)
#########X_batch = X_batch.astype(float)########## No float yet :) !!!
brightness_iter_counter += 1
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
if self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.isChecked():
nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_NrEpochs.value())
#Keras stuff
keras_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.value())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
#Brightness stuff
brightness_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.value())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
#Expert mode stuff
expert_mode = bool(self.fittingpopups_ui[listindex].groupBox_expertMode_pop.isChecked())
batchSize_expert = int(self.fittingpopups_ui[listindex].spinBox_batchSize.value())
epochs_expert = int(self.fittingpopups_ui[listindex].spinBox_epochs.value())
learning_rate_expert_on = bool(self.fittingpopups_ui[listindex].groupBox_learningRate_pop.isChecked())
learning_rate_const_on = bool(self.fittingpopups_ui[listindex].radioButton_LrConst.isChecked())
learning_rate_const = float(self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.fittingpopups_ui[listindex].radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMin.text())
cycLrMax = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.fittingpopups_ui[listindex].comboBox_cycLrMethod.currentText())
clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy() #Get a copy of the current clr_settings. .copy prevents that changes in the UI have immediate effect
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,clr_settings["step_size"],batchSize_expert)
cycLrGamma = clr_settings["gamma"]
learning_rate_expo_on = bool(self.fittingpopups_ui[listindex].radioButton_LrExpo.isChecked())
expDecInitLr = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.fittingpopups_ui[listindex].spinBox_expDecSteps.value())
expDecRate = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.isChecked())
loss_expert = str(self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.currentText())
optimizer_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_optimizer_pop.isChecked())
optimizer_expert = str(self.fittingpopups_ui[listindex].comboBox_optimizer.currentText())
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy() #Get a copy of the current optimizer_settings. .copy prevents that changes in the UI have immediate effect
paddingMode_ = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText())
print("paddingMode_:"+str(paddingMode_))
if paddingMode_ != paddingMode:
print("Changed the padding mode!")
gen_train_refresh = True#otherwise changing paddingMode will not have any effect
paddingMode = paddingMode_
train_last_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.isChecked())
train_last_layers_n = int(self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.value())
train_dense_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.isChecked())
dropout_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_dropout_pop.isChecked())
try:
dropout_expert = str(self.fittingpopups_ui[listindex].lineEdit_dropout_pop.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_lossW.isChecked())
lossW_expert = str(self.fittingpopups_ui[listindex].lineEdit_lossW.text())
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
print("Updating parameter file (meta.xlsx)!")
update_para_dict()
#Changes in expert mode can affect the model: apply changes now:
if expert_mode==True:
if collection==False: #Expert mode is currently not supported for Collections
expert_mode_before = True
#Apply changes to the trainable states:
if train_last_layers==True:#Train only the last n layers
if verbose:
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
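#e.g. for a model with 10 layers and train_last_layers_n=2 this yields [False]*8+[True]*2, i.e. only the last two layers remain trainable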
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
if verbose:
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently want to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the list requested by the user...
#Change dropout. Model .compile happens inside change_dropout function
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to changed dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
if verbose:
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
if learning_rate_expert_on==True:
#get the current lr_dict
lr_dict_now = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
if not lr_dict_now.equals(lr_dict_original):#in case the dataframes don't match...
#generate a new callback
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
#update lr_dict_original
lr_dict_original = lr_dict_now.copy()
else:
callback_lr = None
if optimizer_expert_on==True:
optimizer_settings_now = self.fittingpopups_ui[listindex].optimizer_settings.copy()
if not optimizer_settings_now == optimizer_settings:#in case the optimizer settings changed...
#grab these new optimizer values
optimizer_settings = optimizer_settings_now.copy()
############################Invert 'expert' settings#########################
if expert_mode==False and expert_mode_before==True: #if the expert mode was selected before, change the parameters back to original values
if verbose:
print("Expert mode was used before and settings are now inverted")
#Re-set trainable states back to original state
if verbose:
print("Change 'trainable' layers back to original state")
summary = aid_dl.model_change_trainability(model_keras,trainable_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change 'trainable' layers back to original state")
text1 = "Expert mode turns off: Request for orignal trainability states:\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if verbose:
print("Change dropout rates in dropout layers back to original values")
callback_lr = None#remove learning rate callback
if verbose:
print("Set learning rate callback to None")
if len(do_list_original)>0:
do_changed = aid_dl.change_dropout(model_keras,do_list_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout values back to original state. I'm not sure if this works!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to original values: "+str(do_list_original)
else:
text_do = "Dropout rate(s) in model was/were not changed"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do+"\n")
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection==False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection==False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
else:
K.set_value(model_keras[0].optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
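#Changing only the learning rate can be done in place via K.set_value (above); a different optimizer or loss function requires recompiling the model, which is what the 'recompile' flag triggers below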
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
loss_ = model_keras.loss
else:
loss_ = model_keras[0].loss
if loss_!=loss_expert:
recompile = True
model_metrics_records["loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
model_metrics_records["val_loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True and collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change optimizer, loss and learninig rate.")
elif recompile==True and collection==True:
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
print("Altering learning rate is not suported for collections (yet)")
return
print("Recompiling...")
for m in model_keras:
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model in self
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(False)
##########Contrast/Saturation/Hue augmentation#########
#is there any contrast/saturation/hue augmentation to do?
X_batch = X_batch.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
#Fitting can be paused
while str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
time.sleep(2) #wait 2 seconds and then check the text on the button again
if verbose == 1:
print("X_batch.shape")
print(X_batch.shape)
if xtra_in==True:
print("Add Xtra Data to X_batch")
X_batch = [X_batch,xtra_train]
#generate a list of callbacks, get empty list if callback_lr is none
callbacks = []
if callback_lr!=None:
callbacks.append(callback_lr)
###################################################
###############Actual fitting######################
###################################################
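#model_keras_p is the parallel (multi-GPU) model; if it exists, fitting happens on it and its weights are copied back to model_keras whenever a record is broken (see further below)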
if collection==False:
if model_keras_p == None:
history = model_keras.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
elif model_keras_p != None:
history = model_keras_p.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
Histories.append(history.history)
Stopwatch.append(time.time()-time_start)
learningrate = K.get_value(history.model.optimizer.lr)
LearningRate.append(learningrate)
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved" )
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved")
#self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
if record_broken:#if any record was broken...
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
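#(for models wrapped with keras' multi_gpu_model, layers[-2] is assumed to be the single-GPU template model whose weights are copied back here)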
#Save the model
text = "Save model to following directory: \n"+os.path.dirname(new_modelname)
print(text)
if os.path.exists(os.path.dirname(new_modelname)):
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Record was broken -> saved model"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:#in case the folder does not exist (anymore), create a folder in temp
#what is the foldername of the model?
text = "Saving failed. Create folder in temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text = "Your temp. folder is here: "+str(temp_path)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
parentfolder = aid_bin.splitall(new_modelname)[-2]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it not exists already
if not os.path.exists(os.path.join(temp_path,parentfolder)):
text = "Create folder in temp:\n"+os.path.join(temp_path,parentfolder)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
os.mkdir(os.path.join(temp_path,parentfolder))
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,parentfolder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Save the model
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Model saved successfully to temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Also update the excel writer!
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
Saved.append(1)
#Also save the model upon user-request
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
Saved.append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
Saved.append(0)
elif collection==True:
for i in range(len(model_keras)):
#Expert-settings return automatically to default values when Expert-mode is unchecked
history = model_keras[i].fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
HISTORIES[i].append(history.history)
learningrate = K.get_value(history.model.optimizer.lr)
print("model_keras_path[i]")
print(model_keras_path[i])
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#one could 'break' here, but I want to update all records
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#For collections of models:
if record_broken:
#Save the model
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
SAVED[i].append(0)
callback_progressbar = float(counter)/nr_epochs
progress_callback.emit(100.0*callback_progressbar)
history_emit = history.history
history_emit["LearningRate"] = [learningrate]
history_callback.emit(history_emit)
Index.append(counter)
t2 = time.time()
if collection==False:
if counter==0:
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this would be the model that could be saved
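#Histories is a list of keras history dicts; only the value of the last inner epoch of each entry is kept, since that is the state the saved model corresponds to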
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#If this runs the first time, create the file with header
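#meta.xlsx is kept read-only between saves: temporarily make it writable, write the 'History' sheet, then lock it again (chmod back to read-only below)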
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s)"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#self.fittingpopups_ui[listindex].backup.append({"DF1":DF1})
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
#Get a sensible frequency for saving the dataframe (every 20s)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
#elif counter%50==0: #otherwise save the history to excel after each n epochs
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this would be the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#Saving
if os.path.exists(os.path.dirname(new_modelname)):#check if folder is (still) available
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s to directory:\n)"+new_modelname
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
t1 = time.time()
else:#If folder not available, create a folder in temp
text = "Failed to save meta.xlsx. -> Create folder in temp\n"
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text += "Your temp folder is here: "+str(temp_path)+"\n"
folder = os.path.split(new_modelname)[-2]
folder = os.path.split(folder)[-1]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it doesn't exist already
if not os.path.exists(os.path.join(temp_path,folder)):
os.mkdir(os.path.join(temp_path,folder))
text +="Created directory in temp:\n"+os.path.join(temp_path,folder)
print(text)
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,folder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"#reset textcolor to black
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#update the excel writer
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
print("There is already such a file...AID will add new data to it. Please check if this is OK")
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
if collection==True:
if counter==0:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this would be the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#If this runs the first time, create the file with header
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(Writers[i],sheet_name='History')
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index = []#reset the Index list
#Get a sensible frequency for saving the dataframe (every 20s)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this would be the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#Saving
#TODO: save to temp, if harddisk not available to prevent crash.
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header= False)
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
print("meta.xlsx was saved")
t1 = time.time()
Index = []#reset the Index list
counter+=1
progress_callback.emit(100.0)
#If the original storage location became inaccessible (folder name changed, HD unplugged...)
#the models and meta are saved to temp folder. Inform the user!!!
if saving_failed==True:
path_orig = str(self.fittingpopups_ui[listindex].lineEdit_modelname_pop.text())
text = "<html><head/><body><p>Original path:<br>"+path_orig+\
"<br>became inaccessible during training! Files were then saved to:<br>"+\
new_modelname.split(".model")[0]+"<br>To bring both parts back together\
, you have to manually open the meta files (Excel) and copy/paste each sheet. \
Sorry for the inconvenience.<br>If that happens often, you may contact \
the main developer and ask him to improve that.</p></body></html>"
text = "<span style=\' font-weight:600; color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
print('\a')#make a noise
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.setStyleSheet("background-color: yellow;")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.moveCursor(QtGui.QTextCursor.End)
if collection==False:
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this would be the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
Index = []#reset the Index list
Histories = []#reset the Histories list
Saved = []
#does such a file exist already? append!
if not os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(writer,sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
writer.save()
writer.close()
if collection==True:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this would be the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#does such a file exist already? append!
if not os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(Writers[i],sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header= False)
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
Writers[i].save()
Writers[i].close()
Index = []#reset the Index list
sess.close()
# try:
# aid_dl.reset_keras(model_keras)
# except:
# pass
def action_fit_model(self):
#Take the initialized model
#Unfortunately, in TensorFlow it is not possible to pass a model from
#one thread to another. Therefore I have to load and save the models each time :(
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Model could not be initialized")
# msg.setWindowTitle("Error")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#There should be at least two outputs (index 0 and 1)
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras[1][0].get_config()#["layers"]
nr_classes = int(model_keras[1][0].output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if collection==False:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, I have to reload the model action_fit_model_worker anyway
if collection==True:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = [new_modelname.split(".model")[0]+"_"+model_keras[0][i]+".model" for i in range(len(model_keras[0]))]
for i in range(len(self.model_keras_path)):
#save a first version of the .model
model_keras[1][i].save(self.model_keras_path[i])
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, I have to reload the model action_fit_model_worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
###################Popup Window####################################
self.fittingpopups.append(MyPopup())
ui = aid_frontend.Fitting_Ui()
ui.setupUi(self.fittingpopups[-1]) #append the ui to the last element on the list
self.fittingpopups_ui.append(ui)
# Increase the popupcounter by one; this will help to coordinate the data flow between main ui and popup
self.popupcounter += 1
listindex=self.popupcounter-1
##############################Define functions#########################
self.fittingpopups_ui[listindex].pushButton_UpdatePlot_pop.clicked.connect(lambda: self.update_historyplot_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Stop_pop.clicked.connect(lambda: self.stop_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.clicked.connect(lambda: self.pause_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveTextWindow_pop.clicked.connect(lambda: self.saveTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_clearTextWindow_pop.clicked.connect(lambda: self.clearTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_showModelSumm_pop.clicked.connect(lambda: self.showModelSumm_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveModelSumm_pop.clicked.connect(lambda: self.saveModelSumm_pop(listindex))
#Expert mode functions
#self.fittingpopups_ui[listindex].checkBox_pTr_pop.toggled.connect(lambda on_or_off: self.partialtrainability_activated_pop(on_or_off,listindex))
self.fittingpopups_ui[listindex].pushButton_lossW.clicked.connect(lambda: self.lossWeights_popup(listindex))
self.fittingpopups_ui[listindex].checkBox_lossW.clicked.connect(lambda on_or_off: self.lossWeights_activated(on_or_off,listindex))
self.fittingpopups_ui[listindex].Form.setWindowTitle(os.path.split(new_modelname)[1])
self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue(0) #set the progress bar to zero
self.fittingpopups_ui[listindex].pushButton_ShowExamleImgs_pop.clicked.connect(lambda: self.action_show_example_imgs_pop(listindex))
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.doubleClicked.connect(lambda item: self.tableWidget_HistoryInfo_pop_dclick(item,listindex))
#Cyclical learning rate extra settings
self.fittingpopups_ui[listindex].pushButton_cycLrPopup.clicked.connect(lambda: self.popup_clr_settings(listindex))
self.fittingpopups_ui[listindex].comboBox_optimizer.currentTextChanged.connect(lambda: self.expert_optimizer_changed(optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_LR_plot.clicked.connect(lambda: self.popup_lr_plot(listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_optimizer_pop.clicked.connect(lambda: self.optimizer_change_settings_popup(listindex))
worker = Worker(self.action_fit_model_worker)
#Get a signal from the worker to update the progressbar
worker.signals.progress.connect(self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue)
#Define a func which prints information during fitting to textbrowser
#And furthermore provide option to do real-time plotting
def real_time_info(dic):
self.fittingpopups_ui[listindex].Histories.append(dic) #append to a list. Will be used for plotting in the "Update plot" function
OtherMetrics_keys = self.fittingpopups_ui[listindex].RealTime_OtherMetrics.keys()
#Append to lists for real-time plotting
self.fittingpopups_ui[listindex].RealTime_Acc.append(dic["acc"][0])
self.fittingpopups_ui[listindex].RealTime_ValAcc.append(dic["val_acc"][0])
self.fittingpopups_ui[listindex].RealTime_Loss.append(dic["loss"][0])
self.fittingpopups_ui[listindex].RealTime_ValLoss.append(dic["val_loss"][0])
keys = list(dic.keys())
#sort keys alphabetically
keys_ = [l.lower() for l in keys]
ind_sort = np.argsort(keys_)
keys = list(np.array(keys)[ind_sort])
#First keys should always be acc, loss, val_acc, val_loss - in this order
keys_first = ["acc","loss","val_acc","val_loss"]
for i in range(len(keys_first)):
if keys_first[i] in keys:
ind = np.where(np.array(keys)==keys_first[i])[0][0]
if ind!=i:
del keys[ind]
keys.insert(i,keys_first[i])
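#after this loop the first entries of 'keys' are acc, loss, val_acc, val_loss (if present), which fixes the column order of the checkbox table below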
for key in keys:
if "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
if not key in OtherMetrics_keys: #if this key is missing in self.fittingpopups_ui[listindex].RealTime_OtherMetrics attach it!
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key] = []
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key].append(dic[key])
dic_text = [("{} {}".format(item, np.round(amount[0],4))) for item, amount in dic.items()]
text = "Epoch "+str(self.fittingpopups_ui[listindex].epoch_counter)+"\n"+" ".join(dic_text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
self.fittingpopups_ui[listindex].epoch_counter+=1
if self.fittingpopups_ui[listindex].epoch_counter==1:
#for each key, put a checkbox on the tableWidget_HistoryInfo_pop
rowPosition = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.rowCount()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.insertRow(rowPosition)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setColumnCount(len(keys))
for columnPosition in range(len(keys)):#(2,4):
key = keys[columnPosition]
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem(str(key))#("item {0} {1}".format(rowNumber, columnNumber))
item.setBackground(QtGui.QColor(self.colorsQt[columnPosition]))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setItem(rowPosition, columnPosition, item)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeColumnsToContents()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeRowsToContents()
########################Real-time plotting#########################
if self.fittingpopups_ui[listindex].checkBox_realTimePlotting_pop.isChecked():
#get the range for the real time fitting
if hasattr(self.fittingpopups_ui[listindex], 'historyscatters'):#if update plot was hit before
x = range(len(self.fittingpopups_ui[listindex].Histories))
realTimeEpochs = self.fittingpopups_ui[listindex].spinBox_realTimeEpochs.value()
if len(x)>realTimeEpochs:
x = x[-realTimeEpochs:]
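#keep only the most recent realTimeEpochs points so the real-time plot shows a sliding window instead of the full history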
#is any metric checked on the table?
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is it checked?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
for i in range(len(self.fittingpopups_ui[listindex].historyscatters)): #iterate over all available plots
key = list(self.fittingpopups_ui[listindex].historyscatters.keys())[i]
if key in selected_items:
if key=="acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Acc).astype(float)
elif key=="val_acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValAcc).astype(float)
elif key=="loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Loss).astype(float)
elif key=="val_loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValLoss).astype(float)
elif "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
y = np.array(self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key]).astype(float).reshape(-1,)
else:
return
#Only show the last realTimeEpochs epochs
if y.shape[0]>realTimeEpochs:
y = y[-realTimeEpochs:]
if y.shape[0]==len(x):
self.fittingpopups_ui[listindex].historyscatters[key].setData(x, y)#,pen=None,symbol='o',symbolPen=None,symbolBrush=brush,clear=False)
else:
print("x and y are not the same size! Omitted plotting. I will try again to plot after the next epoch.")
pg.QtGui.QApplication.processEvents()
self.fittingpopups_ui[listindex].epoch_counter = 0
#self.fittingpopups_ui[listindex].backup = [] #backup of the meta information -> in case the original folder is not accessible anymore
worker.signals.history.connect(real_time_info)
#Finally start the worker!
self.threadpool.start(worker)
self.fittingpopups[listindex].show()
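#################Learning rate screening (LR finder)##################
#The following functions sweep the learning rate from a user-defined start
#value to a stop value over a few epochs while recording loss/accuracy,
#so that a suitable learning rate (or range) can be read off the plot.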
def action_lr_finder(self):
#lr_find
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is not supported for Collections of models. Please select single model")
msg.setWindowTitle("LR screening not supported for Collections!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_classes = int(model_keras.output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, I have to reload the model action_fit_model_worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
worker = Worker(self.action_lr_finder_worker)
#Get a signal from the worker to update the progressbar
worker.signals.progress.connect(print)
worker.signals.history.connect(print)
#Finally start the worker!
self.threadpool.start(worker)
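#Worker for the actual LR screening (runs in a background thread):
#it reloads the saved .model, prepares training/validation data including
#the user-defined augmentation, runs aid_dl.LearningRateFinder and stores
#the recorded learning rates and metrics on self for plotting.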
def action_lr_finder_worker(self,progress_callback,history_callback):
if self.radioButton_cpu.isChecked():
gpu_used = False
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
gpu_used = True
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
#listindex = self.popupcounter-1
#Get user-specified filename for the new model
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is currently not supported for Collections of models. Please use single model")
msg.setWindowTitle("LR screening not supported for Collections")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
##############Main function after hitting FIT MODEL####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#loss and optimizer are only placeholders; expert settings may follow and the model will be recompiled
elif deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#loss and optimizer are only placeholders; expert settings may follow and the model will be recompiled
#Collect all information about the fitting routine that was user-defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
trainable_original, layer_names = self.trainable_original, self.layer_names
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False) #setChecked returns None; do not assign its return value
print("Expert mode was switched off. Not implemented yet for collections")
expert_mode = False
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy()
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print(class_weight)
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
###############################Expert Mode values##################
if expert_mode==True:
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if dropout_expert_on==True:
#The user apparently want to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
print(text)
else:
text = "Could not understand user input at Expert->Dropout"
print(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model is not equal to the required do_list from user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
print(text_updates)
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(self.ram)))
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0: #Here, the entire training set needs to be used! Not only random images!
#Replace=true: means individual cells could occur several times
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
percDataV = float(self.popup_lrfinder_ui.doubleSpinBox_percDataV.value())
percDataV = percDataV/100.0
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if len(self.ram)==0:#if there is no data available on ram
#replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:#get a similar generator, using the ram-data
gen_valid = aid_img.gen_crop_img_ram(self.ram,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
xtra_valid = np.concatenate(xtra_valid)
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
###################Load training data####################
#####################and perform#########################
##################Image augmentation#####################
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
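#Example: crop=32 -> sqrt(32**2+32**2)=45.25 -> rounded up to the next even number = 46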
#Should only a certain percentage of the numbers given in the table be sampled?
percDataT = float(self.popup_lrfinder_ui.doubleSpinBox_percDataT.value())
percDataT = percDataT/100.0
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Affine augmentation
X_train = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_train = np.copy(y_train)
Y_train = np_utils.to_categorical(y_train, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
dim = X_train.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_train = X_train[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
#X_train = np.copy(X_train) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#In each iteration, start with non-augmented data
#X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
#X_train = X_train.astype(np.uint8)
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
X_train = X_train.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_train = aid_img.contrast_augm_cv2(X_train,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_train = aid_img.satur_hue_augm_cv2(X_train.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_train = aid_img.avg_blur_cv2(X_train,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_train = aid_img.gauss_blur_cv(X_train,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_train = aid_img.motion_blur_cv(X_train,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_train = aid_img.brightn_noise_augm_cv2(X_train,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_train = aid_img.image_normalization(X_train,norm,mean_trainingdata,std_trainingdata)
else:
X_train = aid_img.image_normalization(X_train,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
if verbose == 1:
print("X_train.shape")
print(X_train.shape)
if xtra_in==True:
print("Add Xtra Data to X_train")
X_train = [X_train,xtra_train]
###################################################
###############Actual fitting######################
###################################################
batch_size = int(self.popup_lrfinder_ui.spinBox_batchSize.value())
stepsPerEpoch = int(self.popup_lrfinder_ui.spinBox_stepsPerEpoch.value())
epochs = int(self.popup_lrfinder_ui.spinBox_epochs.value())
start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
valMetrics = bool(self.popup_lrfinder_ui.checkBox_valMetrics.isChecked())
####################lr_find algorithm####################
if model_keras_p is None:
lrf = aid_dl.LearningRateFinder(model_keras)
else:
lrf = aid_dl.LearningRateFinder(model_keras_p)
if valMetrics==True:
lrf.find([X_train,Y_train],[X_valid,Y_valid],start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
else:
lrf.find([X_train,Y_train],None,start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
skipBegin,skipEnd = 10,1
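#Drop the first 10 and the last recorded value; the very first iterations
#and the final (often diverging) step would otherwise distort the plot.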
self.learning_rates = lrf.lrs[skipBegin:-skipEnd]
self.losses_or = lrf.losses_or[skipBegin:-skipEnd]
self.losses_sm = lrf.losses_sm[skipBegin:-skipEnd]
self.accs_or = lrf.accs_or[skipBegin:-skipEnd]
self.accs_sm = lrf.accs_sm[skipBegin:-skipEnd]
self.val_losses_sm = lrf.val_losses_sm[skipBegin:-skipEnd]
self.val_losses_or = lrf.val_losses_or[skipBegin:-skipEnd]
self.val_accs_sm = lrf.val_accs_sm[skipBegin:-skipEnd]
self.val_accs_or = lrf.val_accs_or[skipBegin:-skipEnd]
# Enable the groupboxes
self.popup_lrfinder_ui.groupBox_singleLr.setEnabled(True)
self.popup_lrfinder_ui.groupBox_LrRange.setEnabled(True)
self.update_lrfind_plot()
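#Redraw the LR-finder plot for the metric currently selected in the popup
#(loss/accuracy, optionally smoothed or as 1st derivative) versus log10(learning rate).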
def update_lrfind_plot(self):
if not hasattr(self, 'learning_rates'):
return
metric = str(self.popup_lrfinder_ui.comboBox_metric.currentText())
color = self.popup_lrfinder_ui.pushButton_color.palette().button().color()
width = int(self.popup_lrfinder_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor = pg.mkPen(color, width=width)
smooth = bool(self.popup_lrfinder_ui.checkBox_smooth.isChecked())
try:# try to empty the plot
self.popup_lrfinder_ui.lr_plot.clear()
#self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_line)
except:
pass
if metric=="Loss" and smooth==True:
self.y_values = self.losses_sm
elif metric=="Loss" and smooth==False:
self.y_values = self.losses_or
elif metric=="Loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.losses_sm,n=1)
elif metric=="Loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.losses_or,n=1)
elif metric=="Accuracy" and smooth==True:
self.y_values = self.accs_sm
elif metric=="Accuracy" and smooth==False:
self.y_values = self.accs_or
elif metric=="Accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.accs_sm,n=1)
elif metric=="Accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.accs_or,n=1)
elif metric=="Val. loss" and smooth==True:
self.y_values = self.val_losses_sm
elif metric=="Val. loss" and smooth==False:
self.y_values = self.val_losses_or
elif metric=="Val. loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_losses_sm,n=1)
elif metric=="Val. loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_losses_or,n=1)
elif metric=="Val. accuracy" and smooth==True:
self.y_values = self.val_accs_sm
elif metric=="Val. accuracy" and smooth==False:
self.y_values = self.val_accs_or
elif metric=="Val. accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_accs_sm,n=1)
elif metric=="Val. accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_accs_or,n=1)
else:
print("The combination of "+str(metric)+" and smooth="+str(smooth)+" is not supported!")
if len(self.learning_rates)==len(self.y_values):
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates), y=self.y_values,pen=pencolor,name=metric)
elif len(self.learning_rates)-1==len(self.y_values):
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates)[1:], y=self.y_values,pen=pencolor,name=metric)
else:
print("No data available. Probably, validation metrics were not computed. Please click Run again.")
return
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_line)
#In case the groupBox_singleLr is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_singleLr.isChecked():
self.get_lr_single(on_or_off=True)
#In case the groupBox_LrRange is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_LrRange.isChecked():
self.get_lr_range(on_or_off=True)
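#Draggable vertical line at the learning rate where the plotted metric is
#minimal; dragging it writes 10**position into lineEdit_singleLr.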
def get_lr_single(self,on_or_off):
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
ind = np.argmin(self.y_values)#find location of loss-minimum
mini_x = self.learning_rates[ind]
mini_x = np.log10(mini_x)
pen = pg.mkPen(color="w")
self.lr_single = pg.InfiniteLine(pos=mini_x, angle=90, pen=pen, movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_single)
def position_changed():
#where did the user drag the region_linfit to?
new_position = 10**(self.lr_single.value())
self.popup_lrfinder_ui.lineEdit_singleLr.setText(str(new_position))
self.lr_single.sigPositionChangeFinished.connect(position_changed)
if on_or_off==False: #user unchecked the groupbox->remove the InfiniteLine if possible
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_single)
except:
pass
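#Draggable region from a small default learning rate (1e-5) up to the metric
#minimum; dragging it writes the 10**bounds into lineEdit_LrMin/lineEdit_LrMax.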
def get_lr_range(self,on_or_off):
#print(on_or_off)
#start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
#stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
start_x = 0.00001
start_x = np.log10(start_x)
ind = np.argmin(self.y_values)#find location of loss-minimum
end_x = self.learning_rates[ind]
end_x = np.log10(end_x)
self.lr_region = pg.LinearRegionItem([start_x, end_x], movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_region)
def region_changed():
#where did the user drag the region_linfit to?
new_region = self.lr_region.getRegion()
new_region_left = 10**(new_region[0])
new_region_right = 10**(new_region[1])
self.popup_lrfinder_ui.lineEdit_LrMin.setText(str(new_region_left))
self.popup_lrfinder_ui.lineEdit_LrMax.setText(str(new_region_right))
self.lr_region.sigRegionChangeFinished.connect(region_changed)
if on_or_off==False: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_region)
except:
pass
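#Show a few example images of the requested class in the main window, either
#as original images or after the full augmentation pipeline, drawn from the
#training or validation files.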
def action_show_example_imgs(self): #this function is only for the main window
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Get state of the comboboxes!
tr_or_valid = str(self.comboBox_ShowTrainOrValid.currentText())
w_or_wo_augm = str(self.comboBox_ShowWOrWoAug.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
#which class index is requested by the user?
req_index = int(self.spinBox_ShowIndex.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata was zero and is now set to 0.0001 to avoid div. by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
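#Example: crop=32 -> sqrt(32**2+32**2)=45.25 -> rounded up to the next even number = 46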
############Cropping and image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try: #When all cells are at the border of the image, the generator will be empty. Avoid program crash by try, except
X.append(next(gen)[0])
except StopIteration:
print("All events at border of image and discarded")
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
X = X.astype(np.uint8) #make sure we stay in uint8
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
if verbose: print("Shape of the shown images is:"+str(X.shape))
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try:
X.append(next(gen)[0])
except:
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3) #Add the "channels" dimension
else:
print("Invalid data dimension: " +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
if verbose: print("Shape of the shown images is: "+str(X.shape))
#Is there already anything shown on the widget?
children = self.widget_ViewImages.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.gridLayout_ViewImages.count())):
widgetToRemove = self.gridLayout_ViewImages.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.gridLayout_ViewImages = QtWidgets.QGridLayout(self.widget_ViewImages)
for i in range(5):
if channels==1:
img = X[i,:,:,0] #TensorFlow
if channels==3:
img = X[i,:,:,:] #TensorFlow
#Stretch pixel value to full 8bit range (0-255); only for display
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
img = img.astype(np.uint8)
if channels==1:
height, width = img.shape
if channels==3:
height, width, _ = img.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pyqtgraph instead, in order to allow for exporting images
self.image_show = pg.ImageView(self.widget_ViewImages)
self.image_show.show()
if verbose: print("Shape of zoomed image: "+str(img.shape))
if channels==1:
self.image_show.setImage(img.T,autoRange=False)
if channels==3:
self.image_show.setImage(np.swapaxes(img,0,1),autoRange=False)
self.image_show.ui.histogram.hide()
self.image_show.ui.roiBtn.hide()
self.image_show.ui.menuBtn.hide()
self.gridLayout_ViewImages.addWidget(self.image_show, 1,i)
self.widget_ViewImages.show()
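#Double-clicking a metric cell in the history table of a fitting popup opens
#a color dialog to pick a new plot color for that metric (pure black is
#rejected; the "Show saved only" cell is excluded).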
def tableWidget_HistoryInfo_pop_dclick(self,item,listindex):
if item is not None:
tableitem = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
#self.update_historyplot_pop(listindex)
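#Same as action_show_example_imgs, but all preprocessing/augmentation settings
#are read from the fitting popup at listindex instead of the main window.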
def action_show_example_imgs_pop(self,listindex): #this function is only for the fitting popups
#Get state of the comboboxes!
tr_or_valid = str(self.fittingpopups_ui[listindex].comboBox_ShowTrainOrValid_pop.currentText())
w_or_wo_augm = str(self.fittingpopups_ui[listindex].comboBox_ShowWOrWoAug_pop.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.value())
norm = str(self.fittingpopups_ui[listindex].comboBox_Normalization_pop.currentText())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
paddingMode = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText()).lower()
#which class index is requested by the user?
req_index = int(self.fittingpopups_ui[listindex].spinBox_ShowIndex_pop.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
if len(self.ram)==0:
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
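#Illustrative note (not from the original code): the per-file means/stds are averaged with equal weight per file,
#e.g. file means of 0.40 and 0.50 give mean_trainingdata = 0.45; averaging the per-file stds approximates
#(rather than exactly equals) the pooled std over all training images.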
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata turned out to be zero. I set it to 0.0001, to avoid division by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45 degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
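#Illustrative example (not from the original code): for crop=64, cropsize2 = sqrt(64**2+64**2) ≈ 90.5, rounded up to 92,
#so even a worst-case 45 degree rotation of the enlarged patch still fully covers the final 64x64 region cropped out below.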
############Get cropped images with image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X.shape))
if channels==1:
#Add the "channels" dimension
X = np.expand_dims(X,3)
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
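#Worked example (illustrative): with dim[2]=92 and crop=64, remove = int(46.0-32.0) = 14, so the slice
#[14:78] keeps the central 64x64 pixels.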
##########Contrast/Saturation/Hue augmentation#########
#is there any contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels = 3
elif len(X.shape)==3:
channels = 1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
#Is there already anything shown on the widget?
children = self.fittingpopups_ui[listindex].widget_ViewImages_pop.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.count())):
widgetToRemove = self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop = QtWidgets.QGridLayout(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
for i in range(5):
if channels==1:
img = X[i,:,:,0]
if channels==3:
img = X[i,:,:,:]
#Normalize image to full 8bit range (from 0 to 255)
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
img = img.astype(np.uint8)
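#Illustrative: an image with values in [-0.5, 2.5] is shifted to [0, 3] and rescaled to [0, 255] before the uint8 cast.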
# height, width = img_zoom.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages_pop.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pyqtgraph instead, in order to allow for exporting images
self.fittingpopups_ui[listindex].image_show_pop = pg.ImageView(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
self.fittingpopups_ui[listindex].image_show_pop.show()
if channels==1:
self.fittingpopups_ui[listindex].image_show_pop.setImage(img.T,autoRange=False)
if channels==3:
self.fittingpopups_ui[listindex].image_show_pop.setImage(np.swapaxes(img,0,1),autoRange=False)
self.fittingpopups_ui[listindex].image_show_pop.ui.histogram.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.roiBtn.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.menuBtn.hide()
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.addWidget(self.fittingpopups_ui[listindex].image_show_pop, 1,i)
self.fittingpopups_ui[listindex].widget_ViewImages_pop.show()
def get_color_mode(self):
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
return "Grayscale"
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
return "RGB"
else:
return None
def checkBox_rollingMedian_statechange(self,item):#used in frontend
self.horizontalSlider_rollmedi.setEnabled(item)
def update_historyplot(self):
#After loading a history, there are checkboxes available. Check, if user checked some:
colcount = self.tableWidget_HistoryItems.columnCount()
#Collect items that are checked
selected_items = []
Colors = []
for colposition in range(colcount):
#get checkbox item and; is it checked?
cb = self.tableWidget_HistoryItems.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
#Get a list of the color from the background of the table items
DF1 = self.loaded_history
#Clear the plot
self.widget_Scatterplot.clear()
#Add plot
self.plt1 = self.widget_Scatterplot.addPlot()
self.plt1.showGrid(x=True,y=True)
self.plt1.addLegend()
self.plt1.setLabel('bottom', 'Epoch', units='')
self.plot_rollmedis = [] #list for plots of rolling medians
if "Show saved only" in selected_items:
#nr_of_selected_items = len(selected_items)-1
#get the "Saved" column from DF1
saved = DF1["Saved"]
saved = np.where(np.array(saved==1))[0]
# else:
# nr_of_selected_items = len(selected_items)
self.Colors = Colors
scatter_x,scatter_y = [],[]
for i in range(len(selected_items)):
key = selected_items[i]
if key!="Show saved only":
df = DF1[key]
epochs = range(len(df))
win = int(self.horizontalSlider_rollmedi.value())
rollmedi = df.rolling(window=win).median()
if "Show saved only" in selected_items:
df = np.array(df)[saved]
epochs = np.array(epochs)[saved]
rollmedi = pd.DataFrame(df).rolling(window=win).median()
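#Note (illustrative): pandas' rolling(window=win).median() returns NaN for the first win-1 points
#(min_periods defaults to the window size), e.g. win=10 gives its first valid value at index 9.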
scatter_x.append(epochs)
scatter_y.append(df)
color = self.Colors[i]
pen_rollmedi = list(color.color().getRgb())
pen_rollmedi = pg.mkColor(pen_rollmedi)
pen_rollmedi = pg.mkPen(color=pen_rollmedi,width=6)
color = list(color.color().getRgb())
color[-1] = int(0.6*color[-1])
color = tuple(color)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
self.plt1.plot(epochs, df,pen=None,symbol='o',symbolPen=None,symbolBrush=brush,name=key,clear=False)
if bool(self.checkBox_rollingMedian.isChecked()):#Should a rolling median be plotted?
try:
rollmedi = np.array(rollmedi).reshape(rollmedi.shape[0])
rm = self.plt1.plot(np.array(epochs), rollmedi,pen=pen_rollmedi,clear=False)
self.plot_rollmedis.append(rm)
except Exception as e:
#There is an issue for the rolling median plotting!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(str(e)+"\n->There are likely too few points to have a rolling median with such a window size ("+str(round(win))+")")
msg.setWindowTitle("Error occurred when plotting rolling median:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
if len(str(self.lineEdit_LoadHistory.text()))==0:
#if DF1==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please load History file first (.meta)")
msg.setWindowTitle("No History file loaded")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if len(scatter_x)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please select at least one of " +"\n".join(list(DF1.keys())))
msg.setWindowTitle("No quantity selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Keep the information as lists available for this function
self.scatter_x_l, self.scatter_y_l = scatter_x,scatter_y
if bool(self.checkBox_linearFit.isChecked()):
#Put a linear region on the plot; cover the last 10% of points
if np.max(np.concatenate(scatter_x))<12:
start_x = 0
end_x = np.max(np.concatenate(scatter_x))+1
else:
start_x = int(0.9*np.max(np.concatenate(scatter_x)))
end_x = int(1.0*np.max(np.concatenate(scatter_x)))
self.region_linfit = pg.LinearRegionItem([start_x, end_x], bounds=[-np.inf,np.inf], movable=True)
self.plt1.addItem(self.region_linfit)
def region_changed():
try: #clear the plot from other fits if there are any
if len(self.plot_fits)>0:
for i in range(len(self.plot_fits)):
self.plt1.legend.removeItem(self.names[i])
self.plt1.removeItem(self.plot_fits[i])
except:
pass
#where did the user drag the region_linfit to?
new_region = self.region_linfit.getRegion()
#for each curve, do a linear regression
self.plot_fits,self.names = [], []
for i in range(len(self.scatter_x_l)):
scatter_x_vals = np.array(self.scatter_x_l[i])
ind = np.where( (scatter_x_vals<new_region[1]) & (scatter_x_vals>new_region[0]) )
scatter_x_vals = scatter_x_vals[ind]
scatter_y_vals = np.array(self.scatter_y_l[i])[ind]
if len(scatter_x_vals)>1:
fit = np.polyfit(scatter_x_vals,scatter_y_vals,1)
fit_y = fit[0]*scatter_x_vals+fit[1]
pencolor = pg.mkColor(self.Colors[i].color())
pen = pg.mkPen(color=pencolor,width=6)
text = 'y='+("{:.2e}".format(fit[0]))+"x + " +("{:.2e}".format(fit[1]))
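#Formatting example (illustrative): a fitted slope of 0.0023 and intercept of 0.91 produce the legend entry 'y=2.30e-03x + 9.10e-01'.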
self.names.append(text)
self.plot_fits.append(self.plt1.plot(name=text))
self.plot_fits[i].setData(scatter_x_vals,fit_y,pen=pen,clear=False,name=text)
self.region_linfit.sigRegionChangeFinished.connect(region_changed)
def slider_changed():
if bool(self.checkBox_rollingMedian.isChecked()):
#remove other rolling median lines:
for i in range(len(self.plot_rollmedis)):
self.plt1.removeItem(self.plot_rollmedis[i])
#Start with fresh list
self.plot_rollmedis = []
win = int(self.horizontalSlider_rollmedi.value())
for i in range(len(self.scatter_x_l)):
epochs = np.array(self.scatter_x_l[i])
if type(self.scatter_y_l[i]) == pd.core.frame.DataFrame:
rollmedi = self.scatter_y_l[i].rolling(window=win).median()
else:
rollmedi = pd.DataFrame(self.scatter_y_l[i]).rolling(window=win).median()
rollmedi = np.array(rollmedi).reshape(rollmedi.shape[0])
pencolor = pg.mkColor(self.Colors[i].color())
pen_rollmedi = pg.mkPen(color=pencolor,width=6)
rm = self.plt1.plot(np.array(epochs), rollmedi,pen=pen_rollmedi,clear=False)
self.plot_rollmedis.append(rm)
self.horizontalSlider_rollmedi.sliderMoved.connect(slider_changed)
scatter_x = np.concatenate(scatter_x)
scatter_y = np.concatenate(scatter_y)
scatter_x_norm = (scatter_x.astype(float))/float(np.max(scatter_x))
scatter_y_norm = (scatter_y.astype(float))/float(np.max(scatter_y))
self.model_was_selected_before = False
def onClick(event):
#Get all plotting items
#if len(self.plt1.listDataItems())==nr_of_selected_items+1:
#delete the last item if the user selected already one:
if self.model_was_selected_before:
self.plt1.removeItem(self.plt1.listDataItems()[-1])
items = self.widget_Scatterplot.scene().items(event.scenePos())
#get the index of the viewbox
isviewbox = [type(item)==pg.graphicsItems.ViewBox.ViewBox for item in items]
index = np.where(np.array(isviewbox)==True)[0]
vb = np.array(items)[index]
try: #when the user rescaled the view and clicks somewhere outside, an IndexError can occur.
clicked_x = float(vb[0].mapSceneToView(event.scenePos()).x())
clicked_y = float(vb[0].mapSceneToView(event.scenePos()).y())
except:
return
try:
a1 = (clicked_x)/float(np.max(scatter_x))
a2 = (clicked_y)/float(np.max(scatter_y))
except Exception as e:
print(str(e))
return
#Which is the closest scatter point?
dist = np.sqrt(( a1-scatter_x_norm )**2 + ( a2-scatter_y_norm )**2)
index = np.argmin(dist)
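#Illustrative: both axes are divided by their maxima before the Euclidean distance is computed, so a click is
#matched to the nearest point in relative terms (e.g. being 5 epochs off out of a maximum of 500 weighs the same
#as being 0.01 off in a metric whose maximum is 1.0).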
clicked_x = scatter_x[index]
clicked_y = scatter_y[index]
#Update the spinBox
#self.spinBox_ModelIndex.setValue(int(clicked_x))
#Modelindex for textBrowser_SelectedModelInfo
text_index = "\nModelindex: "+str(clicked_x)
#Indicate the selected model on the scatter plot
self.plt1.plot([clicked_x], [clicked_y],pen=None,symbol='o',symbolPen='w',clear=False)
#Get more information about this model
Modelname = str(self.loaded_para["Modelname"].iloc[0])
path, filename = os.path.split(Modelname)
filename = filename.split(".model")[0]+"_"+str(clicked_x)+".model"
path = os.path.join(path,filename)
if os.path.isfile(path):
text_path = "\nFile is located in:"+path
else:
text_path = "\nFile not found!:"+path+"\nProbably the .model was deleted or not saved"
text_acc = str(DF1.iloc[clicked_x])
self.textBrowser_SelectedModelInfo.setText("Loaded model: "+filename+text_index+text_path+"\nPerformance:\n"+text_acc)
self.model_was_selected_before = True
self.model_2_convert = path
self.widget_Scatterplot.scene().sigMouseClicked.connect(onClick)
def action_load_history(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta file (*meta.xlsx)")
filename = filename[0]
if not filename.endswith("meta.xlsx"):
return
if not os.path.isfile(filename):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_LoadHistory.setText(filename)
self.action_plot_history(filename)
def action_load_history_current(self):
if self.model_keras_path==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no fitting going on")
msg.setWindowTitle("No current fitting process!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
history_path = self.model_keras_path
if type(history_path)==list:#collection=True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Not implemented for collections. Please use 'Load History' button to specify a single .meta file")
msg.setWindowTitle("Not implemented for collections")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
filename = history_path.split("_0.model")[0]+"_meta.xlsx"
if not filename.endswith("meta.xlsx"):
return
if not os.path.isfile(filename):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_LoadHistory.setText(filename)
self.action_plot_history(filename)
def action_plot_history(self,filename):
#If there is a file, it can happen that fitting is currently going on
#and with bad luck AID just tries to write to the file. This would cause a crash.
#Therefore, first try to copy the file to a temporary folder. If that fails,
#wait 1.5 seconds and try again
#There needs to be a "temp" folder. If there is none, create it!
#does temp exist?
tries = 0 #during fitting, AID sometimes wants to write to the history file. In this case we cant read
try:
while tries<15:#try a few times
try:
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
#Create a random filename for a temp. file
someletters = list("STERNBURGPILS")
temporaryfile = np.random.choice(someletters,5,replace=True)
temporaryfile = "".join(temporaryfile)+".xlsx"
temporaryfile = os.path.join(temp_path,temporaryfile)
shutil.copyfile(filename,temporaryfile) #copy the original excel file there
dic = pd.read_excel(temporaryfile,sheet_name='History',index_col=0) #open it there
self.loaded_history = dic
para = pd.read_excel(temporaryfile,sheet_name='Parameters')
print(temporaryfile)
#delete the tempfile
os.remove(temporaryfile)
self.loaded_para = para
tries = 16
except:
time.sleep(1.5)
tries+=1
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Check if dic exists now
try:
keys = list(dic.keys())
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
#sort the list alphabetically
keys_ = [l.lower() for l in keys]
ind_sort = np.argsort(keys_)
keys = list(np.array(keys)[ind_sort])
#First keys should always be acc,loss,val_acc,val_loss -in this order
keys_first = ["acc","loss","val_acc","val_loss"]
for i in range(len(keys_first)):
if keys_first[i] in keys:
ind = np.where(np.array(keys)==keys_first[i])[0][0]
if ind!=i:
del keys[ind]
keys.insert(i,keys_first[i])
#Lastly check if there is "Saved" or "Time" present and shift it to the back
keys_last = ["Saved","Time"]
for i in range(len(keys_last)):
if keys_last[i] in keys:
ind = np.where(np.array(keys)==keys_last[i])[0][0]
if ind!=len(keys):
del keys[ind]
keys.append(keys_last[i])
self.tableWidget_HistoryItems.setColumnCount(len(keys)+1) #+1 because of "Show saved only"
#for each key, put a checkbox on the tableWidget_HistoryInfo_pop
rowPosition = self.tableWidget_HistoryItems.rowCount()
if rowPosition==0:
self.tableWidget_HistoryItems.insertRow(0)
else:
rowPosition=0
for columnPosition in range(len(keys)):#(2,4):
key = keys[columnPosition]
item = QtWidgets.QTableWidgetItem(str(key))#("item {0} {1}".format(rowNumber, columnNumber))
item.setBackground(QtGui.QColor(self.colorsQt[columnPosition]))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.tableWidget_HistoryItems.setItem(rowPosition, columnPosition, item)
#One checkbox at the end to switch on/of to show only the models that are saved
columnPosition = len(keys)
item = QtWidgets.QTableWidgetItem("Show saved only")#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.tableWidget_HistoryItems.setItem(rowPosition, columnPosition, item)
self.tableWidget_HistoryItems.resizeColumnsToContents()
self.tableWidget_HistoryItems.resizeRowsToContents()
def history_tab_get_model_path(self):#Let user define a model he would like to convert
#pushButton_LoadModel
#Open a QFileDialog
filepath = QtWidgets.QFileDialog.getOpenFileName(self, 'Select a trained model you want to convert', Default_dict["Path of last model"],"Keras Model file (*.model)")
filepath = filepath[0]
if os.path.isfile(filepath):
self.model_2_convert = filepath
path, filename = os.path.split(filepath)
try:
modelindex = filename.split(".model")[0]
modelindex = int(modelindex.split("_")[-1])
except:
modelindex = np.nan
self.textBrowser_SelectedModelInfo.setText("Error loading model")
return
text = "Loaded model: "+filename+"\nModelindex: "+str(modelindex)+"\nFile is located in: "+filepath
self.textBrowser_SelectedModelInfo.setText(text)
def history_tab_convertModel(self):
#Check if there is text in textBrowser_SelectedModelInfo
path = self.model_2_convert
try:
os.path.isfile(path)
except:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No file defined!")
msg.setWindowTitle("No file defined!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if not os.path.isfile(path):
#text_path = "\nFile not found!:"+path+"\nProbably the .model was deleted or not saved"
#self.pushButton_convertModel.setEnabled(False)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("\nFile not found!:"+path+"\nProbably the .model was deleted or not saved")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If the source format is Keras TensorFlow:
source_format = str(self.combobox_initial_format.currentText())
target_format = str(self.comboBox_convertTo.currentText()) #What is the target format?
##TODO: All conversion methods to multiprocessing functions!
def conversion_successful_msg(text):#Enable the Convert to .nnet button
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(text)
msg.setWindowTitle("Successfully converted model!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
##################Keras TensorFlow -> .nnet############################
if target_format==".nnet" and source_format=="Keras TensorFlow":
ConvertToNnet = 1
worker = Worker(self.history_tab_convertModel_nnet_worker,ConvertToNnet)
def get_model_keras_from_worker(dic):
self.model_keras = dic["model_keras"]
worker.signals.history.connect(get_model_keras_from_worker)
def conversion_successful(i):#Enable the Convert to .nnet button
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Conversion Keras TensorFlow -> .nnet done"
msg.setText(text)
msg.setWindowTitle("Successfully converted model!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#self.pushButton_convertModel.setEnabled(True)
worker.signals.history.connect(conversion_successful)
self.threadpool.start(worker)
##################Keras TensorFlow -> Frozen .pb#######################
elif target_format=="Frozen TensorFlow .pb" and source_format=="Keras TensorFlow":
#target filename should be like source +_frozen.pb
path_new = os.path.splitext(path)[0] + "_frozen.pb"
aid_dl.convert_kerastf_2_frozen_pb(path,path_new)
text = "Conversion Keras TensorFlow -> Frozen .pb is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> Optimized .pb####################
elif target_format=="Optimized TensorFlow .pb" and source_format=="Keras TensorFlow":
path_new = os.path.splitext(path)[0] + "_optimized.pb"
aid_dl.convert_kerastf_2_optimized_pb(path,path_new)
text = "Conversion Keras TensorFlow -> Optimized .pb is done"
conversion_successful_msg(text)
####################Frozen -> Optimized .pb############################
elif target_format=="Optimized TensorFlow .pb" and source_format=="Frozen TensorFlow .pb":
path_new = os.path.splitext(path)[0] + "_optimized.pb"
aid_dl.convert_frozen_2_optimized_pb(path,path_new)
text = "Conversion Frozen -> Optimized .pb is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> ONNX####################
elif target_format=="ONNX (via keras2onnx)" and source_format=="Keras TensorFlow":
path_new = os.path.splitext(path)[0] + ".onnx"
aid_dl.convert_kerastf_2_onnx(path,path_new)
text = "Conversion Keras TensorFlow -> ONNX (via keras2onnx) is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> ONNX via MMdnn####################
elif target_format=="ONNX (via MMdnn)" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_onnx_mmdnn(path)
text = "Conversion Keras TensorFlow -> ONNX (via MMdnn) is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> PyTorch Script####################
elif target_format=="PyTorch Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"pytorch")
text = "Conversion Keras TensorFlow -> PyTorch Script is done. You can now use this script and the saved weights to build the model using your PyTorch installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> Caffe Script####################
elif target_format=="Caffe Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"caffe")
text = "Conversion Keras TensorFlow -> Caffe Script is done. You can now use this script and the saved weights to build the model using your Caffe installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> CNTK Script####################
elif target_format=="CNTK Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"cntk")
text = "Conversion Keras TensorFlow -> CNTK Script is done. You can now use this script and the saved weights to build the model using your CNTK installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> mxnet Script####################
elif target_format=="MXNet Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"mxnet")
text = "Conversion Keras TensorFlow -> MXNet Script is done. You can now use this script and the saved weights to build the model using your MXNet installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> onnx Script####################
elif target_format=="ONNX Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"onnx")
text = "Conversion Keras TensorFlow -> ONNX Script is done. You can now use this script and the saved weights to build the model using your ONNX installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> TensorFlow Script####################
elif target_format=="TensorFlow Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"tensorflow")
text = "Conversion Keras TensorFlow -> TensorFlow Script is done. You can now use this script and the saved weights to build the model using your Tensorflow installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> Keras Script####################
elif target_format=="Keras Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"keras")
text = "Conversion Keras TensorFlow -> Keras Script is done. You can now use this script and the saved weights to build the model using your Keras installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> CoreML####################
elif "CoreML" in target_format and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_coreml(path)
text = "Conversion Keras TensorFlow -> CoreML is done."
conversion_successful_msg(text)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Not implemented (yet)")
msg.setWindowTitle("Not implemented (yet)")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#If that worked without error, save the filepath for next time
Default_dict["Path of last model"] = os.path.split(path)[0]
aid_bin.save_aid_settings(Default_dict)
def history_tab_convertModel_nnet_worker(self,ConvertToNnet,progress_callback,history_callback):
#Define a new session -> Necessary for threading in TensorFlow
#with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
with tf.Session() as sess:
path = self.model_2_convert
try:
model_keras = load_model(path,custom_objects=aid_dl.get_custom_metrics())
except:
model_keras = load_model(path)
dic = {"model_keras":model_keras}
history_callback.emit(dic)
progress_callback.emit(1)
if ConvertToNnet==1:
#Since this happened in a thread, TensorFlow cant access it anywhere else
#Therefore perform Conversion to nnet right away:
model_config = model_keras.get_config()#["layers"]
if type(model_config)==dict:
model_config = model_config["layers"]#for keras version>2.2.3, there is a change in the output of get_config()
#Convert model to Theano weights format (only necessary for CNNs)
for layer in model_keras.layers:
if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
original_w = K.get_value(layer.W)
converted_w = convert_kernel(original_w)
K.set_value(layer.W, converted_w)
nnet_path, nnet_filename = os.path.split(self.model_2_convert)
nnet_filename = nnet_filename.split(".model")[0]+".nnet"
out_path = os.path.join(nnet_path,nnet_filename)
aid_dl.dump_to_simple_cpp(model_keras=model_keras,model_config=model_config,output=out_path,verbose=False)
# sess.close()
# try:
# aid_dl.reset_keras()
# except:
# print("Could not reset Keras (1)")
def history_tab_ConvertToNnet(self):
print("Not used")
# model_keras = self.model_keras
# model_config = model_keras.get_config()["layers"]
# #Convert model to theano weights format (Only necesary for CNNs)
# for layer in model_keras.layers:
# if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
# original_w = K.get_value(layer.W)
# converted_w = convert_kernel(original_w)
# K.set_value(layer.W, converted_w)
#
# nnet_path, nnet_filename = os.path.split(self.model_2_convert)
# nnet_filename = nnet_filename.split(".model")[0]+".nnet"
# out_path = os.path.join(nnet_path,nnet_filename)
# aid_dl.dump_to_simple_cpp(model_keras=model_keras,model_config=model_config,output=out_path,verbose=False)
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Successfully converted model and saved to\n"+out_path)
# msg.setWindowTitle("Successfully converted model!")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# self.pushButton_convertModel.setEnabled(False)
#TODO
def test_nnet(self):
#I need a function which calls a C++ app that uses the .nnet and applies
#it to a random image.
#The same image is also used as input to the original .model
#and both results are then compared
print("Not implemented yet")
print("Placeholder")
print("Building site")
def actionDocumentation_function(self):
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "Currently, there is no detailed written documentation. AIDeveloper instead makes strong use of tooltips."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Documentation")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionSoftware_function(self):
if sys.platform == "win32":
plat = "win"
elif sys.platform=="darwin":
plat = "mac"
elif sys.platform=="linux":
plat = "linux"
else:
print("Unknown Operating system")
plat = "win"
dir_deps = os.path.join(dir_root,"aid_dependencies_"+plat+".txt")#dir to aid_dependencies
f = open(dir_deps, "r")
text_modules = f.read()
f.close()
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "<html><head/><body><p>AIDeveloper "+str(VERSION)+"<br>"+sys.version+"<br>Click 'Show Details' to retrieve a list of all Python packages used."+"<br>AID_GPU uses CUDA (NVIDIA) to facilitate GPU processing</p></body></html>"
msg.setText(text)
msg.setDetailedText(text_modules)
msg.setWindowTitle("Software")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionAbout_function(self):
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "AIDeveloper is written and maintained by <NAME>. Use <EMAIL> to contact the main developer if you find bugs or if you wish a particular feature. Icon theme 2 was mainly designed and created by <NAME>."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("About")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionLoadSession_function(self):
#This function should allow the user to select and load a metafile and
#put the GUI into the corresponding state (place the files in the table, check Train/Valid)
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta or session file (*meta.xlsx *session.xlsx)")
filename = filename[0]
if len(filename)==0:
return
xlsx = pd.ExcelFile(filename)
UsedData = pd.read_excel(xlsx,sheet_name="UsedData")
Files = list(UsedData["rtdc_path"])
file_exists = [os.path.exists(url) for url in Files]
ind_true = np.where(np.array(file_exists)==True)[0]
UsedData_true = UsedData.iloc[ind_true]
Files_true = list(UsedData_true["rtdc_path"]) #select the indices that are valid
#Add stuff to table_dragdrop
rowPosition = int(self.table_dragdrop.rowCount())
self.dataDropped(Files_true)
#update the index, train/valid checkbox and shuffle checkbox
for i in range(len(Files_true)):
#set the index (celltype)
try:
index = int(np.array(UsedData_true["class"])[i])
except:
index = int(np.array(UsedData_true["index"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.cellWidget(rowPosition+i, 1).setValue(index)
#is it checked for train or valid?
trorvalid = str(np.array(UsedData_true["TrainOrValid"])[i])
if trorvalid=="Train":
self.table_dragdrop.item(rowPosition+i, 2).setCheckState(QtCore.Qt.Checked)
elif trorvalid=="Valid":
self.table_dragdrop.item(rowPosition+i, 3).setCheckState(QtCore.Qt.Checked)
#how many cells/epoch during training or validation?
try:
nr_events_epoch = str(np.array(UsedData_true["nr_events_epoch"])[i])
except:
nr_events_epoch = str(np.array(UsedData_true["nr_cells_epoch"])[i])
self.table_dragdrop.item(rowPosition+i, 6).setText(nr_events_epoch)
#Shuffle or not?
shuffle = bool(np.array(UsedData_true["shuffle"])[i])
if shuffle==False:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Unchecked)
#Set Cells/Epoch to not editable
item = self.table_dragdrop.item(rowPosition+i, 6)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
else:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Checked)
#zoom_factor = float(np.array(UsedData_true["zoom_factor"])[i])
zoom_factor = str(np.array(UsedData_true["zoom_factor"])[i])
self.table_dragdrop.item(rowPosition+i, 9).setText(zoom_factor)
#Now take care of missing data
#Take care of missing files (they might have been moved to a different location)
ind_false = np.where(np.array(file_exists)==False)[0]
#Files_false = list(UsedData_false["rtdc_path"]) #select the indices that are valid
if len(ind_false)>0:
UsedData_false = UsedData.iloc[ind_false]
Files_false = list(UsedData_false["rtdc_path"]) #select the indices that are valid
self.dataDropped(Files_false)
self.user_selected_path = None
#Create popup that informs user that there is missing data and let him specify a location
#to search for the missing files
def add_missing_files():
filename = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select directory', Default_dict["Path of last model"])
user_selected_path = filename
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid directory")
msg.setWindowTitle("Invalid directory")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#get the hashes
hashes = list(np.array(UsedData_false["hash"])[ind_false])
paths = list(np.array(UsedData_false["rtdc_path"])[ind_false])
paths_new,info = aid_bin.find_files(user_selected_path,paths,hashes)
text = ('\n'.join([str(a) +"\t"+ b for a,b in zip(paths_new,info)]))
self.textBrowser_Info_pop2.setText(text)
#Add stuff to table_dragdrop
rowPosition = int(self.table_dragdrop.rowCount())
self.dataDropped(paths_new)
for i in range(len(paths_new)):
#set the index (celltype)
try:
index = int(np.array(UsedData_false["class"])[i])
except:
index = int(np.array(UsedData_false["index"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.cellWidget(rowPosition+i, 1).setValue(index)
#is it checked for train or valid?
trorvalid = str(np.array(UsedData_false["TrainOrValid"])[i])
if trorvalid=="Train":
self.table_dragdrop.item(rowPosition+i, 2).setCheckState(QtCore.Qt.Checked)
elif trorvalid=="Valid":
self.table_dragdrop.item(rowPosition+i, 3).setCheckState(QtCore.Qt.Checked)
#how many cells/epoch during training or validation?
try:
nr_events_epoch = str(np.array(UsedData_false["nr_events_epoch"])[i])
except:
nr_events_epoch = str(np.array(UsedData_false["nr_cells_epoch"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.item(rowPosition+i, 6).setText(nr_events_epoch)
#Shuffle or not?
shuffle = bool(np.array(UsedData_false["shuffle"])[i])
if shuffle==False:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Unchecked)
#Set Cells/Epoch to not editable
item = self.table_dragdrop.item(rowPosition+i, 6)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
else:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Checked)
#zoom_factor = float(np.array(UsedData_false["zoom_factor"])[i])
zoom_factor = str(np.array(UsedData_false["zoom_factor"])[i])
self.table_dragdrop.item(rowPosition+i, 9).setText(zoom_factor)
self.w_pop2 = MyPopup()
self.gridLayout_w_pop2 = QtWidgets.QGridLayout(self.w_pop2)
self.gridLayout_w_pop2.setObjectName("gridLayout_w_pop2")
self.verticalLayout_w_pop2 = QtWidgets.QVBoxLayout()
self.verticalLayout_w_pop2.setObjectName("verticalLayout_w_pop2")
self.horizontalLayout_w_pop2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_w_pop2.setObjectName("horizontalLayout_w_pop2")
self.pushButton_Close_pop2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_Close_pop2.setObjectName("pushButton_Close_pop2")
self.pushButton_Close_pop2.clicked.connect(self.w_pop2.close)
self.horizontalLayout_w_pop2.addWidget(self.pushButton_Close_pop2)
self.pushButton_Search_pop2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_Search_pop2.clicked.connect(add_missing_files)
self.pushButton_Search_pop2.setObjectName("pushButton_Search")
self.horizontalLayout_w_pop2.addWidget(self.pushButton_Search_pop2)
self.verticalLayout_w_pop2.addLayout(self.horizontalLayout_w_pop2)
self.textBrowser_Info_pop2 = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser_Info_pop2.setObjectName("textBrowser_Info_pop2")
self.verticalLayout_w_pop2.addWidget(self.textBrowser_Info_pop2)
self.gridLayout_w_pop2.addLayout(self.verticalLayout_w_pop2, 0, 0, 1, 1)
self.w_pop2.setWindowTitle("There are missing files. Do you want to search for them?")
self.pushButton_Close_pop2.setText("No")
self.pushButton_Search_pop2.setText("Define folder to search files")
self.w_pop2.show()
#Ask user if only data, or the full set of parameters should be loaded
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
msg.setText(tooltips["msg_loadSession"])
msg.setWindowTitle("Load only data table or all parameters?")
msg.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.Save)# | QtGui.QMessageBox.Cancel)
dataonly = msg.button(QtGui.QMessageBox.Yes)
dataonly.setText('Data table only')
allparams = msg.button(QtGui.QMessageBox.Save)
allparams.setText('Data and all parameters')
# cancel = msg.button(QtGui.QMessageBox.Cancel)
# cancel.setText('Cancel')
msg.exec_()
#Only update the data table.
if msg.clickedButton()==dataonly: #show image and heatmap overlay
pass
#Load the parameters
elif msg.clickedButton()==allparams: #show image and heatmap overlay
Parameters = pd.read_excel(xlsx,sheet_name="Parameters")
aid_frontend.load_hyper_params(self,Parameters)
# if msg.clickedButton()==cancel: #show image and heatmap overlay
# return
#If all this run without error, save the path.
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
#Update the overview-box
if self.groupBox_DataOverview.isChecked()==True:
self.dataOverviewOn()
def actionSaveSession_function(self):
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save session', Default_dict["Path of last model"],"AIDeveloper Session file (*_session.xlsx)")
filename = filename[0]
path, fname = os.path.split(filename)
if len(fname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if fname.endswith(".xlsx"):
fname = fname.split(".xlsx")[0]
if fname.endswith("_session"):
fname = fname.split("_session")[0]
if fname.endswith("_meta"):
fname = fname.split("_meta")[0]
if fname.endswith(".model"):
fname = fname.split(".model")[0]
if fname.endswith(".arch"):
fname = fname.split(".arch")[0]
#add the suffix _session.xlsx
if not fname.endswith("_session.xlsx"):
fname = fname +"_session.xlsx"
filename = os.path.join(path,fname)
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Used files go to a separate sheet on the -session.xlsx
SelectedFiles = self.items_clicked()
SelectedFiles_df = pd.DataFrame(SelectedFiles)
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
#Get all hyper parameters
Para_dict =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""K-Means Classifier"""
import collections
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import minmax_scale
from default_clf import DefaultNSL, COL_NAMES, ATTACKS
class KMeansNSL(DefaultNSL):
def __init__(self):
super(KMeansNSL, self).__init__()
self.cols = None
self.clusters = {0: None, 1: None, 2: None, 3: None}
def load_training_data(self, filepath):
data, labels = self.load_data(filepath)
self.cols = data.columns
self.training = [data, labels]
def load_test_data(self, filepath):
data, labels = self.load_data(filepath)
map_data = pd.DataFrame(columns=self.cols)
map_data = map_data.append(data)
data = map_data.fillna(0)
self.testing = [data[self.cols], labels]
@staticmethod
def load_data(filepath):
data = pd.read_csv(filepath, names=COL_NAMES, index_col=False)
# Shuffle data
data = data.sample(frac=1).reset_index(drop=True)
NOM_IND = [1, 2, 3]
BIN_IND = [6, 11, 13, 14, 20, 21]
# Need to find the numerical columns for normalization
NUM_IND = list(set(range(40)).difference(NOM_IND).difference(BIN_IND))
# Scale all numerical data to [0-1]
data.iloc[:, NUM_IND] = minmax_scale(data.iloc[:, NUM_IND])
labels = data['labels']
del data['labels']
data = pd.get_dummies(data)
return [data, labels]
def train_clf(self):
self.clf = KMeans(n_clusters=4, init='random').fit(self.training[0])
self.set_categories()
def test_clf(self, train=False):
if train:
data, labels = self.training
else:
data, labels = self.testing
test_preds = self.clf.predict(data)
test_preds = [self.clusters[x] for x in test_preds]
bin_labels = labels.apply(lambda x: x if x == 'normal' else 'anomaly')
test_acc = sum(test_preds == bin_labels)/(len(test_preds) * 1.0)
return [test_preds, test_acc]
def set_categories(self):
labels = self.training[1]
bin_labels = labels.apply(lambda x: x if x == 'normal' else 'anomaly')
clust_preds = self.clf.labels_
count = collections.Counter(zip(clust_preds, bin_labels))
num = [0, 0, 0, 0]
for k, val in count.items():
clust = k[0]
if val > num[clust]:
num[clust] = val
self.clusters[clust] = k[1]
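# Illustrative example (not from the original code): if cluster 2 holds 900 training points labelled
# 'normal' and 150 labelled 'anomaly', count[(2, 'normal')] == 900 wins, so self.clusters[2] = 'normal'.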
def predict(self, packet):
data =
|
pd.DataFrame([packet], columns=COL_NAMES)
|
pandas.DataFrame
|
import os
import sys
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.callbacks import CSVLogger, History
from keras.layers import BatchNormalization, Dense, Dropout, Input
from keras.models import Model
# from .IntegratedGradient import integrated_gradients
"""
Created by <NAME> on 6/15/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
def loadGradients(path="../Results/IntegratedGradient/integrated_gradients.csv"):
return pd.read_csv(path, header=None)
def save_summaries_for_each_feature(feature_importance, path="./IntegratedGradient/Summaries/"):
for i in range(feature_importance.shape[1]):
description = feature_importance[i].describe()
description.to_csv(path + "feature_{0}.txt".format(i))
def analyze_top_100_features_for_each_sample(feature_importance):
top_importances = []
for i in range(feature_importance.shape[0]):
importances = feature_importance.iloc[i, :]
importances = list(reversed(sorted(abs(importances))))
top_100_importances = importances[:100]
top_importances.append(top_100_importances)
np.savetxt(fname="./top100_deeplift.csv",
X=np.array(top_importances), delimiter=',')
def plot_heatmaps(feature_importances, path="./IntegratedGradient/Heatmaps/"):
plt.rcParams["figure.figsize"] = 5, 2
for i in range(feature_importances.shape[0]):
y = feature_importances.iloc[i, :]
fig, ax = plt.subplots(nrows=1, sharex='all')
# extent = [x[0] - (x[1] - x[0]) / 2., x[-1] + (x[1] - x[0]) / 2., 0, 1]
heatmap = ax.imshow(y[np.newaxis, :], cmap="plasma", aspect="auto")
ax.set_yticks([])
# ax.set_xlim(extent[0], extent[1])
plt.tight_layout()
plt.colorbar(heatmap)
plt.savefig(path + "sample_{0}.png".format(i))
plt.close()
def plot_distributions(feature_importance, path="../Results/IntegratedGradient/DistPlots/"):
import seaborn as sns
for i in range(feature_importance.shape[1]):
plt.figure()
sns.distplot(feature_importance[i])
plt.xlabel("Feature Importance")
plt.ylabel("Density")
plt.title("Feature_{0} Distribution of Importance".format(i))
plt.savefig(path + "feature_{0}.png".format(i))
plt.close()
def plot_distribution(feature_importance, path="../Results/IntegratedGradient/"):
file_name = "distribution.png"
feature_importance = feature_importance.as_matrix() # Convert to numpy ndarray
new_shape = (feature_importance.shape[0] * feature_importance.shape[1],)
feature_importance = np.reshape(feature_importance, newshape=new_shape)
import seaborn as sns
sns.distplot(feature_importance)
plt.xlabel("Feature Importance")
plt.ylabel("Density")
plt.title("Distribution of all feature importances")
plt.savefig(path + file_name)
plt.close()
def box_plot(feature_importance, path="../Results/IntegratedGradient/"):
pass
def calculate_statistical_criteria(feature_importance=None, criteria="absolute_error",
path="../Results/IntegratedGradient/"):
file_name = "intgrad_" + criteria + ".csv"
feature_importance = feature_importance.as_matrix() # Convert to np.ndarray
statistical_criteria = np.zeros(shape=(feature_importance.shape[1], 1))
if criteria == "absolute_error":
num_features = feature_importance.shape[1]
statistical_criteria = np.array([[np.max(feature_importance[:, i]) - np.min(
feature_importance[:, i])] for i in range(num_features)])
elif criteria == "relative_error":
statistical_criteria = np.array([[(np.max(feature_importance[:, i]) - np.min(
feature_importance[:, i])) / (np.max(feature_importance[:, i]))] for i in
range(feature_importance.shape[1])])
np.savetxt(fname=path + file_name,
X=statistical_criteria, delimiter=",")
def plot_statistical_criteria(criteria="absolute_error", data_path="../Results/IntegratedGradient/",
save_path="../Results/IntegratedGradient/"):
data_path = data_path + "intgrad_" + criteria + ".csv"
save_path = save_path + "intgrad_" + criteria + ".png"
statistical_criteria = pd.read_csv(data_path, header=None).as_matrix()
import seaborn as sns
sns.distplot(statistical_criteria)
if criteria == "absolute_error":
plt.xlabel("Absolute Error")
plt.title("Distribution of Absolute Error")
elif criteria == "relative_error":
plt.xlabel("Relative Error")
plt.title("Distribution of Relative Error")
plt.ylabel("Density")
plt.savefig(save_path)
plt.close()
def make_summary_data(feature_importance, path="../Results/IntegratedGradient/"):
file_name = "summaries.csv"
feature_importance = feature_importance
num_features = feature_importance.shape[1]
all_describtions = np.zeros(
shape=(num_features, 4)) # mean - std - min - max
for i in range(num_features):
describtion = feature_importance[i].describe()
describtion = describtion.iloc[[1, 2, 3, 7]].as_matrix()
all_describtions[i] = describtion.T
print(all_describtions.shape)
np.savetxt(fname=path + file_name, X=all_describtions, delimiter=',')
def compute_integrated_gradient(machine="damavand", save_path="../Results/IntegratedGradient/", verbose=1):
file_name = "integrated_gradient.csv"
if machine == "damavand":
mrna_address = "~/f/Behrooz/dataset_local/fpkm_normalized.csv"
else:
mrna_address = "../Data/fpkm_normalized.csv"
m_rna = pd.read_csv(mrna_address, header=None)
model = keras.models.load_model("../Results/classifier.h5")
ig = integrated_gradients(model, verbose=verbose)
num_samples = m_rna.shape[0]
num_features = m_rna.shape[1]
feature_importances = np.zeros(shape=(num_samples, num_features))
for i in range(num_samples):
feature_importances[i] = ig.explain(m_rna.as_matrix()[i, :])
if verbose == 1:
sys.stdout.flush()
sys.stdout.write('\r')
sys.stdout.write("Progress: " + str((i / 10787) * 100) + " %")
sys.stdout.flush()
if verbose == 1:
sys.stdout.flush()
sys.stdout.write('\r')
sys.stdout.write("Progress: " + str((10787 / 10787) * 100) + " %")
sys.stdout.flush()
np.savetxt(fname="../Results/IntegratedGradient/integrated_gradients.csv",
X=np.array(feature_importances), delimiter=',')
return
|
pd.DataFrame(feature_importances)
|
pandas.DataFrame
|
import os, sys, math, random, time
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import pickle as pkl
import scipy.sparse as sp
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from tqdm import tqdm
import xclib.evaluation.xc_metrics as xc_metrics
import xclib.data.data_utils as data_utils
def ToD(batch, device):
if isinstance(batch, torch.Tensor):
batch = batch.to(device)
if isinstance(batch, Dict):
for outkey in batch:
if isinstance(batch[outkey], torch.Tensor):
batch[outkey] = batch[outkey].to(device)
if isinstance(batch[outkey], Dict):
for inkey in batch[outkey]:
if isinstance(batch[outkey][inkey], torch.Tensor):
batch[outkey][inkey] = batch[outkey][inkey].to(device)
return batch
def csr_to_pad_tensor(spmat, pad):
inds_tensor = torch.LongTensor(spmat.indices)
data_tensor = torch.LongTensor(spmat.data)
return {'inds': torch.nn.utils.rnn.pad_sequence([inds_tensor[spmat.indptr[i]:spmat.indptr[i+1]] for i in range(spmat.shape[0])], batch_first=True, padding_value=pad),
'vals': torch.nn.utils.rnn.pad_sequence([data_tensor[spmat.indptr[i]:spmat.indptr[i+1]] for i in range(spmat.shape[0])], batch_first=True, padding_value=0.0)}
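# Usage sketch (illustrative, not part of the original module): for a 2x4 csr_matrix with rows
# [0 2 0 1] and [3 0 0 0], csr_to_pad_tensor(mat, pad=4) returns
# {'inds': [[1, 3], [0, 4]], 'vals': [[2, 1], [3, 0]]},
# i.e. per-row column indices right-padded with `pad` and the corresponding values padded with 0.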
def read_sparse_mat(filename, use_xclib=True):
if use_xclib:
return xclib.data.data_utils.read_sparse_file(filename)
else:
with open(filename) as f:
nr, nc = map(int, f.readline().split(' '))
data = []; indices = []; indptr = [0]
for line in tqdm(f):
if len(line) > 1:
row = [x.split(':') for x in line.split()]
tempindices, tempdata = list(zip(*row))
indices.extend(list(map(int, tempindices)))
data.extend(list(map(float, tempdata)))
indptr.append(indptr[-1]+len(tempdata))
else:
indptr.append(indptr[-1])
score_mat = sp.csr_matrix((data, indices, indptr), (nr, nc))
del data, indices, indptr
return score_mat
from xclib.utils.sparse import rank as sp_rank
def _topk(rank_mat, K, inplace=False):
topk_mat = rank_mat if inplace else rank_mat.copy()
topk_mat.data[topk_mat.data > K] = 0
topk_mat.eliminate_zeros()
return topk_mat
def Recall(rank_intrsxn_mat, true_mat, K=[1,3,5,10,20,50,100]):
K = sorted(K, reverse=True)
topk_intrsxn_mat = rank_intrsxn_mat.copy()
res = {}
for k in K:
topk_intrsxn_mat = _topk(topk_intrsxn_mat, k, inplace=True)
res[k] = (topk_intrsxn_mat.getnnz(1)/true_mat.getnnz(1)).mean()*100.0
return res
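# Illustrative example: if a query has 4 relevant labels in true_mat and 3 of them appear within the top-k
# entries of rank_intrsxn_mat, its contribution is 75%; res[k] reports the mean of this ratio over all queries,
# scaled to percent.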
def MRR(rank_intrsxn_mat, true_mat, K=[1,3,5,10,20,50,100]):
K = sorted(K, reverse=True)
topk_intrsxn_mat = _topk(rank_intrsxn_mat, K[0], inplace=True)
rr_topk_intrsxn_mat = topk_intrsxn_mat.copy()
rr_topk_intrsxn_mat.data = 1/rr_topk_intrsxn_mat.data
max_rr = rr_topk_intrsxn_mat.max(axis=1).toarray().ravel()
res = {}
for k in K:
max_rr[max_rr < 1/k] = 0.0
res[k] = max_rr.mean()*100
return res
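# Illustrative example: if the best-ranked relevant label of a query sits at rank 4, its reciprocal rank is 0.25;
# it contributes 0 to MRR@k for k < 4 (since 0.25 < 1/k) and 25 (in percent) for k >= 4.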
def XCMetrics(score_mat, X_Y, inv_prop, disp = True, fname = None, method = 'Method'):
X_Y = X_Y.tocsr().astype(np.bool_)
acc = xc_metrics.Metrics(X_Y, inv_prop)
xc_eval_metrics = np.array(acc.eval(score_mat, 5))*100
xc_eval_metrics =
|
pd.DataFrame(xc_eval_metrics)
|
pandas.DataFrame
|
import cPickle as pickle
import numpy as np
import pandas as pd
import functools
from scoop import futures
from scipy.interpolate import griddata
from scipy.signal import convolve2d
from sklearn.metrics import average_precision_score, roc_auc_score, precision_recall_curve
def calculate_hessian(model, data, step_size):
"""
Computes the mixed derivative using the finite differences method
:param model: The imported model module
:param data: The sampled data in structured form
:param step_size: The dx step taken between each perturbation
:returns: mixed derivatives (rows = core samples, columns = (output, feature pair) plus single-feature gradients)
"""
hessian = pd.DataFrame(0, index = np.arange(data.shape[0]), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_feature_pairs + model.feature_names], names=['model.output_names','model.feature_pairs']))
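#Worked equation (illustrative): writing h for step_size and f for one model output, the loop below combines the
#four helper columns (core, first feature perturbed, second feature perturbed, both perturbed) as
#  d2f/(dx dy) ≈ (f(x+h,y+h) - f(x+h,y) - f(x,y+h) + f(x,y)) / h**2,
#applies a sign correction, and fills the single-feature columns with forward differences (f(x+h) - f(x)) / h.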
for output_name in model.output_names:
hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)
mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values - data.loc[:, hessian_calculation_helpers[1]].values - data.loc[:, hessian_calculation_helpers[2]].values + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * step_size)
mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values + data.loc[:, hessian_calculation_helpers[2]].values - 2 * data.loc[:, hessian_calculation_helpers[0]].values)
hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs), model.perturbation_feature_pairs)] = mixed_derivative
hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = np.array([(data.loc[:, (output_name,f)] - data.loc[:, (output_name,'core')]) / (step_size) for f in model.feature_names]).T
return hessian
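# Minimal sketch (not part of the original module) of the finite-difference stencil that
# calculate_hessian evaluates from the pre-computed perturbed outputs; the helper name and the
# toy function are made up so the result can be checked by hand.
def _mixed_derivative_example(f, x, y, h=1e-4):
    # d2f/dxdy ~ (f(x+h, y+h) - f(x+h, y) - f(x, y+h) + f(x, y)) / h**2
    return (f(x + h, y + h) - f(x + h, y) - f(x, y + h) + f(x, y)) / (h * h)
# For f(x, y) = x * y the exact mixed derivative is 1, and the stencil reproduces it:
# _mixed_derivative_example(lambda a, b: a * b, 0.3, 0.7) ~= 1.0 (up to floating point error)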
def create_hessian_calculation_columns(model, output_name):
hessian_calculation_helpers = []
hessian_calculation_helpers.append([(output_name, 'core') for p in range(len(model.perturbation_feature_pairs))])
hessian_calculation_helpers.append([(output_name, p[:p.find(' ')]) for p in model.perturbation_feature_pairs])
hessian_calculation_helpers.append([(output_name, p[p.find(' and ') + 5:]) for p in model.perturbation_feature_pairs])
hessian_calculation_helpers.append([(output_name, p) for p in model.perturbation_feature_pairs])
return hessian_calculation_helpers
def max_filter_activation(matrix, filter_size):
kernel = np.ones((filter_size,filter_size)) / np.power(filter_size, 2)
out = convolve2d(matrix, kernel, mode='valid')
return out.max()
def get_max_filters(matrix, num_filters = 100, threshold = 3):
matrix_size = matrix.shape[0]
filter_sizes = np.linspace(5, matrix_size, num_filters).astype(int)
filter_results = list(futures.map(functools.partial(max_filter_activation, np.abs(matrix)), filter_sizes))
if len(np.where(np.array(filter_results) >= threshold)[0]) == 0:
return -1
else:
return np.where(np.array(filter_results) < threshold)[0][0] - 1
def create_interaction_map(model, hessian, core_feature_vectors, output_name, method, pair):
first_feature = pair[:pair.find(' ')]
second_feature = pair[pair.find(' and ') + 5:]
coordinates = core_feature_vectors.loc[:, (first_feature, second_feature)].values * 99
grid_x, grid_y = np.mgrid[0:100:(100j), 0:100:(100j)]
if len(hessian) == 1:
values = hessian.loc[:,(output_name, pair)]
else:
values = hessian.loc[:,(output_name, pair)] / hessian.loc[:, zip([output_name] * len(model.normalization_feature_pairs), model.normalization_feature_pairs)].values.std()
grid_z0 = griddata(coordinates, values.values, (grid_x, grid_y), method=method, fill_value=0)
return(grid_z0)
def rank_samples_in_pair(model, centers, magnitudes, dimensions, interaction_map_and_pair):
interaction_map, pair = interaction_map_and_pair
first_feature = pair[:pair.find(' ')]
second_feature = pair[pair.find(' and ') + 5:]
grid_x, grid_y = np.mgrid[0:1:(100j), 0:1:(100j)]
y_true = np.zeros(interaction_map.shape)
for dim_ind in range(len(dimensions)):
if (model.feature_names.index(first_feature) in dimensions[dim_ind]) and (model.feature_names.index(second_feature) in dimensions[dim_ind]):
first_v = np.where(dimensions[dim_ind] == model.feature_names.index(first_feature))[0]
second_v = np.where(dimensions[dim_ind] == model.feature_names.index(second_feature))[0]
y_true += (np.array((np.power(magnitudes[dim_ind][first_v] * (grid_x - centers[dim_ind][first_v]), 2) + np.power(magnitudes[dim_ind][second_v] * (grid_y - centers[dim_ind][second_v]), 2))) < 5.9).astype(int)
return(interaction_map.flatten(), np.clip(y_true,0,1).flatten())
def measure_local_accuracy(model, number_of_core_samples, step_size, name, output_path):
"""
    Measures how well the absolute mixed derivatives recover the known local interactions
    :param model: The imported model module
    :param number_of_core_samples: Number of core samples in the saved feature/output files
    :param step_size: The dx step size used for the finite differences
    :returns: average precision of the interaction ranking (also pickled to output_path)
"""
feature_vectors = pd.DataFrame(np.load('{}/feature_vectors_{}_{}_{}.npy'.format(output_path, number_of_core_samples, step_size, name)), index = np.arange(number_of_core_samples), columns=pd.MultiIndex.from_product([model.perturbation_status_columns, model.feature_names], names=['perturbation_status','features']))
outputs = pd.DataFrame(np.load('{}/outputs_{}_{}_{}.npy'.format(output_path, number_of_core_samples, step_size, name)), index = np.arange(number_of_core_samples), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_status_columns_output], names=['outputs','perturbation_status']))
hessian = calculate_hessian(model, outputs, step_size)
(centers, magnitudes, dimensions) = model.get_local_ground_truth(output_path,number_of_core_samples, step_size, name)
core_feature_vectors = feature_vectors.loc[:, 'core']
output_name = model.output_names[0]
interaction_maps = list(futures.map(functools.partial(create_interaction_map, model, hessian, core_feature_vectors, output_name, 'nearest'), model.feature_pairs))
local_ranking = list(futures.map(functools.partial(rank_samples_in_pair, model, centers, magnitudes, dimensions), zip(interaction_maps, model.feature_pairs)))
ranking = np.concatenate(np.array(local_ranking), axis=1)
accuracies = average_precision_score(ranking[1,:], np.abs(ranking[0,:]))
ROCs = np.array(precision_recall_curve(ranking[1,:], np.abs(ranking[0,:])))
pickle.dump(obj = accuracies, file = open('{}/local_accuracies_{}_{}_{}.pickle'.format(output_path,number_of_core_samples, step_size, name),'wb'))
pickle.dump(obj = ROCs, file = open('{}/local_ROCs_{}_{}_{}.pickle'.format(output_path,number_of_core_samples, step_size, name),'wb'))
return accuracies
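# Tiny illustration (made-up numbers, not from the original pipeline) of the scoring step above:
# average_precision_score rewards rankings in which the truly interacting cells (label 1)
# receive the largest absolute mixed derivatives.
#   y_true  = [1, 0, 1, 0]            # ground-truth interaction mask (flattened)
#   y_score = [0.9, 0.1, 0.8, 0.2]    # |mixed derivative| per cell
#   average_precision_score(y_true, y_score)  # -> 1.0, a perfect ranking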
def denoise_hessian(hessian):
"""
    Rectifies the uppermost and bottommost 0.1% of the hessian to remove noise
"""
new_hessian = hessian.copy()
s = new_hessian.values.shape
c = new_hessian.columns
new_hessian = new_hessian.values.flatten()
new_hessian[np.argsort(new_hessian.flatten())[int(len(new_hessian.flatten()) * 0.999):]] = np.sign(new_hessian[np.argsort(new_hessian.flatten())[int(len(new_hessian.flatten()) * 0.999):]]) * np.abs(new_hessian.flatten()[np.argsort(new_hessian.flatten())][int(len(new_hessian.flatten()) * 0.999)])
new_hessian[np.argsort(new_hessian.flatten())[::-1][int(len(new_hessian.flatten()) * 0.999):]] = np.sign(new_hessian[np.argsort(new_hessian.flatten())[::-1][int(len(new_hessian.flatten()) * 0.999):]]) * np.abs(new_hessian.flatten()[np.argsort(new_hessian.flatten())][::-1][int(len(new_hessian.flatten()) * 0.999)])
return pd.DataFrame(new_hessian.reshape(s), columns = c)
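# Rough intuition (a sketch, not the original implementation): the rectification above is close
# in spirit to a two-sided winsorization of the flattened values at the 0.1% / 99.9% quantiles,
#   lo, hi = np.quantile(values, 0.001), np.quantile(values, 0.999)
#   clipped = np.clip(values, lo, hi)
# except that the original version keeps the sign of each clipped entry and clips each tail to
# the magnitude of its own quantile.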
def normalize_outputs(model, outputs):
new_outputs = outputs.copy()
for output_name in model.output_names:
if (outputs.loc[:, output_name].max().max() == outputs.loc[:, output_name].min().min()):
new_outputs.loc[:, output_name] = outputs.loc[:, output_name].values
else:
new_outputs.loc[:, output_name] = ((new_outputs.loc[:, output_name] - new_outputs.loc[:, output_name].min().min()) / np.abs((new_outputs.loc[:, output_name].max().max() - new_outputs.loc[:, output_name].min().min()))).values
return new_outputs
def normalize_inputs(model, feature_vectors):
new_feature_vectors = feature_vectors.copy()
for feature in model.feature_names:
if (new_feature_vectors.loc[:, (feature)].max().max() == new_feature_vectors.loc[:, (feature)].min().min()):
new_feature_vectors.loc[:, (feature)] = new_feature_vectors.loc[:, (feature)]
else:
new_feature_vectors.loc[:, (feature)] = ((new_feature_vectors.loc[:, (feature)] - new_feature_vectors.loc[:, (feature)].min()) / (new_feature_vectors.loc[:, (feature)].max() - new_feature_vectors.loc[:, (feature)].min())).values
return new_feature_vectors
def plot_interaction_map(model, name, matrix, output_name, first_variable, second_variable, x_coord, y_coord, output_path):
"""
Plots a map of the parameter space for two input parameters, with the areas with more nonlinearity colored white
:param ax: The axes on which to plot
:param args: The arguments for the plot -
The matrix to plot,
the name of the first variable
The name of the second variable,
The name of the first variable, as a key to the parameter limits dictionary
The name of the second variable, as a key to the parameter limits dictionary
the x coordinate of the sample being studied
the y coordinate of the sample being studied
:returns: The axes with the plotted sample
"""
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
font = {'size' : 14}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(5,5))
ax = plt.subplot()
maxValue = np.max(np.abs(matrix))
img = ax.imshow((matrix), cmap = cm.bwr, origin='lower', vmin = -min(maxValue, 6), vmax = min(maxValue, 6), interpolation='spline16')
first_variable = '{}'.format(first_variable)
second_variable = '{}'.format(second_variable)
ax.set_ylabel(r'$x_i$ = ' + first_variable)
ax.set_xlabel(r'$y_i$ = ' + second_variable)
ax.axes.set_xticks([0, 50, 99])
ax.axes.set_yticks([0, 50, 99])
xticks = np.linspace(np.array(model.feature_limits[first_variable]).min(), np.array(model.feature_limits[first_variable]).max(), 3)
yticks = np.linspace(np.array(model.feature_limits[second_variable]).min(), np.array(model.feature_limits[second_variable]).max(), 3)
ax.scatter([x_coord], [y_coord], marker='o', color='white', s = 250, edgecolors='black', linewidth=3)
ax.set_yticklabels([xticks[tind] for tind in range(3)])
ax.set_xticklabels([yticks[tind] for tind in range(3)])
ax.axis([0, (100) - 1, 0, (100) - 1])
# ax.scatter([x_coord_linear], [y_coord_linear], marker='o', color='blue', s = 250, edgecolors='black', linewidth=3)
t = ax.set_title(r'$\mathregular{\frac{\delta ^2 F(\bar{x})}{\delta x_i \delta x_j}}$')
# t = ax.set_title('{} and {} - '.format(first_variable, second_variable) + r'$\mathregular{\frac{\delta ^2 F(\bar{x})}{\delta x_i \delta x_j}}$')
t.set_position([.5, 1.025])
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb = plt.colorbar(img, cax=cax)
    cb.set_label("Normalized mixed derivative", rotation=90)
plt.savefig('{}/{}_{}_{}_{}_nonlinear_map.pdf'.format(output_path, name, output_name, first_variable, second_variable), transparent=True, bbox_inches='tight', format='pdf', dpi=600)
# plt.close('all')
def rank_local(model, number_of_core_samples, step_size, name, threshold, output_path, top_k_to_plot):
"""
    Ranks the local interaction maps for each core sample and plots the strongest ones
    :param model: The imported model module
    :param number_of_core_samples: Number of core samples in the saved feature/output files
    :param step_size: The dx step size used for the finite differences
    :param top_k_to_plot: Number of top-ranked interaction maps to plot
"""
feature_vectors = pd.DataFrame(np.load('{}/feature_vectors_{}_{}_{}.npy'.format(output_path, number_of_core_samples, step_size, name)), index = np.arange(number_of_core_samples), columns=pd.MultiIndex.from_product([model.perturbation_status_columns, model.feature_names], names=['perturbation_status','features']))
core_feature_vectors = feature_vectors.loc[:, 'core'].copy()
core_feature_vectors = normalize_inputs(model, core_feature_vectors)
raw_outputs = pd.DataFrame(np.load('{}/outputs_{}_{}_{}.npy'.format(output_path, number_of_core_samples, step_size, name)), index = np.arange(number_of_core_samples), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_status_columns_output], names=['outputs','perturbation_status']))
outputs = normalize_outputs(model,
|
pd.DataFrame(raw_outputs)
|
pandas.DataFrame
|
import os
import pandas
import logging
import datetime
import psycopg2
import functools
from dotenv import load_dotenv
from .utils import classproperty
import urllib.request, urllib.error
logger = logging.getLogger(__name__)
if not hasattr(functools, 'cache'):
# Function below is copied straight
# from Python 3.9 GitHub
# Reference: https://github.com/python/cpython/blob/3.9/Lib/functools.py#L650
def functools_cache(user_function): return functools.lru_cache(maxsize=None)(user_function)
functools.cache = functools_cache
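# With this shim in place, @functools.cache behaves like @functools.lru_cache(maxsize=None)
# on Python versions older than 3.9.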
load_dotenv()
class values:
@classproperty
def raw_data(cls):
try:
return urllib.request.urlopen(os.environ['bday_data_URL']).read().decode('UTF-8')
except (urllib.error.URLError, urllib.error.HTTPError):
logger.critical("Failed to access the raw data from drneato.com")
raise
except KeyError:
logger.critical("Failed to access environment variable 'bday_data_URL'")
raise
@classproperty
def bday_df(cls):
if not hasattr(cls, 'og_raw_data'):
cls.og_raw_data = cls.raw_data
if cls.og_raw_data != cls.raw_data or not hasattr(cls, 'og_bday_df'):
cls.og_raw_data = cls.raw_data
data_dict = dict(((column_name, []) for column_name in ['PeriodNumber',
'Birthdate',
'Birthyear',
'Radio',
'Question#1',
'Question#2',
'Question#3',
'StuID']))
            # Eight rows before it's someone else's data
keys = list(data_dict.keys())
for index, attr in enumerate(cls.raw_data.splitlines()):
index %= 8
if index == 0:
continue
elif index == 2:
unparsed_date = attr.split('-')
try:
data_dict[keys[index - 1]].append(
datetime.date(
*map(int, unparsed_date)
).replace(year=datetime.date.today().year)
)
data_dict[keys[index]].append(int(unparsed_date[0]))
except (ValueError, TypeError):
data_dict[keys[index - 1]].append(None)
data_dict[keys[index]].append(0)
else:
try:
attr = int(attr)
except ValueError:
pass
data_dict[keys[index - 1 if index == 1 else index]].append(attr)
            logger.info('Successfully parsed the raw data from drneato.com')
bday_df = pandas.concat([
pandas.DataFrame(data_dict),
pandas.DataFrame({
'PeriodNumber': [-1],
'Birthdate': [datetime.date.today().replace(month=11, day=15)],
'Birthyear': [0], # Use 0 since we don't know her birthyear
'Radio': [None],
                'Question#1': [None],
                'Question#2': [None],
                'Question#3': [None],
'StuID': [1]
})
])
bday_df['Birthdate'] = pandas.to_datetime(bday_df['Birthdate'])
student_df = cls.student_data_df
bday_df = bday_df[bday_df['StuID'].isin(student_df['stuid'])]
bday_df.drop_duplicates(['StuID'], inplace=True)
bday_df['StuID'] = pandas.to_numeric(bday_df['StuID'])
bday_df.set_index('StuID', inplace=True)
student_df = student_df.set_index('stuid')
columns = ['AddrLine1', 'AddrLine2', 'City', 'State', 'Zipcode', 'FirstName', 'LastName']
bday_df[columns] = student_df[map(lambda text: text.lower(), columns)]
bday_df = bday_df[['FirstName', 'LastName'] + list(bday_df.columns)[:-2]]
logger.info("Sucessfully created and modified the 'bday_df' DataFrame")
cls.og_bday_df = bday_df
else:
bday_df = cls.og_bday_df
if bday_df.iloc[0]['Birthdate'].year != datetime.date.today().year:
bday_df['Birthdate'] = bday_df['Birthdate'].transform(lambda date: date.replace(year=datetime.date.today().year))
cls.og_bday_df = bday_df
def timedelta_today(date):
if hasattr(date, 'to_pydatetime'):
date = date.to_pydatetime()
if hasattr(date, 'date'):
date = date.date()
delta = date - datetime.date.today()
if delta == datetime.timedelta(days=-365):
# Condition for edge case with bdays
# on Jan 1st
return datetime.timedelta(days=1)
return delta if delta >= datetime.timedelta() else delta + datetime.timedelta(days=365)
bday_df['Timedelta'] = bday_df['Birthdate'].transform(timedelta_today)
return bday_df.sort_values(['Timedelta', 'LastName', 'FirstName'])
@classproperty
@functools.cache
def student_data_df(cls):
# NOTE: With the current implementation we are storing
# A LOT of data in the background, due to the shear size of the
# student_data table (it has over 3,000 row). This may or may not
# be worth it depending on how often this data is utilized
# Might want to not store if this variable is not called very often
# INFO: According the `.info()` method for dataframes, the dataframe
# takes up roughly 233.1KB of memory
try:
temp_connection = psycopg2.connect(dbname='botsdb')
except psycopg2.OperationalError:
temp_connection = psycopg2.connect(
dbname='botsdb',
host=os.environ['host'],
user=os.environ['dbuser'],
password=os.environ['password']
)
# NOTE: For some reason using the table name only causes
# a syntax error even though in the documentation table names
# are supported. It might be because we are using an unsupported
# DBAPI
df =
|
pandas.read_sql('SELECT * FROM student_data', temp_connection)
|
pandas.read_sql
|
# ,---------------------------------------------------------------------------,
# | This module is part of the krangpower electrical distribution simulation |
# | suit by <NAME> <<EMAIL>> et al. |
# | Please refer to the license file published together with this code. |
# | All rights not explicitly granted by the license are reserved. |
# '---------------------------------------------------------------------------'
# OpendssdirectEnhancer by <NAME>
# a wrapper for opendssdirect.py by <NAME> and <NAME>
import logging
import types
from copy import deepcopy as _deepcopy
from functools import reduce as _reduce, partial as _partial
from inspect import getmembers as _inspect_getmembers
from json import load as _json_load
from logging import DEBUG as _DBG_LVL
from math import sqrt as _sqrt
from operator import getitem as _getitem, attrgetter as _attrgetter
from re import compile as _re_compile
from re import sub as _sub
from sys import modules as _sys_modules
import numpy as _np
import opendssdirect as _odr
from pandas import DataFrame as _DataFrame
from ._stdout_hijack import stdout_redirected, NullCm
from .._aux_fcn import lower as _lower
from .._aux_fcn import pairwise as _pairwise
from .._components import LineGeometry
from .._components import _resolve_unit, _type_recovery, _odssrep, matricize_str
from .._components import get_classmap as _get_classmap
from .._config_loader import DEFAULT_ENH_NAME, UNIT_MEASUREMENT_PATH, TREATMENTS_PATH, ERROR_STRINGS, DANGEROUS_STACKS, \
UM as _UM, INTERFACE_METHODS_PATH, DEFAULT_COMP as _DEFAULT_COMP, PINT_QTY_TYPE, INTERF_SELECTORS_PATH, \
C_FORCE_UNSAFE_CALLS as _FORCE_UNSAFE_CALLS
from .._exceptions import UnsolvedCircuitError
from .._logging_init import clog, mlog, bclog, get_log_level
try:
assert _odr.Basic.Start(0)
except TypeError:
# retrocompatibility with OpenDSSDirect.py <0.3
assert _odr.Basic.Start()
# <editor-fold desc="Auxiliary functions">
def _assign_unit(item, unit: type(_UM.m) or None):
if unit is None:
return item
elif isinstance(item, dict):
return {k: v * unit for k, v in item.items()}
elif isinstance(item, _DataFrame):
# pandas' _DataFrame is a mess together with pint
return item
# elif hasattr(item, '__iter__'):
# # return [el * unit for el in item]
# return _asarray(item) * unit
else:
return item * unit
def _asarray(item):
return _np.asarray(item)
def _couplearray(item):
return _np.array(item[0::2]) + _np.array(item[1::2]) * 1j
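# e.g. _couplearray([1.0, 2.0, 3.0, 4.0]) -> array([1.+2.j, 3.+4.j]): even-indexed entries are
# the real parts, odd-indexed entries the imaginary parts.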
def _terminalize_cpx(item):
# note that, when I pass an item to terminalize, I am taking for granted that I can find nterm and ncond in the
# respective calls to odr. If you called odr.CktElement.Powers(), for example, I take it that you knew what
# you were doing. Calls coming from PackedElements, instead, should perform the cktelement selection just before
# the call.
nterm = _odr.CktElement.NumTerminals()
ncond = _odr.CktElement.NumConductors()
assert len(item) == nterm * ncond * 2
    cpxr = _np.zeros([nterm, ncond], dtype=complex)
for idx, couple in enumerate(_pairwise(item)):
real = couple[0]
imag = couple[1]
cpxr[int(idx / ncond), (idx % ncond)] = _np.sum([_np.multiply(1j, imag), real], axis=0)
return cpxr
def _terminalize_int(item):
nterm = _odr.CktElement.NumTerminals()
ncond = _odr.CktElement.NumConductors()
assert len(item) == nterm * ncond * 1
    int_r = _np.zeros([nterm, ncond], dtype=int)
for idx, element in enumerate(item):
int_r[int(idx / ncond), (idx % ncond)] = element
return int_r
def _cpx(item):
return item[0] + 1j*item[1]
def _dictionize_cktbus(item):
return dict(zip(_lower(_odr.Circuit.AllBusNames()), item))
def _dictionize_cktels(item):
return dict(zip(_lower(_odr.Circuit.AllElementNames()), item))
def _dictionize_cktnodes(item):
return dict(zip(_lower(_odr.Circuit.YNodeOrder()), item))
def _matricize_ybus(item):
raw_n_ord = _lower(_odr.Circuit.YNodeOrder())
mtx = _matricize(item)
return
|
_DataFrame(data=mtx, index=raw_n_ord, columns=raw_n_ord)
|
pandas.DataFrame
|
import asyncio
import io
import os
import random
import shutil
from collections import defaultdict
import pandas as pd
import pytest
pa = pytest.importorskip("pyarrow")
import dask
import dask.dataframe as dd
from dask.distributed import Worker
from dask.utils import stringify
from distributed.shuffle.shuffle_extension import (
dump_batch,
list_of_buffers_to_table,
load_arrow,
split_by_partition,
split_by_worker,
)
from distributed.utils_test import gen_cluster
def clean_worker(worker):
"""Assert that the worker has no shuffle state"""
assert not worker.extensions["shuffle"].shuffles
for dirpath, dirnames, filenames in os.walk(worker.local_directory):
assert "shuffle" not in dirpath
for fn in dirnames + filenames:
assert "shuffle" not in fn
def clean_scheduler(scheduler):
"""Assert that the scheduler has no shuffle state"""
assert not scheduler.extensions["shuffle"].worker_for
assert not scheduler.extensions["shuffle"].heartbeats
assert not scheduler.extensions["shuffle"].schemas
assert not scheduler.extensions["shuffle"].columns
assert not scheduler.extensions["shuffle"].output_workers
assert not scheduler.extensions["shuffle"].completed_workers
@gen_cluster(client=True)
async def test_basic(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-01-10",
dtypes={"x": float, "y": float},
freq="10 s",
)
out = dd.shuffle.shuffle(df, "x", shuffle="p2p")
x, y = c.compute([df.x.size, out.x.size])
x = await x
y = await y
assert x == y
clean_worker(a)
clean_worker(b)
clean_scheduler(s)
@gen_cluster(client=True)
async def test_concurrent(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-01-10",
dtypes={"x": float, "y": float},
freq="10 s",
)
x = dd.shuffle.shuffle(df, "x", shuffle="p2p")
y = dd.shuffle.shuffle(df, "y", shuffle="p2p")
x, y = c.compute([x.x.size, y.y.size])
x = await x
y = await y
assert x == y
clean_worker(a)
clean_worker(b)
clean_scheduler(s)
@gen_cluster(client=True)
async def test_bad_disk(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-01-10",
dtypes={"x": float, "y": float},
freq="10 s",
)
out = dd.shuffle.shuffle(df, "x", shuffle="p2p")
out = out.persist()
while not a.extensions["shuffle"].shuffles:
await asyncio.sleep(0.01)
shutil.rmtree(a.local_directory)
while not b.extensions["shuffle"].shuffles:
await asyncio.sleep(0.01)
shutil.rmtree(b.local_directory)
with pytest.raises(FileNotFoundError) as e:
out = await c.compute(out)
assert os.path.split(a.local_directory)[-1] in str(e.value) or os.path.split(
b.local_directory
)[-1] in str(e.value)
# clean_worker(a) # TODO: clean up on exception
# clean_worker(b) # TODO: clean up on exception
# clean_scheduler(s)
@pytest.mark.xfail
@pytest.mark.slow
@gen_cluster(client=True)
async def test_crashed_worker(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-01-10",
dtypes={"x": float, "y": float},
freq="10 s",
)
out = dd.shuffle.shuffle(df, "x", shuffle="p2p")
out = out.persist()
while (
len(
[
ts
for ts in s.tasks.values()
if "shuffle_transfer" in ts.key and ts.state == "memory"
]
)
< 3
):
await asyncio.sleep(0.01)
await b.close()
with pytest.raises(Exception) as e:
out = await c.compute(out)
assert b.address in str(e.value)
# clean_worker(a) # TODO: clean up on exception
# clean_worker(b)
# clean_scheduler(s)
@gen_cluster(client=True)
async def test_heartbeat(c, s, a, b):
await a.heartbeat()
clean_scheduler(s)
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-01-10",
dtypes={"x": float, "y": float},
freq="10 s",
)
out = dd.shuffle.shuffle(df, "x", shuffle="p2p")
out = out.persist()
while not s.extensions["shuffle"].heartbeats:
await asyncio.sleep(0.001)
await a.heartbeat()
assert s.extensions["shuffle"].heartbeats.values()
await out
clean_worker(a)
clean_worker(b)
clean_scheduler(s)
def test_processing_chain():
"""
This is a serial version of the entire compute chain
In practice this takes place on many different workers.
Here we verify its accuracy in a single threaded situation.
"""
workers = ["a", "b", "c"]
npartitions = 5
df = pd.DataFrame({"x": range(100), "y": range(100)})
df["_partitions"] = df.x % npartitions
schema = pa.Schema.from_pandas(df)
worker_for = {i: random.choice(workers) for i in list(range(npartitions))}
worker_for = pd.Series(worker_for, name="_worker").astype("category")
data = split_by_worker(df, "_partitions", worker_for=worker_for)
assert set(data) == set(worker_for.cat.categories)
assert sum(map(len, data.values())) == len(df)
batches = {
worker: [b.serialize().to_pybytes() for b in t.to_batches()]
for worker, t in data.items()
}
# Typically we communicate to different workers at this stage
    # We then receive them back and reconstitute them
by_worker = {
worker: list_of_buffers_to_table(list_of_batches, schema)
for worker, list_of_batches in batches.items()
}
assert sum(map(len, by_worker.values())) == len(df)
# We split them again, and then dump them down to disk
splits_by_worker = {
worker: split_by_partition(t, "_partitions") for worker, t in by_worker.items()
}
splits_by_worker = {
worker: {
partition: [batch.serialize() for batch in t.to_batches()]
for partition, t in d.items()
}
for worker, d in splits_by_worker.items()
}
# No two workers share data from any partition
assert not any(
set(a) & set(b)
for w1, a in splits_by_worker.items()
for w2, b in splits_by_worker.items()
if w1 is not w2
)
# Our simple file system
filesystem = defaultdict(io.BytesIO)
for worker, partitions in splits_by_worker.items():
for partition, batches in partitions.items():
for batch in batches:
dump_batch(batch, filesystem[partition], schema)
out = {}
for k, bio in filesystem.items():
bio.seek(0)
out[k] = load_arrow(bio)
assert sum(map(len, out.values())) == len(df)
@gen_cluster(client=True)
async def test_head(c, s, a, b):
a_files = list(os.walk(a.local_directory))
b_files = list(os.walk(b.local_directory))
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-01-10",
dtypes={"x": float, "y": float},
freq="10 s",
)
out = dd.shuffle.shuffle(df, "x", shuffle="p2p")
out = await out.head(compute=False).persist() # Only ask for one key
assert list(os.walk(a.local_directory)) == a_files # cleaned up files?
assert list(os.walk(b.local_directory)) == b_files
clean_worker(a)
clean_worker(b)
clean_scheduler(s)
def test_split_by_worker():
workers = ["a", "b", "c"]
npartitions = 5
df = pd.DataFrame({"x": range(100), "y": range(100)})
df["_partitions"] = df.x % npartitions
worker_for = {i: random.choice(workers) for i in range(npartitions)}
s =
|
pd.Series(worker_for, name="_worker")
|
pandas.Series
|
import pandas as pd
import networkx as nx
import pytest
from kgextension.feature_selection import hill_climbing_filter, hierarchy_based_filter, tree_based_filter
from kgextension.generator import specific_relation_generator, direct_type_generator
class TestHillCLimbingFilter:
def test1_high_beta(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_generator_data_low_beta(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
input_df = specific_relation_generator(
df, columns=['link'], hierarchy_relation='http://www.w3.org/2004/02/skos/core#broader')
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test2_expected.csv")
output_df = hill_climbing_filter(input_df, 'link_in_boolean_http://dbpedia.org/resource/Category:Prefectures_in_France', beta=0.05, k=3)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_expected.csv")
output_df = hill_climbing_filter(input_df, 'class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test4_callable_function(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
def fake_metric(df, class_col, param=5):
return 1/((df.sum(axis=1)*class_col).sum()/param)
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test4_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', metric=fake_metric, G= input_DG, param=6)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test5_no_graph(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
with pytest.raises(RuntimeError) as excinfo:
_ = hill_climbing_filter(input_df, 'class', beta=0.5, k=2)
assert "df.attrs['hierarchy]" in str(excinfo.value)
class TestHierarchyBasedFilter():
def test1_no_pruning_info_gain_with_G(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test1_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="info_gain", pruning=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_no_pruning_correlation(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test2_expected.csv")
        input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
        input_DG = input_df.attrs['hierarchy']
        output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="correlation", pruning=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_pruning_info_gain_all_remove_True(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test3_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="info_gain", pruning=True)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test4_pruning_correlation_all_remove_True(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test4_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="correlation", pruning=True)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test5_pruning_info_gain_all_remove_False(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test5_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="info_gain", pruning=True, all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test6_pruning_correlation_all_remove_False(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test6_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="correlation", pruning=True,
all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test7_no_input_G(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test7_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, metric="correlation", pruning=True,
all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test8_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test8_expected.csv")
output_df = hierarchy_based_filter(input_df, 'class', G=input_DG, threshold=0.99, metric="info_gain", pruning=True)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test9_callable_function(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
def fake_metric(df_from_hierarchy, l, d):
equivalence = df_from_hierarchy[l] == df_from_hierarchy[d]
return equivalence.sum()/len(equivalence)
expected_df =
|
pd.read_csv("test/data/feature_selection/hierarchy_based_test9_expected.csv")
|
pandas.read_csv
|
#Copyright 2019 NUS pathogen genomics
#Written by <NAME> (<EMAIL>)
import os
import sys
import gzip
import argparse
import pandas as pd
import statistics
import subprocess
from statistics import mode
from collections import Counter
#function to determine repeat number based on total number of mismatches in primer sequence
def chooseMode(name, table, CounterList):
maxcount = max(CounterList.values())
repeatToCheck = []
for k, v in CounterList.items():
if v == maxcount:
repeatToCheck.append(k)
x = 0
for i, j in table.items():
if name in i:
x += 1
mismatchDict = {}
for rp in repeatToCheck:
mismatchDict[rp] = 0
for i in range(x):
string = name + '_' + str(i+1)
if table[string][1] in repeatToCheck:
mismatchDict[table[string][1]] += table[string][0]
checklist2 = []
for m, n in mismatchDict.items():
checklist2.append(n)
duplicates = ''
for item in checklist2:
if checklist2.count(item) > 1:
duplicates = 'yes'
finalMode = ''
if duplicates == 'yes':
finalMode = '/'.join(str(r) for min_value in (min(mismatchDict.values()),) for r in mismatchDict if mismatchDict[r]==min_value)
else:
finalMode = min(mismatchDict.keys(), key=(lambda k: mismatchDict[k]))
return finalMode
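# Toy illustration of the tie-break (made-up values, not from the original script): repeats 3
# and 4 are equally frequent, but repeat 3 accumulates fewer primer mismatches, so it wins.
#   table = {'MIRU02_1': [2, 3], 'MIRU02_2': [0, 3], 'MIRU02_3': [5, 4], 'MIRU02_4': [1, 4]}
#   counts = Counter([3, 3, 4, 4])
#   chooseMode('MIRU02', table, counts)   # -> 3  (2 + 0 mismatches vs 5 + 1)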
'''
Main function
'''
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
MIRU_table = script_dir + "/MIRU_table"
MIRU_table_0580 = script_dir + "/MIRU_table_0580"
MIRU_primers = script_dir + "/MIRU_primers"
parser = argparse.ArgumentParser()
main_group = parser.add_argument_group('Main options')
main_group.add_argument('-r', '--reads', required=True, help='input reads file in fastq/fasta format (required)')
main_group.add_argument('-p', '--prefix', required=True, help='sample ID (required)')
main_group.add_argument('--table', type=str, default=MIRU_table, help='allele calling table')
main_group.add_argument('--primers', type=str, default=MIRU_primers, help='primers sequences')
optional_group = parser.add_argument_group('Optional options')
optional_group.add_argument('--amplicons', help='provide output from primersearch and summarize MIRU profile directly', action='store_true')
optional_group.add_argument('--details', help='for inspection', action='store_true')
optional_group.add_argument('--nofasta', help='delete the fasta reads file generated if your reads are in fastq format', action='store_true')
args = parser.parse_args()
if not os.path.exists(args.reads):
sys.exit('Error: ' + args.reads + ' is not found!')
sample_prefix = args.prefix
sample_dir = os.path.dirname(os.path.abspath(args.reads))
mismatch_allowed = 18
psearchOut = sample_dir + '/' + sample_prefix + '.' + str(mismatch_allowed) + '.primersearch.out'
df =
|
pd.read_csv(MIRU_table, sep='\t')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Downloads rfr and stores in sqlite database for future reference
"""
import datetime
import os
import zipfile
import pandas as pd
import urllib
from datetime import date
import logging
from solvency2_data.sqlite_handler import EiopaDB
from solvency2_data.util import get_config
from solvency2_data.rfr import read_spot, read_spreads, read_govies, read_meta
from solvency2_data.scraping import eiopa_link
def get_workspace() -> dict:
"""
Get the workspace for saving xl and the database
Args:
None
Returns:
dictionary with workspace data
"""
config = get_config().get("Directories")
path_db = config.get("db_folder")
database = os.path.join(path_db, "eiopa.db")
path_raw = config.get("raw_data")
return {"database": database, "raw_data": path_raw}
def download_file(url: str, raw_folder: str, filename: str = "") -> str:
"""
This function downloads a file and give it a name if not explicitly specified.
Args:
        url: url of the file to download
        raw_folder: folder in which the downloaded file is saved
        filename: file name (without path) to give to the downloaded file; the url's extension is appended if missing
Returns:
name of the file
"""
if filename:
extension = url[(url.rfind(".")) :]
if extension not in filename:
filename = filename + extension
else:
pass
else:
# if filename not specified, then the file name will be the original file name
filename = url[(url.rfind("/") + 1) :]
target_file = os.path.join(raw_folder, filename)
if os.path.isfile(target_file):
logging.info("file already exists in this location, not downloading")
else:
if not os.path.exists(raw_folder):
os.makedirs(raw_folder)
urllib.request.urlretrieve(url, target_file) # simpler for file downloading
logging.info(
"file downloaded and saved in the following location: " + target_file
)
return target_file
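# Minimal usage sketch (not from the original module); the URL and folder names below are
# placeholders, not real endpoints.
#   target = download_file(
#       url="https://example.com/some_rfr_archive.zip",  # hypothetical URL
#       raw_folder="raw_data",                           # created if it does not exist
#       filename="eiopa_rfr_2021-12-31",                 # ".zip" is appended automatically
#   )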
def download_EIOPA_rates(url: str, ref_date: str) -> dict:
"""
Download and unzip the EIOPA files
Args:
url: url from which data is to be downloaded
ref_date: the reference date of the data
Returns:
dictionary with metadata
"""
workspace = get_workspace()
raw_folder = workspace["raw_data"]
zip_file = download_file(url, raw_folder)
# Change format of ref_date string for EIOPA Excel files from YYYY-mm-dd to YYYYmmdd:
reference_date = ref_date.replace('-', '')
name_excelfile = "EIOPA_RFR_" + reference_date + "_Term_Structures" + ".xlsx"
name_excelfile_spreads = "EIOPA_RFR_" + reference_date + "_PD_Cod" + ".xlsx"
with zipfile.ZipFile(zip_file) as zipobj:
zipobj.extract(name_excelfile, raw_folder)
zipobj.extract(name_excelfile_spreads, raw_folder)
return {
"rfr": os.path.join(raw_folder, name_excelfile),
"meta": os.path.join(raw_folder, name_excelfile),
"spreads": os.path.join(raw_folder, name_excelfile_spreads),
"govies": os.path.join(raw_folder, name_excelfile_spreads),
}
def extract_spot_rates(rfr_filepath: str) -> dict:
"""
Extract spot rates
Args:
rfr_filepath: path to Excel file with rfr data
Returns:
        Series of spot rates indexed by scenario, currency_code and duration
"""
logging.info("Extracting spots: " + rfr_filepath)
# TODO: Complete this remap dictionary
currency_codes_and_regions = {
"EUR": "Euro",
"PLN": "Poland",
"CHF": "Switzerland",
"USD": "United States",
"GBP": "United Kingdom",
"NOK": "Norway",
"SEK": "Sweden",
"DKK": "Denmark",
"HRK": "Croatia",
}
currency_dict = dict((v, k) for k, v in currency_codes_and_regions.items())
xls = pd.ExcelFile(rfr_filepath, engine="openpyxl")
rates_tables = read_spot(xls)
rates_tables = pd.concat(rates_tables)
rates_tables = rates_tables.rename(columns=currency_dict)[currency_dict.values()]
label_remap = {
"RFR_spot_no_VA": "base",
"RFR_spot_with_VA": "va",
"Spot_NO_VA_shock_UP": "up",
"Spot_NO_VA_shock_DOWN": "down",
"Spot_WITH_VA_shock_UP": "va_up",
"Spot_WITH_VA_shock_DOWN": "va_down",
}
rates_tables = rates_tables.rename(label_remap)
rates_tables = rates_tables.stack().rename("spot")
rates_tables.index.names = ["scenario", "duration", "currency_code"]
rates_tables.index = rates_tables.index.reorder_levels([0, 2, 1])
rates_tables = rates_tables.sort_index()
return rates_tables
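# Sketch of how the returned structure can be queried (illustrative, not from the original
# code): the result is a pandas Series named "spot" with a (scenario, currency_code, duration)
# MultiIndex, so individual points or curves can be selected with .loc / .xs, e.g.
#   rates = extract_spot_rates("EIOPA_RFR_20211231_Term_Structures.xlsx")  # hypothetical path
#   rates.loc[("base", "EUR", 10)]   # 10-year base spot rate for EUR
#   rates.xs(("va", "GBP"), level=["scenario", "currency_code"])  # full VA curve for GBP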
def extract_meta(rfr_filepath: str) -> dict:
"""
    Extract meta data
    Args:
        rfr_filepath: path to Excel file with rfr data
    Returns:
        DataFrame with meta data per country
"""
logging.info("Extracting meta data :" + rfr_filepath)
meta = read_meta(rfr_filepath)
meta = pd.concat(meta).T
meta.columns = meta.columns.droplevel()
meta.index.name = "Country"
meta = meta.sort_index()
return meta
def extract_spreads(spread_filepath):
"""
Extract spreads data
Args:
spread_filepath: path to Excel file with spreads data
Returns:
        Series of financial and non-financial fundamental spreads
"""
logging.info("Extracting spreads: " + spread_filepath)
xls = pd.ExcelFile(spread_filepath, engine="openpyxl")
spreads = read_spreads(xls)
spreads_non_gov = pd.concat(
{
i: pd.concat(spreads[i])
for i in [
"financial fundamental spreads",
"non-financial fundamental spreads",
]
}
)
spreads_non_gov = spreads_non_gov.stack().rename("spread")
spreads_non_gov.index.names = ["type", "currency_code", "duration", "cc_step"]
spreads_non_gov.index = spreads_non_gov.index.reorder_levels([0, 1, 3, 2])
spreads_non_gov = spreads_non_gov.rename(
{
"financial fundamental spreads": "fin",
"non-financial fundamental spreads": "non_fin",
}
)
return spreads_non_gov
def extract_govies(govies_filepath):
"""
Extract govies data
Args:
govies_filepath: path to Excel file with govies data
Returns:
        DataFrame with central government fundamental spreads, or None if not found
"""
logging.info("Extracting govies: " + govies_filepath)
xls = pd.ExcelFile(govies_filepath, engine="openpyxl")
cache = read_govies(xls)
if cache["central government fundamental spreads"] is not None:
spreads_gov = (
cache["central government fundamental spreads"]
.stack()
.rename("spread")
.to_frame()
)
spreads_gov.index.names = ["duration", "country_code"]
spreads_gov.index = spreads_gov.index.reorder_levels([1, 0])
else:
logging.error("No govies found: " + govies_filepath)
spreads_gov = None
return spreads_gov
def extract_sym_adj(sym_adj_filepath: str, ref_date: str) -> pd.DataFrame:
"""
Extract symmetric adjustment
Args:
sym_adj_filepath: path to Excel file with symmetric adjustment
Returns:
DataFrame with symmetric adjustment data
"""
df = pd.read_excel(
sym_adj_filepath,
sheet_name="Symmetric_adjustment",
usecols="E, K",
nrows=1,
skiprows=7,
header=None,
squeeze=True,
names=["ref_date", "sym_adj"],
)
input_ref = ref_date
ref_check = df.at[0, "ref_date"].strftime("%Y-%m-%d")
if input_ref != ref_check:
logging.warning("Date mismatch in sym_adj file: " + sym_adj_filepath)
logging.warning(
"Try opening this file and setting the date correctly then save and close, and rerun."
)
return None
else:
df = df.set_index("ref_date")
return df
def add_to_db(ref_date: str, db: EiopaDB, data_type: str = "rfr"):
"""
Call this if a set is missing
Args:
ref_date: reference date
db: database to be used
data_type: type of the dataset to be added
Returns:
None
"""
url = eiopa_link(ref_date, data_type=data_type)
set_id = db.get_set_id(url)
if data_type != "sym_adj":
files = download_EIOPA_rates(url, ref_date)
if data_type == "rfr":
df = extract_spot_rates(files[data_type])
elif data_type == "meta":
df = extract_meta(files[data_type])
elif data_type == "spreads":
df = extract_spreads(files[data_type])
elif data_type == "govies":
df = extract_govies(files[data_type])
else:
raise KeyError
elif data_type == "sym_adj":
workspace = get_workspace()
raw_folder = workspace["raw_data"]
file = download_file(url, raw_folder)
df = extract_sym_adj(file, ref_date)
if df is not None:
df = df.reset_index()
df["url_id"] = set_id
df["ref_date"] = ref_date
df.to_sql(data_type, con=db.conn, if_exists="append", index=False)
set_types = {"govies": "rfr", "spreads": "rfr", "meta": "rfr"}
db.update_catalog(
url_id=set_id,
dict_vals={
"set_type": set_types.get(data_type, data_type),
"primary_set": True,
"ref_date": ref_date,
},
)
return None
def validate_date_string(ref_date):
"""
This function just converts the input date to a string YYYY-mm-dd for use in SQL
e.g.
from datetime import date
ref_date = validate_date_string(date(2021,12,31))
ref_date = validate_date_string('2021-12-31')
Both return the same result
"""
if type(ref_date) == datetime.date:
return ref_date.strftime('%Y-%m-%d')
elif type(ref_date) == str:
try:
return datetime.datetime.strptime(ref_date, "%Y-%m-%d").strftime("%Y-%m-%d")
except (TypeError, ValueError):
logging.warning("Date type not recognised. Try datetime.date or YYYY-mm-dd")
return None
else:
return None
def get(ref_date: str, data_type: str = "rfr"):
"""
Main API function
Args:
ref_date: reference date. Can be string or datetime.date. If passed as datetime.date this is converted for use elsewhere.
data_type: type of the required dataset
Returns:
DataFrame with data if not empty dataset, otherwise None
"""
# Validate the provided ref_date:
ref_date = validate_date_string(ref_date)
# Check if DB exists, if not, create it:
workspace = get_workspace()
database = workspace["database"]
db = EiopaDB(database)
sql_map = {
"rfr": "SELECT * FROM rfr WHERE ref_date = '" + ref_date + "'",
"meta": "SELECT * FROM meta WHERE ref_date = '" + ref_date + "'",
"spreads": "SELECT * FROM spreads WHERE ref_date = '" + ref_date + "'",
"govies": "SELECT * FROM govies WHERE ref_date = '" + ref_date + "'",
"sym_adj": "SELECT * FROM sym_adj WHERE ref_date = '" + ref_date + "'",
}
sql = sql_map.get(data_type)
df = pd.read_sql(sql, con=db.conn)
if df.empty:
add_to_db(ref_date, db, data_type)
df =
|
pd.read_sql(sql, con=db.conn)
|
pandas.read_sql
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Dimension_Reduction import Viewer
from Silhouette import Silhouette_viewer
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture as GMM
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
import skfuzzy as fuzz
from sklearn.ensemble import IsolationForest
suffix = "5"
data = pd.read_csv(f"data_preprocess_{suffix}.csv", delimiter=",")
data_plot = pd.read_csv(f"pca_dim2_{suffix}.csv", delimiter=",")
n_clusters = 2
# Compute Elbow method with K-Means to determine number of clusters.
def compute_elbow(max_num_cluster=7):
file = f"elbow_{suffix}.png"
distortions = []
K = range(1, max_num_cluster)
for k in K:
print("K-Means with k = ", k)
kmeanModel = KMeans(n_clusters=k, random_state=10)
kmeanModel.fit(data)
distortions.append(kmeanModel.inertia_)
plt.figure(figsize=(8, 8))
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.savefig(file)
def compute_silhouette_profile():
for nn in range(2,4):
model = KMeans(n_clusters=nn, random_state=10)
labels = model.fit_predict(data)
centers = model.cluster_centers_
sil = Silhouette_viewer()
sil.silhouette_plot(data, labels, centers, f'silhouette_{suffix}_{nn}.png')
def clustering():
view_tool = Viewer()
print("kmeans")
kmeans = KMeans(n_clusters=n_clusters, random_state=10)
labels_km = kmeans.fit_predict(data)
labels_km_df = pd.DataFrame(labels_km)
labels_km_df.to_csv(f"labels_km_{suffix}.csv", index=False)
view_tool.view_vs_target(data_plot, labels_km_df, suffix, 'km')
print("fuzz")
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(data.T.values, n_clusters, 2, error=0.005, maxiter=1000)
labels_fuz = np.argmax(u, axis=0)
labels_fuz_df = pd.DataFrame(labels_fuz)
labels_fuz_df.to_csv(f"labels_fuzz_{suffix}.csv", index=False)
view_tool.view_vs_target(data_plot, labels_fuz_df, suffix, 'fuzz')
print("gmm")
gmm = GMM(n_components=n_clusters, random_state=10)
labels_gmm = gmm.fit_predict(data)
labels_gmm_df = pd.DataFrame(labels_gmm)
labels_gmm_df.to_csv(f"labels_gmm_{suffix}.csv", index=False)
view_tool.view_vs_target(data_plot, labels_gmm_df, suffix, 'gmm')
print("dbsc")
dbscan = DBSCAN(eps=2, min_samples=20).fit(data)
labels_dbsc = dbscan.labels_
labels_dbsc_df = pd.DataFrame(labels_dbsc)
labels_dbsc_df.to_csv(f"labels_dbsc_{suffix}.csv", index=False)
view_tool.view_vs_target(data_plot, labels_dbsc_df, suffix, 'dbsc')
print("hier")
hierarchical = AgglomerativeClustering(n_clusters=n_clusters)
hierarchical.fit(data)
labels_hier = hierarchical.labels_
labels_hier_df =
|
pd.DataFrame(labels_hier)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 15:46:07 2019
@author:
"""
import logging
import os
import sys
import numpy as np
import pickle
import csv
import datetime as dt
import pandas as pd
import matplotlib.pylab as plt
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
_local_dir = False
CUP_WORK = True
#%% Dirs variables
if _local_dir:
working_dir = os.path.join(file_dir, os.pardir)
else:
working_dir = os.path.abspath(os.path.join(file_dir, f"../../../../Pycodes/"))
if CUP_WORK:
working_dir = os.path.join(working_dir, "cup_kg")
_model_path = os.path.abspath(os.path.join(working_dir, "models"))
_3ples_directory = os.path.abspath(os.path.join(working_dir, "3ples"))
_dictionay_directory = os.path.abspath(os.path.join(working_dir, "dicts"))
_pickle_path = os.path.abspath(os.path.join(working_dir, os.pardir, "pickle"))
_csv_path = os.path.abspath(
os.path.join(
working_dir, os.pardir, "csv", "random_1747_1_True"
) # random_1747_1_True random_100_1_False
)
_cup_datasets_path = os.path.abspath(
os.path.join(working_dir, os.pardir, "cup_datasets")
)
_txt_path = os.path.abspath(os.path.join(working_dir, os.pardir, "txt"))
os.makedirs(_txt_path, exist_ok=True)
_image_path = os.path.abspath(os.path.join(working_dir, os.pardir, "images"))
os.makedirs(_image_path, exist_ok=True)
#%% GLOBAL VARIABLES
KEYSPACE_NAME = "cup_1747_1" # cup_1747_1 cup_100_1
dict_name_id = {
"medical-branch": "medical-branch-id",
"appointment-provider": "referral-centre-id",
"practitioner": "practitioner-id",
"health-service-provision": "refined-health-service-id",
"patient": "encrypted-nin-id",
"booking-staff": "booking-agent-id",
}
dict_id_col = {
"medical-branch-id": "sa_branca_id",
"referral-centre-id": "sa_uop_codice_id",
"practitioner-id": "sa_med_id",
"refined-health-service-id": "indx_prestazione",
"encrypted-nin-id": "sa_ass_cf",
"booking-agent-id": "sa_utente_id",
}
dict_col_name = {
"sa_branca_id": "medical-branch",
"sa_uop_codice_id": "appointment-provider",
"sa_med_id": "practitioner",
"indx_prestazione": "health-service-provision",
"sa_ass_cf": "patient",
"sa_utente_id": "booking-staff",
"sa_ut_id": "booking-staff",
}
dict_col_id = {
"sa_branca_id": "medical-branch-id",
"sa_uop_codice_id": "referral-centre-id",
"sa_med_id": "practitioner-id",
"indx_prestazione": "refined-health-service-id",
"sa_ass_cf": "encrypted-nin-id",
"sa_utente_id": "booking-agent-id",
"sa_data_ins": "last-reservation-change-date",
"sa_data_prescr": "referral-date",
"sa_data_app": "booked-date",
"sa_data_pren": "reservation-date",
"sa_comune_id": "nuts-istat-code",
"sa_branca_id": "medical-branch-id",
"sa_sesso_id": "gender",
"sa_ut_id": "updating-booking-agent-id",
"sa_num_prestazioni": "number-of-health-services",
"sa_classe_priorita": "priority-code",
"sa_asl": "local-health-department-id",
"sa_eta_id": "patient-age",
"sa_gg_attesa": "res-waiting-days",
"sa_gg_attesa_pdisp": "first-res-waiting-days",
}
dict_table = {
"date_col": ["sa_data_ins", "sa_data_prescr", "sa_data_app", "sa_data_pren"],
"category_col": [
"sa_ass_cf",
"sa_utente_id",
"sa_uop_codice_id",
"sa_comune_id",
"sa_branca_id",
"sa_med_id",
"sa_sesso_id",
"sa_ut_id",
"sa_num_prestazioni",
"sa_classe_priorita",
"sa_asl",
"indx_prestazione",
],
"number_col": ["sa_eta_id", "sa_gg_attesa", "sa_gg_attesa_pdisp"],
}
cup_date_id = "sa_data_pren"
time_limit_training = "{year}-{month}-{day}".format(year=2018, month=1, day=1)
time_limit_training = dt.datetime.strptime(time_limit_training, "%Y-%m-%d")
time_period_days = 365
time_limit_test = time_limit_training + dt.timedelta(days=time_period_days)
RSEED = 0
entities_names = [
"medical-branch",
"appointment-provider",
"practitioner",
"health-service-provision",
"patient",
"booking-staff",
]
relations_names = ["referral", "reservation", "health-care", "provision"]
cols_first_table = [
"encrypted-nin-id",
"gender",
"priority-code",
"nuts-istat-code",
"practitioner-id",
"booking-agent-id",
"updating-booking-agent-id",
]
cols_second_table = [
"referral-centre-id",
"number-of-health-services",
"local-health-department-id",
"refined-health-service-id",
]
dict_colors = {
"appointment-provider": "#66C2A5",
"booking-staff": "#FC8D62",
"health-service-provision": "#8DA0CB",
"medical-branch": "#E78AC3",
"patient": "#A6D854",
"practitioner": "#FFD92F",
}
#%% IMPORT
dictionay_name = f"{KEYSPACE_NAME}_dict_embeddings.pkl"
dictionay_file = os.path.join(_dictionay_directory, dictionay_name)
with open(dictionay_file, "rb") as open_file:
dict_emb_concepts = pickle.load(open_file)
dictionay_name = f"{KEYSPACE_NAME}_dict_concepts.pkl"
dictionay_file = os.path.join(_dictionay_directory, dictionay_name)
with open(dictionay_file, "rb") as open_file:
dict_entities = pickle.load(open_file)
csv_name = f"triples_{KEYSPACE_NAME}.csv"
csv_file = os.path.join(_3ples_directory, csv_name)
with open(csv_file) as open_file:
triples = list(csv.reader(open_file, delimiter=","))
csv_name = f"df_aslC.csv"
csv_file = os.path.join(_csv_path, csv_name)
df_cup = pd.read_csv(csv_file, index_col=0)
# transform to datetime
for date_col in ["sa_data_ins", "sa_data_prescr", "sa_data_app", "sa_data_pren"]:
df_cup[date_col] = pd.to_datetime(df_cup[date_col], errors="coerce")
del date_col
csv_name = "prestazioni.csv"
csv_file = os.path.join(_cup_datasets_path, csv_name)
df_prest = pd.read_csv(csv_file, sep=";", index_col=0)
dict_prest = df_prest["descrizione"].to_dict()
csv_name = "prestazioni_to_branche_cup3.csv"
csv_file = os.path.join(_cup_datasets_path, csv_name)
df_prest_to_branche = pd.read_csv(csv_file, sep=",")
df_prest_to_branche = (
df_prest_to_branche.fillna("")
.groupby("id_prestazione")["id_branca"]
.apply(list)
.reset_index(name="list_branca")
)
# DataFrame.append was removed in pandas 2.0; concatenating a one-row frame is equivalent
df_prest_to_branche = pd.concat(
    [df_prest_to_branche, pd.DataFrame([{"id_prestazione": 99999999, "list_branca": []}])],
    ignore_index=True,
)
df_prest_to_branche = df_prest_to_branche.set_index("id_prestazione")[
"list_branca"
].to_dict()
#%% FUNCTIONS
# from id to identification
def from_id_to_identification(triples, dict_entities, entity_id):
dict_map = {}
for row in triples:
if row[1] == f"@has-{entity_id}":
dict_map[row[0]] = dict_entities[row[2]]["value"]
return dict_map
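# Illustrative usage sketch (not part of the original flow; it assumes the relation names
# in `triples` follow the "@has-...-id" convention used in the function above):
dict_map_practitioner = from_id_to_identification(
    triples, dict_entities, dict_name_id["practitioner"]
)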
def create_embedding_dataframe(
dict_emb_concepts,
entity_name,
new_column=False,
with_mapping=False,
dict_map=None,
index_col=None,
):
if with_mapping:
# create embedding dict with identification
dict_new = {}
for key, value in dict_emb_concepts[entity_name].items():
dict_new[dict_map[key]] = value
# create dataFrame of embedding
df_emb = pd.DataFrame.from_dict(dict_new, orient="index")
df_emb.index.name = index_col
else:
# create dataFrame of embedding
df_emb =
|
pd.DataFrame.from_dict(dict_emb_concepts[entity_name], orient="index")
|
pandas.DataFrame.from_dict
|
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits default to False; RefPerc defaults to True
>>> # MCategory is always assigned according to FixedLimitsHigh/Low ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is set to True automatically when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
from copy import deepcopy # math, sys, chain and scipy are already imported above
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- General parameters
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftsize: the Difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; choosen filtertechnique is applied after fct
windowsSize must be an even number
for savgol_filter windowsSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; choosen filtertechnique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:] # integer division: .iloc requires an integer start position
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
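# Illustrative helper (not part of the original module): a minimal sketch of how
# getDerivative is called on a time-indexed frame; the data below is hypothetical.
def _demoGetDerivative():
    idx=pd.date_range('2021-01-01',periods=300,freq='s')
    df=pd.DataFrame({'p':np.sin(np.linspace(0,10,300))},index=idx)
    dfRolling=getDerivative(df,'p',shiftSize=1,windowSize=60)          # rolling-mean filtered
    dfSavgol=getDerivative(df,'p',windowSize=60,savgol_polyorder=2)    # savgol filtered
    return dfRolling,dfSavgol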
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns max. pMin for alle NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- General functions
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
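# Illustrative sketch (not part of the original module): pairwise yields overlapping
# neighbour tuples, which is how it is used on DataFrame rows and Series items below.
def _demoPairwise():
    return list(pairwise([1,2,3,4]))  # [(1, 2), (2, 3), (3, 4)]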
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
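# Illustrative sketch (not part of the original module): slice a hypothetical 1-hour
# window into 12-minute sections that overlap by 1 minute.
def _demoGenTimespans():
    tStart=pd.Timestamp('2021-03-19 00:00:00')
    tEnd=tStart+pd.Timedelta('1 hour')
    return genTimespans(tStart,tEnd
                        ,timeSpan=pd.Timedelta('12 Minutes')
                        ,timeOverlap=pd.Timedelta('1 Minute'))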
def gen2Timespans(
timeStart # start of a "process"
,timeEnd # end of a "process"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) and timeEnd likewise
):
"""
generates 2 time ranges of equal length
1 around timeStart
1 around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # if a denominator is given: round the quotient and return it as int; if False, round it to 2 decimals
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
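# Illustrative sketch (not part of the original module): total duration of two
# hypothetical time pairs, once as a Timedelta and once expressed in whole minutes.
def _demoFTotalTimeFromPairs():
    t=pd.Timestamp('2021-03-19 01:00:00')
    pairs=[(t,t+pd.Timedelta('5 minutes'))
          ,(t+pd.Timedelta('10 minutes'),t+pd.Timedelta('12 minutes'))]
    tdTotal=fTotalTimeFromPairs(pairs)                                        # Timedelta('0 days 00:07:00')
    minutes=fTotalTimeFromPairs(pairs,denominator=pd.Timedelta('1 minute'))   # 7
    return tdTotal,minutes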
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# pairwise over all rows
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# if 1 is not x and 2 is x: tEin=t2, "switches on"
if not row1Value and row2Value:
tEin=i2
# if 1 is x and 2 is not x: tAus=t2, "switches off"
elif row1Value and not row2Value:
if tEin != None:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # otherwise: the condition is now off and was never on
# the condition can only switch on in the first case
# if 1 is x and 2 is x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# last pair
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
# if fct is given:
# find all [time ranges] for which fct is True; these ranges are returned; only pairs are returned; solitary True values are not lost but are returned as a pair (t,t)
# solitary True values are ONLY included when s contains just 1 value and that value is True; the single returned pair then carries the solitary timestamp for both times
# tdAllowed can be specified
# afterwards, ranges that are no more than tdAllowed apart are merged; the merged ranges are then returned
# if fct is None:
# tdAllowed must be specified
# s is split into time ranges whose gaps do not exceed the threshold tdAllowed; these ranges are returned
# in general, every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times to achieve this
# because no time range contained in s should be lost
# if s contains only 1 value, 1 time pair with that timestamp for both times is returned, provided the value is not None
# returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 pair with identical times if the single element is True
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 pair with identical times if the single element is not None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# pairwise over all times
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.items())): # .items(): Series.iteritems() was removed in pandas 2.0
s1Value=fct(s1)
s2Value=fct(s2)
# if 1 is not x and 2 is x: tEin=t2, "switches on"
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# beim ersten Paar "geht Ein"
pass
# if 1 is x and 2 is not x: tAus=t2, "switches off"
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singulaeres Ereignis
# Paar mit selben Zeiten
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # geht Aus ohne Ein zu sein
if idx > 0: # Info
pass
else:
# im ersten Paar
pass
# wenn 1 x und 2 x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# handle the last pair
# still on at the end of the Series: store the pair
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# handle tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# pairwise over all times
# start a new pair
anzInPair=1 # number of times in the current time span
for (i1, s1), (i2, s2) in pairwise(s.items()): # .items(): Series.iteritems() was removed in pandas 2.0
td=i2-i1
if td > tdAllowed: # gap between 2 times exceeds the threshold: the current time span is finished
if tEin==None:
# the first pair is already more than the threshold apart
# closing the span here is ignored, otherwise the span would contain only 1 value
# the current span starts at the 1st value and extends across the threshold
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# Zeitspanne abschließen
tPair=(tEin,i1)
tPairs.append(tPair)
# neue Zeitspanne beginnen
tEin=i2
anzInPair=1
else:
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
anzInPair=2
else: # gap within the allowed span, continue ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# handle the last time pair
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# a single last value would be left over, so extend the last time span ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # merge the following pair into the previous pair
tPairs.remove(tp2) # remove the following pair
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # recursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
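# Illustrative sketch (not part of the original module): two hypothetical pairs separated
# by a 1-second gap are merged into one pair, a pair further away stays separate.
def _demoFCombineSubsequenttPairs():
    t=pd.Timestamp('2021-03-19 01:02:00')
    pairs=[(t,t+pd.Timedelta('2 seconds'))
          ,(t+pd.Timedelta('3 seconds'),t+pd.Timedelta('5 seconds'))
          ,(t+pd.Timedelta('60 seconds'),t+pd.Timedelta('65 seconds'))]
    return fCombineSubsequenttPairs(pairs,tdAllowed=pd.Timedelta('1 second'))
    # -> [(t, t+5s), (t+60s, t+65s)]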
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameters and functions for LDS reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
# colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
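# Illustrative sketch (not part of the original module): the attribute dicts above look
# like keyword arguments for matplotlib step plots; the data plotted here is hypothetical.
def _demoAttrsDct():
    fig,ax=plt.subplots()
    ax.step(range(5),[1,2,2,3,1],**attrsDct['p Src'])
    ax.step(range(5),[2,1,3,2,2],**attrsDct['Q Snk RTTM'])
    return ax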
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated are: colRegExSchieberID (which valve is concerned), colRegExMiddle (command or state) and colRegExEventID (which command or state)
# the commands and states (the values of colRegExEventID) must be defined below in order to define the marker (of the command or state)
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value that marks commands (==> eventCCmds)
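# Illustrative sketch (not part of the original module): how pSIDEvents decomposes an
# event ID into valve ID, command/state class and event; the ID string is hypothetical.
def _demoPSIDEvents():
    m=pSIDEvents.search('Objects.3S_FBG_ESCHIEBER.3S_XYZ_01.Out.AUF')
    schieberID=m.group('colRegExSchieberID')               # 'XYZ_01'
    isCmd=m.group('colRegExMiddle')==valRegExMiddleCmds    # True: it is a command
    markerIdx=eventCCmds[m.group('colRegExEventID')]       # 0 (index into the marker definitions)
    return schieberID,isCmd,markerIdx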
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
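# Illustrative sketch (not part of the original module): back-fill attributes that are
# missing in a parsed LDSI element with the defaults above; the attribute dict is hypothetical.
def _demoLDSParameterDefaults():
    parsedAttribs={'NAME':'6_AAD_41_OHN','TIMER':'360'}
    merged={**LDSParameterDataD,**parsedAttribs}
    merged['TTIMERTOALARM']=int(merged['TIMER'])/4  # same TIMER/4 convention as in dfSegsNodesNDataDpkt
    return merged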
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# derive a DIVPipelineName from a PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# derive a DIVPipelineName from a SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
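# Illustrative sketch (not part of the original module): derive the DIVPipelineName from
# a segment name of the usual Nr_X_Nr_X form.
def _demoFDIVNameFromSEGName():
    return fDIVNameFromSEGName('6_AAD_41_OHV1')  # '6_41'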
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
works in principle for SEG and pressure results: every result PV of a vector yields the base that is valid for all result PVs of that vector
i.e. the result PVs of a vector differ only in their suffix
see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
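# Illustrative sketch (not part of the original module): the segment result base ID
# derived from a segment name; the two OHV names are mapped to their model names first.
def _demoFGetSEGBaseIDFromSEGName():
    return fGetSEGBaseIDFromSEGName('6_AAD_41_OHV1')
    # 'Objects.3S_FBG_SEG_INFO.3S_L_6_AAD_41_OHN.In.'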
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df with ODI parameterization data
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # only return ergIDs for which patternPat matches
):
"""
returns a string
of strSep-separated IDs from dfODI that contain baseID (and, if pattern is True, match patternPat)
baseID (and group(0) of patternPat if pattern is True) are removed from the returned IDs
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # an Access model
,am=None # an Access model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW description: yields the node names of the segment definition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT description (RICHT-DP): yields, among other things, the SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # function that derives the SEGName (among other things) from the SWVT description (RICHT-DP)
,fGetBaseIDFromResID=fGetBaseIDFromResID # function that derives the base ID of the node results from the OPCITEM-ID of a node's PH channel
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # function that derives the base ID of the segment results from the SEGName
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
all segments with path data (edge sequences), together with edge and node data and parameterization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; Schluessel in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- read the model
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- determine segments
# --- from the model
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- defined only in LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- zusammenfassen
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# Knotennamen der SEGDef ergänzen
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# ausduennen
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sortieren
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- determine the segment edge sequences
dfSegsNodeLst={} # for checking purposes only
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- add node data
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- add node data point (DPKT) data
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- determine the running number of the pressure measuring point within its segment
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# add LDSPara
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# add LDSParaPT
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID']) & dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0 ) & (pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
# Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True) # parentheses required: & binds tighter than >
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
def fResValidSeriesSTAT_S(x): # STAT_S
    return bool(pd.notnull(x) and x >= 0)
def fResValidSeriesSTAT_S601(x): # STAT_S
    return bool(pd.notnull(x) and x == 601)
def fResValidSeriesAL_S(x,value=20): # AL_S
    return bool(pd.notnull(x) and x == value)
def fResValidSeriesAL_S10(x):
    return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
    return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
    return fResValidSeriesAL_S(x,value=3)
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
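# Note: the three lists above are parallel and are consumed index-wise (see fGetResTimes below):
# index 0: STAT_S >= 0 -> 'Zustaendig', index 1: AL_S == 20 -> 'Alarm', index 2: STAT_S == 601 -> 'Stoerung'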
# (fast) alle verfuegbaren Erg-Kanaele
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # Schieberfarben
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
# alle Basisfarben außer y gelb
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # Schiebersymobole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- Reports LDS: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
returns a df: the specified LDSResChannels (AL_S, ...) for an ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
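# Illustrative call (the ResIDBase is the placeholder pattern from the docstring; lx/timeStart/timeEnd as in the caller):
# dfSegReprVec=getLDSResVecDf(ResIDBase='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.'
#                            ,LDSResBaseType='SEG',lx=lx,timeStart=timeStart,timeEnd=timeEnd)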
def fGetResTimes(
ResIDBases=[] # Liste der Wortstaemme der Ergebnisvektoren
,df=pd.DataFrame() # TCsLDSRes...
,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # Liste der Ergebnisvektoren Postfixe
,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,ResValidSeriesAL_S,fResValidSeriesSTAT_S601] # Liste der Ergebnisvektoren Funktionen
,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # Liste der key-Namen der Ergebnisse
,tdAllowed=pd.Timedelta('1 second') # erlaubte Zeitspanne zwischen geht und kommt (die beiden an diese Zeitspanne angrenzenden Zeitbereiche werden als 1 Zeit gewertet)
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
Value: Liste mit Zeitpaaren (oder leere Liste)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
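# Shape of the returned dct, sketched with a hypothetical baseID:
# {'Objects.X.Y.In.': {'Zustaendig': [(tA1,tE1), ...], 'Alarm': [], 'Stoerung': [(tA2,tE2)]}}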
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
,timeShiftPair=None # z.B. (1,'H') bei Replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2=pd.DataFrame()
try:
# "connect" to the App Logs
lx=Lx.AppLog(h5File=h5File)
if hasattr(lx, 'h5FileLDSRes'):
logger.error("{0:s}{1:s}".format(logStr,'In den TCs nur Res und nicht Res1 und Res2?!'))
raise RmError
# zu lesende Daten ermitteln
l=dfSegsNodesNDataDpkt['DruckResIDBase'].unique()
l = l[~pd.isnull(l)]
DruckErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
#
l=dfSegsNodesNDataDpkt['SEGResIDBase'].unique()
l = l[~pd.isnull(l)]
SEGErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
ErgIDs=[*DruckErgIDs,*SEGErgIDs]
# Daten lesen
TCsLDSRes1,TCsLDSRes2=lx.getTCsFromH5s(LDSResOnly=True,LDSResColsSpecified=ErgIDs,timeShiftPair=timeShiftPair)
if timeShiftPair != None:
    (period,freq)=timeShiftPair
    timeDeltaStr="{:d} {:s}".format(period,freq)
    timeDelta=pd.Timedelta(timeDeltaStr)
else:
    # timeShiftPair defaults to None; without it no shift is applied to the CV data
    timeDelta=pd.Timedelta('0 Seconds')
dfCVDataOnly=lx.getCVDFromH5(timeDelta=timeDelta,returnDfCVDataOnly=True)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
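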
def processAlarmStatistikData(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,tdAllowed=None # pd.Timedelta('1 second')
# Alarm geht ... Alarm kommt (wieder): wenn Zeitspanne ... <= tdAllowed, dann wird dies _gewertet als dieselbe Alarmzeitspanne
# d.h. es handelt sich _gewertet inhaltlich um denselben Alarm
# None zählt die Alarme strikt getrennt
):
"""
Returns: SEGResDct,DruckResDct
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Zeiten SEGErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['SEGResIDBase'].unique() if not pd.isnull(baseID)]
SEGResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes1,tdAllowed=tdAllowed)
logger.debug("{:s}SEGResDct: {!s:s}".format(logStr,SEGResDct))
# Zeiten DruckErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['DruckResIDBase'].unique() if not pd.isnull(baseID)]
DruckResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes2,tdAllowed=tdAllowed)
logger.debug("{:s}DruckResDct: {!s:s}".format(logStr,DruckResDct))
# verschiedene Auspraegungen pro Alarmzeit ermitteln
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
for keyStr, colExt in zip(['AL_S_SB_S','AL_S_ZHKNR_S'],['SB_S','ZHKNR_S']):
lGes=[]
if tPairs != []:
for tPair in tPairs:
col=ID+colExt
lSingle=ResSrc.loc[tPair[0]:tPair[1],col]
lSingle=[int(x) for x in lSingle if pd.isnull(x)==False]
lSingle=[lSingle[0]]+[lSingle[i] for i in range(1,len(lSingle)) if lSingle[i]!=lSingle[i-1]]
lGes.append(lSingle)
IDDct[keyStr]=lGes
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def addNrToAlarmStatistikData(
SEGResDct={}
,DruckResDct={}
,dfAlarmEreignisse=pd.DataFrame()
):
"""
Returns: SEGResDct,DruckResDct added with key AL_S_NR
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
# ergänzt:
key: AL_S_NR: value: Liste mit der Nr. (aus dfAlarmEreignisse) pro Alarm (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#
for ResDct, LDSResBaseType in zip([SEGResDct, DruckResDct],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
lNr=[]
if tPairs != []:
ZHKNRnListen=IDDct['AL_S_ZHKNR_S']
for idxAlarm,tPair in enumerate(tPairs):
ZHKNRnListe=ZHKNRnListen[idxAlarm]
ZHKNR=ZHKNRnListe[0]
ae=AlarmEvent(tPair[0],tPair[1],ZHKNR,LDSResBaseType)
Nr=dfAlarmEreignisse[dfAlarmEreignisse['AlarmEvent']==ae]['Nr'].iloc[0]
lNr.append(Nr)
IDDct['AL_S_NR']=lNr
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def processAlarmStatistikData2(
DruckResDct=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
):
"""
Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
Returns: SEGDruckResDct
ResDct:
key: baseID
value: dct
sortiert und direkt angrenzende oder gar ueberlappende Zeiten aus Druckergebnissen zusammenfasst
key: Zustaendig: value: Zeitbereiche, in denen ein Druckergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen ein Druckergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen ein Druckergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
voneinander verschiedene Ausprägungen (sortiert) aus Druckergebnissen
key: AL_S_SB_S: Liste
key: AL_S_ZHKNR_S: Liste
key: AL_S_NR: Liste
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
SEGDruckResDct={}
# merken, ob eine ID bereits bei einem SEG gezählt wurde; die Alarme einer ID sollen nur bei einem SEG gezaehlt werden
IDBereitsGezaehlt={}
# über alle DruckErgs
for idx,(ID,tPairsDct) in enumerate(DruckResDct.items()):
# SEG ermitteln
# ein DruckErg kann zu mehreren SEGs gehoeren z.B. gehoert ein Verzweigungsknoten i.d.R. zu 3 versch. SEGs
tupleLst=getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,ID)
for idxTuple,(DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) in enumerate(tupleLst):
# wenn internes SEG
if SEGOnlyInLDSPara:
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt da dieses SEG intern.".format(logStr,ID,SEGName))
continue
# ID wurde bereits gezählt
if ID in IDBereitsGezaehlt.keys():
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern wurde bereits bei SEGName {:s} gezaehlt.".format(logStr,ID,SEGName,IDBereitsGezaehlt[ID]))
continue
else:
# ID wurde noch nicht gezaehlt
IDBereitsGezaehlt[ID]=SEGName
#if idxTuple>0:
# logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern nur bei SEGName {:s}.".format(logStr,ID,SEGName,tupleLst[0][1]))
# continue
if len(tPairsDct['Alarm'])>0:
logger.debug("{:s}SEGName {:20s}: durch ID {:40s} mit Alarm. Nr des Verweises von ID auf ein Segment: {:d}".format(logStr,SEGName,ID, idxTuple+1))
if SEGResIDBase not in SEGDruckResDct.keys():
# auf dieses SEG wurde noch nie verwiesen
SEGDruckResDct[SEGResIDBase]=deepcopy(tPairsDct) # das Segment erhält die Ergebnisse des ersten Druckvektors der zum Segment gehört
else:
# ergaenzen
# Zeitlisten ergänzen
for idx2,ext in enumerate(ResChannelTypes):
tPairs=tPairsDct[ResChannelResultNames[idx2]]
for idx3,tPair in enumerate(tPairs):
if True: #tPair not in SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]]: # keine identischen Zeiten mehrfach zaehlen
# die Ueberlappung von Zeiten wird weiter unten behandelt
SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]].append(tPair)
# weitere Listen ergaenzen
for ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']:
SEGDruckResDct[SEGResIDBase][ext]=SEGDruckResDct[SEGResIDBase][ext]+tPairsDct[ext]
# Ergebnis: sortieren und dann direkt angrenzende oder gar ueberlappende Zeiten zusammenfassen
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']: # keine Zeiten
pass
else:
tPairs=tPairsDct[ResChannelResultNames[idx2]]
tPairs=sorted(tPairs,key=lambda tup: tup[0])
tPairs=fCombineSubsequenttPairs(tPairs)
SEGDruckResDct[ID][ResChannelResultNames[idx2]]=tPairs
# voneinander verschiedene Ausprägungen (sortiert)
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
v=tPairsDct[ext]
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S']: # Liste von Listen
l=[*{*chain.from_iterable(v)}]
l=sorted(pd.unique(l))
SEGDruckResDct[ID][ext]=l
elif ext in ['AL_S_NR']: # Liste
l=sorted(pd.unique(v))
SEGDruckResDct[ID][ext]=l
else:
pass
logger.debug("{:s}SEGDruckResDct: {!s:s}".format(logStr,SEGDruckResDct))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGDruckResDct
def buildAlarmDataframes(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,SEGResDct={}
,DruckResDct={}
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR']
,NrAsc=[False]+4*[True]
):
"""
Returns dfAlarmEreignisse, dfAlarmStatistik, SEGDruckResDct
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmStatistik=pd.DataFrame()
dfAlarmEreignisse=pd.DataFrame()
try:
# Ereignisse
dfAlarmEreignisse=buildDfAlarmEreignisse(
SEGResDct=SEGResDct
,DruckResDct=DruckResDct
,TCsLDSRes1=TCsLDSRes1
,TCsLDSRes2=TCsLDSRes2
,dfCVDataOnly=dfCVDataOnly
,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
,replaceTup=replaceTup
,NrBy=NrBy
,NrAsc=NrAsc
)
# in dfAlarmEreignisse erzeugte Alarm-Nr. an Dct merken
SEGResDct,DruckResDct=addNrToAlarmStatistikData(
SEGResDct
,DruckResDct
,dfAlarmEreignisse
)
# BZKat der Alarme
def fGetAlarmKat(row):
"""
"""
# baseID des Alarms
baseID=row['OrteIDs'][0]
# dct des Alarms
if row['LDSResBaseType']=='SEG':
dct=SEGResDct[baseID]
else:
dct=DruckResDct[baseID]
# Nrn der baseID
Nrn=dct['AL_S_NR']
# idx dieses Alarms innerhalb der Alarme der baseID
idxAl=Nrn.index(row['Nr'])
# Zustaende dieses alarms
SB_S=dct['AL_S_SB_S'][idxAl]
kat=''
if 3 in SB_S:
kat='instationär'
else:
if 2 in SB_S:
kat = 'schw. instationär'
else:
if 1 in SB_S:
kat = 'stat. Fluss'
elif 4 in SB_S:
kat = 'stat. Ruhe'
return kat
dfAlarmEreignisse['BZKat']=dfAlarmEreignisse.apply(lambda row: fGetAlarmKat(row),axis=1)
# Segment-verdichtete Druckergebnisse
SEGDruckResDct=processAlarmStatistikData2(
DruckResDct
,TCsLDSRes2
,dfSegsNodesNDataDpkt
)
# Alarmstatistik bilden
dfAlarmStatistik=dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]
dfAlarmStatistik=dfAlarmStatistik[['DIVPipelineName','SEGName','SEGNodes','SEGResIDBase']].drop_duplicates(keep='first').reset_index(drop=True)
dfAlarmStatistik['Nr']=dfAlarmStatistik.apply(lambda row: "{:2d}".format(int(row.name)),axis=1)
# SEG
dfAlarmStatistik['FörderZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Zustaendig'])
dfAlarmStatistik['FörderZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Alarm'])
dfAlarmStatistik['FörderZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Stoerung'])
dfAlarmStatistik['FörderZeitenAlAnz']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['FörderZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_SB_S'])
dfAlarmStatistik['FörderZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_NR'])
# Druck (SEG-verdichtet)
dfAlarmStatistik['RuheZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Zustaendig'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Alarm'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Stoerung'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_SB_S'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_NR'] if x in SEGDruckResDct.keys() else [])
#dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAlNrn'].apply(lambda x: len(x))
# je 3 Zeiten bearbeitet
dfAlarmStatistik['FörderZeit']=dfAlarmStatistik['FörderZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeit']=dfAlarmStatistik['RuheZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitAl']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitAl']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitSt']=dfAlarmStatistik['FörderZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitSt']=dfAlarmStatistik['RuheZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse, dfAlarmStatistik,SEGDruckResDct
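# Typical call sequence (sketch; the variable names are placeholders):
# TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(h5File,dfSegsNodesNDataDpkt,timeShiftPair)
# SEGResDct,DruckResDct=processAlarmStatistikData(TCsLDSRes1,TCsLDSRes2,dfSegsNodesNDataDpkt)
# dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct=buildAlarmDataframes(TCsLDSRes1,TCsLDSRes2
#     ,dfSegsNodesNDataDpkt,dfCVDataOnly,SEGResDct,DruckResDct)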
def plotDfAlarmStatistik(
dfAlarmStatistik=pd.DataFrame()
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=dfAlarmStatistik[[
'Nr'
,'DIVPipelineName'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'FörderZeitSt'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
,'RuheZeitSt'
]].copy()
# diese Zeiten um (Störzeiten) annotieren
df['FörderZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['FörderZeit'],row['FörderZeitSt']) if row['FörderZeitSt'] > 0. else row['FörderZeit'] ,axis=1)
df['RuheZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['RuheZeit'],row['RuheZeitSt']) if row['RuheZeitSt'] > 0. else row['RuheZeit'],axis=1)
# LfdNr. annotieren
df['LfdNr']=df.apply(lambda row: "{:2d} - {:s}".format(int(row.Nr)+1,str(row.DIVPipelineName)),axis=1)
# Zeiten Alarm um Alarm-Nrn annotieren
def fAddZeitMitNrn(zeit,lAlNr):
if len(lAlNr) > 0:
if len(lAlNr) <= 3:
return "{!s:s} (Nrn.: {!s:s})".format(zeit,lAlNr)
else:
# mehr als 3 Alarme...
return "{!s:s} (Nrn.: {!s:s}, ...)".format(zeit,lAlNr[0])
else:
return zeit
df['FörderZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['FörderZeitAl'],row['FörderZeitenAlNrn']),axis=1)
df['RuheZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['RuheZeitAl'],row['RuheZeitenAlNrn']),axis=1)
df=df[[
'LfdNr'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
]]
try:
t=plt.table(cellText=df.values, colLabels=df.columns, loc='center')
cols=df.columns.to_list()
colIdxLfdNr=cols.index('LfdNr')
colIdxFoerderZeit=cols.index('FörderZeit')
colIdxFoerderZeitenAlAnz=cols.index('FörderZeitenAlAnz')
colIdxFoerderZeitAl=cols.index('FörderZeitAl')
colIdxRuheZeit=cols.index('RuheZeit')
colIdxRuheZeitenAlAnz=cols.index('RuheZeitenAlAnz')
colIdxRuheZeitAl=cols.index('RuheZeitAl')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer Ueberschrift bei Ueberschrift; col mit 0
if row == 0:
if col in [colIdxRuheZeit,colIdxRuheZeitenAlAnz,colIdxRuheZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='plum')
elif col in [colIdxFoerderZeit,colIdxFoerderZeitenAlAnz,colIdxFoerderZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='lightsteelblue')
if col == colIdxLfdNr:
if row==0:
continue
if 'color' in dfAlarmStatistik.columns.to_list():
color=dfAlarmStatistik['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
if col == colIdxFoerderZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'FörderZeitSt']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxFoerderZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Förderzeit
if df.loc[row-1,'FörderZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # palegoldenrod
#if df.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
if col == colIdxRuheZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'RuheZeitSt']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxRuheZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Ruhezeit
if df.loc[row-1,'RuheZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
pass
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # # palegoldenrod
#if df.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
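# Cell colouring used in the table above (as implemented in the loop):
#   lightgrey   - segment has no Förderzeit / Ruhezeit at all
#   springgreen - time present, no alarms
#   navajowhite - alarms present
#   tomato      - alarm time exceeds 1 % of the Förder-/Ruhezeit
#   goldenrod   - Störzeit exceeds 1 % of the Förder-/Ruhezeit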
def fOrteStripped(LDSResBaseType,OrteIDs):
"""
returns Orte stripped
"""
if LDSResBaseType == 'SEG': # 'Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.']
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_'+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
elif LDSResBaseType == 'Druck': # Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
else:
return None
def fCVDTime(row,dfSEG,dfDruck,replaceTup=('2021-','')):
"""
in:
dfSEG/dfDruck: TCsLDSRes1/TCsLDSRes2
row: Zeile aus dfAlarmEreignisse
von row verwendet:
LDSResBaseType: SEG (dfSEG) oder nicht (dfDruck)
OrteIDs: ==> ID von ZHKNR_S in dfSEG/dfDruck
ZHKNR: ZHKNR
returns:
string: xZeitA - ZeitEx
ZeitA: erste Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
ZeitE: letzte Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
xZeitA, wenn ZeitA die erste Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
xZeitE, wenn ZeitE die letzte Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
in Zeit wurde replaceTup angewendet
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
Time=""
ID=row['OrteIDs'][0]+'ZHKNR_S'
ZHKNR=row['ZHKNR']
if row['LDSResBaseType']=='SEG':
df=dfSEG
else:
df=dfDruck
s=df[df[ID]==ZHKNR][ID] # eine Spalte; Zeilen in denen ZHKNR_S den Wert von ZHKNR trägt
tA=s.index[0] # 1. Zeit
tE=s.index[-1] # letzte Zeit
Time=" {!s:s} - {!s:s} ".format(tA,tE)
try:
if tA==df[ID].dropna().index[0]:
Time='x'+Time.lstrip()
except:
logger.debug("{0:s}Time: {1:s}: x-tA Annotation Fehler; keine Annotation".format(logStr,Time))
try:
if tE==df[ID].dropna().index[-1]:
Time=Time.rstrip()+'x'
except:
logger.debug("{0:s}Time: {1:s}: x-tE Annotation Fehler; keine Annotation".format(logStr,Time))
Time=Time.replace(replaceTup[0],replaceTup[1])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return Time
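# Example of the returned string (hypothetical times, replaceTup=('2021-','') applied):
# " 03-27 03:33:00 - 03-27 09:02:00 "  or with boundary markers  "x03-27 03:33:00 - 03-27 09:02:00x"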
def buildDfAlarmEreignisse(
SEGResDct={}
,DruckResDct={}
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR'] # Sortierspalten für die Nr. der Ereignisse
,NrAsc=[False]+4*[True] # aufsteigend j/n für die o.g. Sortierspalten
):
"""
Returns dfAlarmEreignisse:
Nr: lfd. Nr (gebildet gem. NrBy und NrAsc)
tA: Anfangszeit
tE: Endezeit
tD: Dauer des Alarms
ZHKNR: ZHKNR (die zeitlich 1., wenn der Alarm sich über mehrere ZHKNRn erstreckt)
tD_ZHKNR: Lebenszeit der ZHKNR; x-Annotationen am Anfang/Ende, wenn ZHK beginnt bei Res12-Anfang / andauert bei Res12-Ende; '-1', wenn Lebenszeit nicht ermittelt werden konnte
ZHKNRn: sortierte Liste der ZHKNRn des Alarms; eine davon ist ZHKNR; typischerweise die 1. der Liste
LDSResBaseType: SEG oder Druck
OrteIDs: OrteIDs des Alarms
Orte: Kurzform von OrteIDs des Alarms
Ort: der 1. Ort von Orte
SEGName: Segment zu dem der 1. Ort des Alarms gehört
DIVPipelineName:
Voralarm: ermittelter Voralarm des Alarms; -1, wenn kein Voralarm in Res12 gefunden werden konnte
Type: Typ des Kontrollraumns; z.B. p-p für vollständige Flussbilanzen; '', wenn kein Typ gefunden werden konnte
Name: Name des Bilanzraumes
NrSD: lfd. Nr Alarm BaseType
NrName: lfd. Nr Alarm Name
NrSEGName: lfd. Nr Alarm SEGName
AlarmEvent: AlarmEvent-Objekt
###BZKat: Betriebszustandskategorie des Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmEreignisse=pd.DataFrame()
try:
AlarmEvents=[] # Liste von AlarmEvent
AlarmEventsOrte={} # dct der Orte, die diesen (key) AlarmEvent melden
AlarmEventsZHKNRn={} # dct der ZHKNRn, die zu diesem (key) gehoeren
# über SEG- und Druck-Ergebnisvektoren
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for ResIDBase,dct in ResDct.items():
AL_S=dct['Alarm']
if len(AL_S) > 0:
# eine Erg-ID weist Alarme [(tA,tE),...] auf
# korrespondiernede Liste der ZHKs: [(999,1111),...]
ZHKNRnListen=dct['AL_S_ZHKNR_S']
ID=ResIDBase+'ZHKNR_S' # fuer nachfolgende Ausgabe
# ueber alle Alarme der Erg-ID
for idx,AL_S_Timepair in enumerate(AL_S):
(t1,t2)=AL_S_Timepair # tA, tE
ZHKNR_S_Lst=ZHKNRnListen[idx] # Liste der ZHKs in dieser Zeit
if len(ZHKNR_S_Lst) != 1:
logger.warning(("{:s}ID:\n\t {:s}: Alarm {:d} der ID\n\t Zeit von {!s:s} bis {!s:s}:\n\t Anzahl verschiedener ZHKNRn !=1: {:d} {:s}:\n\t ZHKNR eines Alarms wechselt waehrend eines Alarms. Alarm wird identifiziert mit 1. ZHKNR.".format(logStr,ID
,idx
,t1
,t2
,len(ZHKNR_S_Lst)
,str(ZHKNR_S_Lst)
)))
# die erste wird verwendet
ZHKNR=int(ZHKNR_S_Lst[0])
# AlarmEvent erzeugen
alarmEvent=AlarmEvent(t1,t2,ZHKNR,LDSResBaseType)
if alarmEvent not in AlarmEvents:
# diesen Alarm gibt es noch nicht in der Ereignisliste ...
AlarmEvents.append(alarmEvent)
AlarmEventsOrte[alarmEvent]=[]
AlarmEventsZHKNRn[alarmEvent]=[]
else:
pass
# Ort ergaenzen (derselbe Alarm wird erst ab V83.5.3 nur an einem Ort - dem lexikalisch kleinsten des Bilanzraumes - ausgegeben; zuvor konnte derselbe Alarm an mehreren Orten auftreten)
AlarmEventsOrte[alarmEvent].append(ResIDBase)
# ZHKNR(n) ergaenzen (ein Alarm wird unter 1 ZHKNR geführt)
AlarmEventsZHKNRn[alarmEvent].append(ZHKNR_S_Lst)
# df erzeugen
dfAlarmEreignisse=pd.DataFrame.from_records(
[alarmEvent for alarmEvent in AlarmEvents],
columns=AlarmEvent._fields
)
# Liste der EventOrte erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
l.append(AlarmEventsOrte[alarmEvent])
dfAlarmEreignisse['OrteIDs']=l
# abgekuerzte Orte
dfAlarmEreignisse['Orte']=dfAlarmEreignisse.apply(lambda row: fOrteStripped(row.LDSResBaseType,row.OrteIDs),axis=1)
dfAlarmEreignisse['Ort']=dfAlarmEreignisse['Orte'].apply(lambda x: x[0])
# Liste der ZHKNRn erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
lOfZl=AlarmEventsZHKNRn[alarmEvent]
lOfZ=[*{*chain.from_iterable(lOfZl)}]
lOfZ=sorted(pd.unique(lOfZ))
l.append(lOfZ)
dfAlarmEreignisse['ZHKNRn']=l
# Segmentname eines Ereignisses
dfAlarmEreignisse['SEGName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==row['OrteIDs'][0]]['SEGName'].iloc[0] if row['LDSResBaseType']=='SEG'
else [tuple for tuple in getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,row['OrteIDs'][0]) if not tuple[-1]][0][1],axis=1)
# DIVPipelineName eines Ereignisses
dfAlarmEreignisse['DIVPipelineName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DIVPipelineName'].iloc[0]
,axis=1)
# Alarm: ---
#tA: Anfangszeit
#tE: Endezeit
#ZHKNR: ZHKNR (1. bei mehreren Alarmen)
#LDSResBaseType: SEG oder Druck
# Orte: ---
#OrteIDs: OrteIDs des Alarms
#Orte: Kurzform von OrteIDs des Alarms
#ZHKNRn:
#SEGName: Segmentname
#DIVPipelineName
## Nr.
dfAlarmEreignisse.sort_values(by=NrBy,ascending=NrAsc,inplace=True)
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
#dfAlarmEreignisse['Nr']=dfAlarmEreignisse['Nr']+1
logger.debug("{0:s}{1:s}: {2:s}".format(logStr,'dfAlarmEreignisse',dfAlarmEreignisse.to_string()))
# Voralarm
VoralarmTypen=[]
for index, row in dfAlarmEreignisse.iterrows():
# zur Information bei Ausgaben
OrteIDs=row['OrteIDs']
OrtID=OrteIDs[0]
VoralarmTyp=None
try:
if row['LDSResBaseType']=='SEG':
VoralarmTyp=TCsLDSRes1.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
elif row['LDSResBaseType']=='Druck':
VoralarmTyp=TCsLDSRes2.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
except:
pass
if pd.isnull(VoralarmTyp): # == None: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=-1
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: kein (isnull) Vorlalarm gefunden?! (ggf. neutraler BRWechsel) - Voralarm gesetzt auf: {:d}".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA'],int(VoralarmTyp)))
if int(VoralarmTyp)==0: # == 0: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=0
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: Vorlalarm 0?! (ggf. war Bilanz in Stoerung)".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA']))
if int(VoralarmTyp) not in [-1,0,3,4,10]:
logger.warning("{:s}PV: {:s} Alarm Nr. {:d} {:d} tA {!s:s}: unbekannter Vorlalarm gefunden: {:d}".format(logStr,row['OrteIDs'][0],int(row['Nr']),row['ZHKNR'],row['tA'],int(VoralarmTyp)))
logger.debug("{:s}{:d} {!s:s} VoralarmTyp:{:d}".format(logStr,int(row['Nr']),row['tA'],int(VoralarmTyp)))
VoralarmTypen.append(VoralarmTyp)
dfAlarmEreignisse['Voralarm']=[int(x) for x in VoralarmTypen]
# Type (aus dfCVDataOnly) und Erzeugungszeit (aus dfCVDataOnly) und Name (aus dfCVDataOnly)
dfAlarmEreignisse['ZHKNR']=dfAlarmEreignisse['ZHKNR'].astype('int64')
dfAlarmEreignisse['ZHKNRStr']=dfAlarmEreignisse['ZHKNR'].astype('string')
dfCVDataOnly['ZHKNRStr']=dfCVDataOnly['ZHKNR'].astype('string')
# wg. aelteren App-Log Versionen in denen ZHKNR in dfCVDataOnly nicht ermittelt werden konnte
# Type,ScenTime,Name sind dann undefiniert
dfAlarmEreignisse=pd.merge(dfAlarmEreignisse,dfCVDataOnly,on='ZHKNRStr',suffixes=('','_CVD'),how='left').filter(items=dfAlarmEreignisse.columns.to_list()+['Type'
#,'ScenTime'
,'Name'])
dfAlarmEreignisse=dfAlarmEreignisse.drop(['ZHKNRStr'],axis=1)
dfAlarmEreignisse=dfAlarmEreignisse.fillna(value='')
# lfd. Nummern
dfAlarmEreignisse['NrSD']=dfAlarmEreignisse.groupby(['LDSResBaseType']).cumcount() + 1
dfAlarmEreignisse['NrName']=dfAlarmEreignisse.groupby(['Name']).cumcount() + 1
dfAlarmEreignisse['NrSEGName']=dfAlarmEreignisse.groupby(['SEGName']).cumcount() + 1
# Lebenszeit der ZHKNR
try:
dfAlarmEreignisse['tD_ZHKNR']=dfAlarmEreignisse.apply(lambda row: fCVDTime(row,TCsLDSRes1,TCsLDSRes2,replaceTup),axis=1)
except:
logger.debug("{:s}Spalte tD_ZHKNR (Lebenszeit einer ZHKNR) konnte nicht ermittelt werden. Vmtl. aeltere App-Log Version.".format(logStr))
dfAlarmEreignisse['tD_ZHKNR']='-1'
# Dauer des Alarms
dfAlarmEreignisse['tD']=dfAlarmEreignisse.apply(lambda row: row['tE']-row['tA'],axis=1)
dfAlarmEreignisse['tD']= dfAlarmEreignisse['tD'].apply(lambda x: "{!s:s}".format(x).replace('days','Tage').replace('0 Tage','').replace('Tage','T'))
# AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
dfAlarmEreignisse=dfAlarmEreignisse[['Nr','tA', 'tE','tD','ZHKNR','tD_ZHKNR','ZHKNRn','LDSResBaseType'
,'OrteIDs', 'Orte', 'Ort', 'SEGName','DIVPipelineName'
,'Voralarm', 'Type', 'Name'
,'NrSD', 'NrName', 'NrSEGName'
]]
dfAlarmEreignisse['AlarmEvent']=dfAlarmEreignisse.apply(lambda row: AlarmEvent(row['tA'],row['tE'],row['ZHKNR'],row['LDSResBaseType']),axis=1)
# re-number: the pd.merge above reset the index to 0..n-1 in the sorted row order, so Nr is assigned again here
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fCVDName(Name
):
"""
"""
lName=len(Name)
if len(Name)==0:
Name='ZHKName vmtl. nicht in Log'
lNameMaxH=20
if lName > 2*lNameMaxH:
Name=Name[:lNameMaxH-2]+'....'+Name[lName-lNameMaxH+2:]
Name=Name.replace('°','|')
return Name
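# Examples: fCVDName('') -> 'ZHKName vmtl. nicht in Log';
# names longer than 40 characters are shortened to '<first 18>....<last 18>'; '°' is replaced by '|'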
def plotDfAlarmEreignisse(
dfAlarmEreignisse=pd.DataFrame()
,sortBy=[]
,replaceTup=('2021-','')
,replaceTuptD=('0 days','')
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=dfAlarmEreignisse[['Nr','LDSResBaseType','Voralarm','Type','NrSD','tA','tE','tD','ZHKNR','Name','Orte','tD_ZHKNR','NrName','NrSEGName','SEGName','BZKat']].copy()
df['tA']=df['tA'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
df['tE']=df['tE'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
###df['Anz']=df['Orte'].apply(lambda x: len(x))
##df['Orte']=df['Orte'].apply(lambda x: str(x).replace('[','').replace(']','').replace("'",""))
df['Orte']=df['Orte'].apply(lambda x: str(x[0]))
df['LDSResBaseType']=df.apply(lambda row: "{:s} {:s} - {:d}".format(row['LDSResBaseType'],row['Type'],row['Voralarm']),axis=1)
df=df[['Nr','LDSResBaseType','NrSD','tA','tE','tD','ZHKNR','Name','NrName','NrSEGName','SEGName','tD_ZHKNR','Orte','BZKat']]
df.rename(columns={'LDSResBaseType':'ResTyp - Voralarm'},inplace=True)
df.rename(columns={'tD_ZHKNR':'ZHKZeit','Name':'ZHKName'},inplace=True)
###df['ZHKName']=df['ZHKName'].apply(lambda x: fCVDName(x))
####df['ZHKName']=df['Orte'].apply(lambda x: x[0])
df['NrSEGName (SEGName)']=df.apply(lambda row: "{!s:2s} ({!s:s})".format(row['NrSEGName'],row['SEGName']),axis=1)
df=df[['Nr','ResTyp - Voralarm','NrSD','tA','tD','ZHKNR'
,'Orte' #'ZHKName'
,'BZKat'
,'NrName','NrSEGName (SEGName)','ZHKZeit']]
df.rename(columns={'Orte':'ID'},inplace=True)
df['tD']=df['tD'].apply(lambda x: str(x).replace(replaceTuptD[0],replaceTuptD[1]))
def fGetZHKNRStr(row,dfOrig):
"""
returns:
ZHKNStr in Abhängigkeit der aktuellen Zeile und dfOrig
"""
s=dfOrig[dfOrig['Nr']==row['Nr']].iloc[0]
if len(s.ZHKNRn)>1:
if len(s.ZHKNRn)==2:
return "{:d} ({!s:s})".format(row['ZHKNR'],s.ZHKNRn[1:])
else:
return "{:d} (+{:d})".format(row['ZHKNR'],len(s.ZHKNRn)-1)
else:
return "{:d}".format(row['ZHKNR'])
df['ZHKNR']=df.apply(lambda row: fGetZHKNRStr(row,dfAlarmEreignisse),axis=1)
if sortBy!=[]:
df=df.sort_values(by=sortBy)
t=plt.table(cellText=df.values, colLabels=df.columns
,colWidths=[.03,.1 # Nr ResTyp-Voralarm
,.04 # NrSD
,.08,.08 # tA tD
,.085 # ZHKNR
,.1125,.07 #.1125 # ID BZKat
,.04 # NrName
,.14 # NrSEGName (SEGName)
,.2125] # ZHKZeit
, cellLoc='left'
, loc='center')
t.auto_set_font_size(False)
t.set_fontsize(10)
cols=df.columns.to_list()
#colIdxOrte=cols.index('Orte')
#colIdxName=cols.index('ZHKName')
colIdxNrSD=cols.index('NrSD')
colIdxNrSEG=cols.index('NrSEGName (SEGName)')
# ResTyp - Voralarm
colIdxResTypVA=cols.index('ResTyp - Voralarm')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer Ueberschrift bei Ueberschrift; col mit 0
#if col == colIdxName:
# cellObj.set_text_props(ha='left')
if col == colIdxNrSD:
if row > 0:
if dfAlarmEreignisse.loc[row-1,'LDSResBaseType']=='SEG':
cellObj.set_text_props(backgroundcolor='lightsteelblue')
else:
cellObj.set_text_props(backgroundcolor='plum')
elif col == colIdxNrSEG:
if row==0:
continue
if 'color' in dfAlarmEreignisse.columns.to_list():
color=dfAlarmEreignisse['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
elif col == colIdxResTypVA and row > 0:
pass
if dfAlarmEreignisse.loc[row-1,'Voralarm'] in [10]:
cellObj.set_text_props(backgroundcolor='sandybrown')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [4]:
cellObj.set_text_props(backgroundcolor='pink')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [3]:
cellObj.set_text_props(backgroundcolor='lightcoral')
else:
pass
#cellObj.set_text_props(fontsize=16)
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
def plotDfAlarmStatistikReportsSEGErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,SEGResDct={}
,timeStart=None,timeEnd=None
,SEGErgsFile='SEGErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H' # Runden (1 Stunde)
,timeFloorCeilStrDetailPre='6T' # Runden (6 Minuten)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with FörderZeitenAlAnz>0
1 Base Plot and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr)
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
logger.debug("{0:s}timeStart (ohne timeShift): {1:s} timeEnd (ohne timeShift): {2:s}".format(logStr,str(timeStart),str(timeEnd)))
xlimsDct={}
pdf=PdfPages(SEGErgsFile)
(fileNameBase,ext)= os.path.splitext(SEGErgsFile)
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,row['SEGResIDBase'])
if row['FörderZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("{:s}: FörderZeitenAlAnz: 0".format(titleStr))
continue # keine SEGs ohne Alarme drucken
# Erg lesen
ResIDBase=row['SEGResIDBase']
dfSegReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='SEG',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
ID='AL_S'
if ID not in dfSegReprVec.keys():
continue
idxSEGPlotted=idxSEGPlotted+1
xlimsDct[ResIDBase]=[]
logger.debug("{:s}ResIDBase: {:s} dfSegReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfSegReprVec.columns.to_list()))
# Plot Basis ###########################################################
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
,plotLegend=True
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
if row['FörderZeitenAlAnz'] > 0:
if row['FörderZeitenAlAnz'] <= 3:
txtNr=" Nrn.: {!s:s}".format(row['FörderZeitenAlNrn'])
else:
txtNr=" Nrn.: {!s:s} u.w.".format(row['FörderZeitenAlNrn'][0])
txt=txt+txtNr
else:
txtNr=''
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,txtNr.replace('Nrn.: ','Nrn ').replace(',','').replace('[','').replace(']','').replace('u.w.','u w'))
plt.savefig(fileName)
plt.show()
###plt.clf()
plt.close()
# Plot Alarme ###########################################################
dct=SEGResDct[row['SEGResIDBase']]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['FörderZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
# wenn AlarmRand - PlotRand < 3 Minuten: um 3 Minuten erweitern
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
from abc import ABC, abstractmethod
import warnings
from decimal import Decimal
from tqdm import tqdm
import numpy as np
import pandas as pd
import pandas_ta as ta
from ..util import huf, pdiff
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
FEE = Decimal('0.6') / 100  # 0.6 % fee; string literal avoids float->Decimal precision artifacts
class Stats:
wallet: Decimal = Decimal('1000.00')
losses: int = 0
wins: int = 0
per_day: dict = {
'fee':{}, 'net_profit':{}, 'percent':{}
}
sell_log: list = []
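# Note: the Stats attributes above are class-level defaults; the mutable ones (per_day, sell_log)
# are shared between all Stats instances unless they are re-assigned per instance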
class BacktestBase(ABC):
def __init__(self, csv_file: str, progress_bar: bool = True, debug: bool = False):
self._csv_file = csv_file
self._progress_bar = progress_bar
self._debug = debug
self._df = self._get_dataframe()
self._last_timestamp = None
self.buys = []
self.fee = FEE
self.stats = Stats()
@abstractmethod
def init(self, *args, **kwargs) -> None:
"""Setup any extra initialization here (e.g. apply ema to dataframe)"""
pass
@abstractmethod
def backtest(self, timestamp, row) -> None:
"""Each row is fed into here for applying a backtest strategy on a stream of data"""
pass
def run(self) -> None:
"""Run the backtest"""
for (timestamp, row) in self._next_row():
self.backtest(timestamp, row)
if self.stats.wallet < 1:
break
def _next_row(self) -> tuple:
"""Generate from self._df.iterrows()"""
if self._progress_bar:
progress_bar = tqdm(total=len(self._df))
for (timestamp, row) in self._df.iterrows():
progress_bar.update(1)
self._last_timestamp = timestamp
yield timestamp, row
else:
for (timestamp, row) in self._df.iterrows():
self._last_timestamp = timestamp
yield timestamp, row
def _get_dataframe(self) -> pd.DataFrame:
"""Read csv file using pandas and convert and set index to timestamp col
Expects form:
"timestamp","low","high","open","close","volume"
"""
df = pd.read_csv(self._csv_file)
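# Sketch of the remaining steps implied by the docstring (assumption, not part of this snippet):
# df['timestamp'] = pd.to_datetime(df['timestamp'])
# df = df.set_index('timestamp')
# return df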
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from tasrif.processing_pipeline.custom import MergeFragmentedActivityOperator
from tasrif.processing_pipeline.custom import CreateFeatureOperator
# +
df = pd.DataFrame([
[0,2,354,27,5,386,0.91,'2016-03-27 03:33:00', '2016-03-27 09:02:00'],
[0,4,312,23,7,321,0.93,'2016-03-28 00:40:00', '2016-03-28 01:56:00'],
[0,5,312,35,7,193,0.93,'2016-03-29 00:40:00', '2016-03-29 01:56:00'],
[0,7,312,52,7,200,0.93,'2016-05-21 00:40:00', '2016-05-21 01:56:00'],
[0,8,312,12,7,43,0.93,'2016-05-23 01:57:00', '2016-05-23 01:58:00'],
[0,9,312,42,7,100,0.93,'2016-05-23 01:59:00', '2016-05-23 01:59:30'],
[0,9,312,21,7,302,0.93,'2016-05-23 03:00:00', '2016-05-23 03:59:30'],
[0,10,312,16,7,335,0.93,'2016-05-23 10:57:00', '2016-05-23 20:58:00'],
[0,11,312,16,7,335,0.93,'2016-10-24 00:58:00', '2016-05-24 01:58:00'],
[1,3,312,16,7,335,0.93,"2016-03-14 08:12:00","2016-03-14 10:15:00"],
[1,4,272,26,5,303,0.89,"2016-03-16 03:12:00","2016-03-16 08:14:00"],
[1,5,61,2,0,63,0.96,"2016-03-16 19:43:00","2016-03-16 20:45:00"],
[1,6,402,34,1,437,0.91,"2016-03-17 01:16:00","2016-03-17 08:32:00"],
],
columns=["Id",
"logId",
"Total Minutes Asleep",
"Total Minutes Restless",
"Total Minutes Awake",
"Total Minutes in Bed",
"Sleep Efficiency",
"Sleep Start",
"Sleep End"])
df['Sleep Start'] = pd.to_datetime(df['Sleep Start'])
df['Sleep End'] = pd.to_datetime(df['Sleep End'])
# -
df
# +
aggregation_definition = {
'logId': lambda df: df.iloc[0],
'Total Minutes Asleep': np.sum,
'Total Minutes Restless': np.sum,
'Total Minutes Awake': np.sum,
'Total Minutes in Bed': np.sum,
}
operator = MergeFragmentedActivityOperator(
participant_identifier='Id',
start_date_feature_name='Sleep Start',
end_date_feature_name='Sleep End',
threshold="3 hour",
aggregation_definition=aggregation_definition)
df = operator.process(df)[0]
df
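# The operator above presumably merges consecutive sleep fragments of the same Id whose gap lies
# within the 3-hour threshold, combining columns per aggregation_definition (first logId, minute sums);
# see the test-case comments below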
# +
# Test cases:
# seq1: a sequence of [0, 0] no merge
# seq2: a sequence of [1, 0] no merge
# seq3: a sequence of [0, 1] merge
# seq4: a sequence of [1, 1] no merge
operator = MergeFragmentedActivityOperator(
participant_identifier='Id',
start_date_feature_name='Sleep Start',
end_date_feature_name='Sleep End',
threshold="3 hour",
return_before_merging=False)
seq1 = pd.DataFrame([
[0, '2016-03-27 03:33:00', '2016-03-27 03:34:00'],
[0, '2016-03-27 03:34:00', '2016-03-28 03:54:00'],
], columns=["Id", "Sleep Start", "Sleep End"])
seq1['Sleep Start'] = pd.to_datetime(seq1['Sleep Start'])
seq1['Sleep End'] = pd.to_datetime(seq1['Sleep End'])
import pandas as pd
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib import learn
import tensorflow as tf
def input_fn(df):
feature_cols = {}
feature_cols['Weight'] = tf.constant(df['Weight'].values)
feature_cols['Species'] = tf.SparseTensor(
indices=[[i, 0] for i in range(df['Species'].size)],
values=df['Species'].values,
dense_shape=[df['Species'].size, 1]
)
labels = tf.constant(df['Height'].values)
return feature_cols, labels
N = 10000
weight = np.random.randn(N) * 5 + 70
spec_id = np.random.randint(0, 3, N)
bias = [0.9, 1., 1.1]
height = np.array([weight[i]/100 + bias[b] for i,b in enumerate(spec_id)])
spec_name = ['Goblin', 'Human', 'ManBears']
spec = [spec_name[s] for s in spec_id]
df = pd.DataFrame({'Species': spec, 'Weight': weight, 'Height': height})
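# A possible continuation (sketch only; the feature-column/estimator calls are assumptions based on
# the old tf.contrib.learn API, not taken from this snippet):
# species_col = layers.sparse_column_with_keys('Species', keys=spec_name)
# weight_col = layers.real_valued_column('Weight')
# reg = learn.LinearRegressor(feature_columns=[species_col, weight_col])
# reg.fit(input_fn=lambda: input_fn(df), steps=5000)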
import logging
import os
import re
import shutil
import warnings
from datetime import datetime
from typing import Union
import h5py
import numpy as np
import pandas as pd
from omegaconf import DictConfig
from deepethogram.utils import get_subfiles
from deepethogram.zscore import zscore_video
from . import utils
from .file_io import read_labels
log = logging.getLogger(__name__)
required_keys = ['project', 'augs']
projects_file_directory = os.path.dirname(os.path.abspath(__file__))
def initialize_project(directory: Union[str, os.PathLike], project_name: str, behaviors: list = None,
make_subdirectory: bool=True,
labeler: str = None):
"""Initializes a DeepEthogram project.
Copies the default configuration file and updates it with the directory, name, and behaviors specified.
Makes directories where project info, data, and models will live.
Args:
directory: str, os.PathLike
Directory where DeepEthogram data and models will be made / copied. Should be on an SSD. Should
also have plenty of space.
project_name: str
name of the deepethogram project
behaviors: optional list.
First should be background.
make_subdirectory: bool
if True, make a subdirectory like "/path/to/DATA/project_name_deepethogram"
if False, keep as the input directory: "/path/to/DATA"
Example:
initialize_project('C:\DATA', 'grooming', ['background', 'face_groom', 'body_groom', 'rear'])
"""
assert os.path.isdir(directory), 'Directory does not exist: {}'.format(directory)
if behaviors is not None:
assert behaviors[0] == 'background'
root = os.path.dirname(os.path.abspath(__file__))
project_config = utils.load_yaml(os.path.join(root, 'conf', 'project', 'project_config.yaml'))
project_name = project_name.replace(' ', '_')
project_config['project']['name'] = project_name
project_config['project']['class_names'] = behaviors
if make_subdirectory:
project_dir = os.path.join(directory, '{}_deepethogram'.format(project_name))
else:
project_dir = directory
project_config['project']['path'] = project_dir
project_config['project']['data_path'] = 'DATA'
project_config['project']['model_path'] = 'models'
project_config['project']['labeler'] = labeler
if not os.path.isdir(project_config['project']['path']):
os.makedirs(project_config['project']['path'])
os.chdir(project_config['project']['path'])
if not os.path.isdir(project_config['project']['data_path']):
os.makedirs(project_config['project']['data_path'])
if not os.path.isdir(project_config['project']['model_path']):
os.makedirs(project_config['project']['model_path'])
fname = os.path.join(project_dir, 'project_config.yaml')
project_config['project']['config_file'] = fname
utils.save_dict_to_yaml(project_config, fname)
return project_config
def add_video_to_project(project: dict, path_to_video: Union[str, os.PathLike], mode: str = 'copy') -> str:
"""
Adds a video file to a DEG project.
1. Copies the video file to the project's data directory
2. initializes a record.yaml file
3. Computes per-channel image statistics (for input normalization)
Parameters
----------
project: dict
pre-loaded configuration dictionary
path_to_video: str, PathLike
absolute path to a video file. Filetype must be acceptable to deepethogram.file_io.VideoReader
mode: str
if 'copy': copies files to new directory
if 'symlink': tries to make a symlink from the old location to the new location. NOT RECOMMENDED. if you delete
the video in its current location, the symlink will break, and we will have errors during training or
inference
if 'move': moves the file
Returns
-------
new_path: str
path to the video file after moving to the DEG project data directory.
"""
# assert (os.path.isdir(project_directory))
assert os.path.isfile(path_to_video), 'video not found! {}'.format(path_to_video)
assert mode in ['copy', 'symlink', 'move']
# project = utils.load_yaml(os.path.join(project_directory, 'project_config.yaml'))
# project = convert_config_paths_to_absolute(project)
log.debug('configuration file when adding video: {}'.format(project))
datadir = os.path.join(project['project']['path'], project['project']['data_path'])
assert os.path.isdir(datadir), 'data path not found: {}'.format(datadir)
basename = os.path.basename(path_to_video)
vidname = os.path.splitext(basename)[0]
video_directory = os.path.join(datadir, vidname)
if os.path.isdir(video_directory):
raise ValueError('Directory {} already exists in your data dir! ' \
'Please rename the video to a unique name'.format(vidname))
os.makedirs(video_directory)
new_path = os.path.join(video_directory, basename)
if mode == 'copy':
shutil.copy(path_to_video, new_path)
elif mode == 'symlink':
os.symlink(path_to_video, new_path)
elif mode == 'move':
shutil.move(path_to_video, new_path)
record = parse_subdir(video_directory)
log.debug('New record after adding: {}'.format(record))
utils.save_dict_to_yaml(record, os.path.join(video_directory, 'record.yaml'))
zscore_video(os.path.join(video_directory, basename), project)
return new_path
def add_label_to_project(path_to_labels: Union[str, os.PathLike],
path_to_video) -> str:
"""Adds an externally created label file to the project. Updates record"""
assert os.path.isfile(path_to_labels)
assert os.path.isfile(path_to_video)
assert is_deg_file(path_to_video)
viddir = os.path.dirname(path_to_video)
label_dst = os.path.join(viddir, os.path.basename(path_to_labels))
if os.path.isfile(label_dst):
warnings.warn('Label already exists in destination {}, overwriting...'.format(label_dst))
df = pd.read_csv(path_to_labels, index_col=0)
if 'none' in list(df.columns):
df = df.rename(columns={'none': 'background'})
if 'background' not in list(df.columns):
array = df.values
is_background = np.logical_not(np.any(array == 1, axis=1)).astype(int)
df2 = pd.DataFrame(data=is_background, columns=['background'])
df = pd.concat([df2, df], axis=1)
df.to_csv(label_dst)
record = parse_subdir(viddir)
utils.save_dict_to_yaml(record, os.path.join(viddir, 'record.yaml'))
return label_dst
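
# Minimal sketch (assumptions flagged): build a toy per-frame label CSV with pandas and attach
# it to a video that already lives in a DEG subdirectory (i.e. next to a record.yaml). Both
# paths below are hypothetical.
def _example_add_labels():
    toy_labels = pd.DataFrame({'background': [1, 0, 0], 'face_groom': [0, 1, 1]})
    toy_labels.to_csv('/tmp/mouse01_labels.csv')
    return add_label_to_project('/tmp/mouse01_labels.csv',
                                '/path/to/project_deepethogram/DATA/mouse01/mouse01.avi')
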
def add_file_to_subdir(file: Union[str, os.PathLike], subdir: Union[str, os.PathLike]):
"""If you save or move a file into a DEG subdirectory, update the record"""
if not is_deg_file(subdir):
raise ValueError('directory is not a DEG subdir: {}'.format(subdir))
assert (os.path.isfile(file))
if os.path.dirname(file) != subdir:
shutil.copy(file, os.path.join(subdir, os.path.basename(file)))
record = parse_subdir(subdir)
utils.save_dict_to_yaml(record, os.path.join(subdir, 'record.yaml'))
def change_project_directory(config_file: Union[str, os.PathLike], new_directory: Union[str, os.PathLike]):
"""If you move the project directory to some other location, updates the config file to have the new directories"""
assert os.path.isfile(config_file)
assert os.path.isdir(new_directory)
# make sure that new directory is properly formatted for deepethogram
datadir = os.path.join(new_directory, 'DATA')
model_path = os.path.join(new_directory, 'models')
assert os.path.isdir(datadir)
assert os.path.isdir(model_path)
project_config = utils.load_yaml(config_file)
project_config['project']['path'] = new_directory
project_config['project']['model_path'] = os.path.basename(model_path)
project_config['project']['data_path'] = os.path.basename(datadir)
project_config['project']['config_file'] = os.path.join(new_directory, 'project_config.yaml')
utils.save_dict_to_yaml(project_config, project_config['project']['config_file'])
def remove_video_from_project(config_file, video_file=None, record_directory=None):
# TODO: remove video from split dictionary, remove mean and std from project statistics
raise NotImplementedError
def is_deg_file(filename: Union[str, os.PathLike]) -> bool:
"""Quickly assess if a file is part of a well-formatted subdirectory with a records.yaml"""
if os.path.isdir(filename):
basedir = filename
elif os.path.isfile(filename):
basedir = os.path.dirname(filename)
else:
raise ValueError('submit directory or file to is_deg_file, not {}'.format(filename))
recordfile = os.path.join(basedir, 'record.yaml')
return os.path.isfile(recordfile)
def add_behavior_to_project(config_file: Union[str, os.PathLike], behavior_name: str):
""" Adds a behavior (class) to the project.
Adds this behavior to the class_names field of your project configuration.
    Adds a -1 column to all label files in the current project.
Saves the altered project_config to disk.
Parameters
----------
config_file: str, PathLike
path to the project config file
behavior_name: str
behavior to add to the project.
"""
assert (os.path.isfile(config_file))
project_config = utils.load_yaml(config_file)
assert 'class_names' in list(project_config['project'].keys())
classes = project_config['project']['class_names']
assert behavior_name not in classes
classes.append(behavior_name)
records = get_records_from_datadir(os.path.join(project_config['project']['path'],
project_config['project']['data_path']))
for key, record in records.items():
labelfile = record['label']
if labelfile is None:
continue
if os.path.isfile(labelfile):
df = pd.read_csv(labelfile, index_col=0)
label = df.values
N, K = label.shape
# label = np.concatenate((label, np.ones((N, 1))*-1), axis=1)
df2 = pd.DataFrame(data=np.ones((N, 1)) * -1, columns=[behavior_name])
df = pd.concat([df, df2], axis=1)
df.to_csv(labelfile)
project_config['project']['class_names'] = classes
utils.save_dict_to_yaml(project_config, config_file)
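
# Hedged example: add a new behavior and, if it turns out to be unnecessary, remove it again
# with remove_behavior_from_project (defined below). The config path and class name are
# hypothetical.
def _example_edit_behaviors(config_file='/path/to/project_config.yaml'):
    add_behavior_to_project(config_file, 'scratch')
    # ... after inspecting some videos, drop the class again
    remove_behavior_from_project(config_file, 'scratch')
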
def remove_behavior_from_project(config_file: Union[str, os.PathLike], behavior_name: str):
"""Removes behavior (class) from existing project.
Removes behavior name from project configuration file.
Decrements num_behaviors by one.
Removes this column from existing label files.
Saves altered project configuration to disk.
Parameters
----------
config_file: str, PathLike
path to a deepethogram configuration file
behavior_name: str
One of the existing behavior_names.
"""
if behavior_name == 'background':
raise ValueError('Cannot remove background class.')
assert (os.path.isfile(config_file))
project_config = utils.load_yaml(config_file)
assert 'class_names' in list(project_config['project'].keys())
classes = project_config['project']['class_names']
assert behavior_name in classes
records = get_records_from_datadir(os.path.join(project_config['project']['path'],
project_config['project']['data_path']))
for key, record in records.items():
labelfile = record['label']
if labelfile is None:
continue
if os.path.isfile(labelfile):
df =
|
pd.read_csv(labelfile, index_col=0)
|
pandas.read_csv
|
import os
import numpy as np
import pandas as pd
from common.data_source_from_bundle import __td__, __ds__
def dataframe_to_ndarray(df):
"""
    pd.DataFrame to ndarray: drop trade_date and wind_code, and turn the remaining n columns into an n*4000 ndarray
    :param df: input DataFrame
:return: ndarray
"""
columns = df.columns
assert ("wind_code" in columns)
result = pd.merge(pd.DataFrame({'wind_code': __ds__.codes}), df, on=['wind_code'], how="left")
res = np.r_[[np.array(result[col]) for col in columns if col not in ["wind_code", "trade_date"]]]
if res.shape[0] == 1:
return res.ravel()
else:
return res
def ndarray_to_dataframe(array, **kwargs):
"""
ndarray to pd.DataFrame
    :param array: input ndarray
:param kwargs: column_name = str
:return: pd.DataFrame
"""
return pd.DataFrame({'wind_code': __ds__.codes, kwargs.get("column_name", "value"): array})
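
# Minimal round-trip sketch (added for illustration): build a one-column factor DataFrame
# aligned with the bundle's code universe, flatten it with dataframe_to_ndarray, and convert
# it back. Assumes __ds__.codes is available from the data bundle; the column name 'factor'
# is a placeholder.
def _example_roundtrip():
    factor_df = pd.DataFrame({'wind_code': __ds__.codes,
                              'factor': np.random.randn(len(__ds__.codes))})
    arr = dataframe_to_ndarray(factor_df)  # 1-D array, one value per wind_code
    return ndarray_to_dataframe(arr, column_name='factor')
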
def read_holding(trade_date, strategy_path, offset=0, return_type="numpy"):
"""
    Read position (holding) data
:param trade_date:
:param strategy_path:
    :param offset: offset of the holding date; offset=1 means the previous trading day
:param return_type: numpy or pandas
:return:
"""
date = __td__.get_previous_trading_date(trade_date, offset)
holding_path = os.path.join(strategy_path, "holding", "%s.csv" % date)
    # open the file explicitly so that paths containing Chinese characters can also be read
with open(holding_path, "r") as infile:
holding_df =
|
pd.read_csv(infile)
|
pandas.read_csv
|
# encoding: utf-8
from opendatatools.common import RestAgent
from progressbar import ProgressBar
import demjson
import json
import pandas as pd
fund_type = {
"全部开放基金" : {"t": 1, "lx": 1},
"股票型基金" : {"t": 1, "lx": 2},
"混合型基金" : {"t": 1, "lx": 3},
"债券型基金" : {"t": 1, "lx": 4},
"指数型基金" : {"t": 1, "lx": 5},
"ETF联接基金" : {"t": 1, "lx": 6},
"LOF基金" : {"t": 1, "lx": 8},
"分级基金" : {"t": 1, "lx": 9},
"FOF基金" : {"t": 1, "lx": 15},
"理财基金" : {"t": 5},
"分级A" : {"t": 6},
"货币基金" : {"t": 7},
}
class EastMoneyAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def _get_and_parse_js(self, url, prefix, param=None):
response = self.do_request(url, param=param)
if not response.startswith(prefix):
return None
else:
return response[len(prefix):]
def get_fund_company(self):
url = 'http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?t=3'
prefix = 'var gs='
response = self._get_and_parse_js(url, prefix)
if response is None:
return None, '获取数据失败'
jsonobj = demjson.decode(response)
df = pd.DataFrame(jsonobj['op'])
df.columns = ['companyid', 'companyname']
return df, ''
def _get_fund_list_onepage(self, company='', page_no = 1, page_size = 100):
url = 'http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?page=%d,%d&gsid=%s' % (page_no, page_size, company)
prefix = 'var db='
        response = self._get_and_parse_js(url, prefix)
if response is None:
return None, '获取数据失败'
jsonobj = demjson.decode(response)
rsp = jsonobj['datas']
datestr = jsonobj['showday']
df = pd.DataFrame(rsp)
if len(df) > 0:
df.drop(df.columns[5:], axis=1, inplace=True)
df.columns = ['fundcode', 'fundname', 'pingyin', 'nav', 'accu_nav']
df['date'] = datestr[0]
return df, ''
else:
return None, ''
def get_fundlist_by_company(self, companyid):
page_no = 1
page_size = 1000
df_result = []
while True:
df, msg = self._get_fund_list_onepage(company=companyid, page_no=page_no, page_size=page_size)
if df is not None:
df_result.append(df)
if df is None or len(df) < page_size:
break
page_no = page_no + 1
if len(df_result) > 0:
return pd.concat(df_result), ''
else:
return None, ''
def get_fund_list(self):
df_company, msg = self.get_fund_company()
if df_company is None:
return None, msg
df_result = []
process_bar = ProgressBar().start(max_value=len(df_company))
for index, row in df_company.iterrows():
companyid = row['companyid']
companyname = row['companyname']
df, msg = self.get_fundlist_by_company(companyid)
if df is not None:
df['companyname'] = companyname
df['companyid'] = companyid
df_result.append(df)
process_bar.update(index+1)
return pd.concat(df_result), ''
def get_fund_type(self):
return fund_type.keys()
def _get_fundlist_by_type_page(self, type, page_no = 1, page_size = 100):
url = 'http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?page=%d,%d' % (page_no, page_size)
prefix = 'var db='
type_param = fund_type[type]
response = self._get_and_parse_js(url, prefix, param=type_param)
jsonobj = demjson.decode(response)
rsp = jsonobj['datas']
datestr = jsonobj['showday']
df =
|
pd.DataFrame(rsp)
|
pandas.DataFrame
|
# %%%%
import pandas as pd
import numpy as np
import re
# %%%% functions
## Fill missing values
def fillmissing(x,col,index,benchmark):
for i in range(index,len(x)):
# find missing value
if x.loc[i,col] == benchmark:
# if first is missing, fill using the value next to it
if i == index:
x.loc[i,col] = x.loc[i+1,col]
            # if the last one is missing, fill using the value that precedes it
elif i == len(x)-1:
x.loc[i,col] = x.loc[i-1,col]
# otherwise, fill using the average of the two not null values above and after
else:
j = i-1
k = i+1
while x.loc[j,col] == benchmark:
j -= 1
while x.loc[k,col] == benchmark:
k += 1
x.loc[i,col] = np.mean([x.loc[j,col],x.loc[k,col]])
return x
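
# Small illustration (not part of the original script): fillmissing treats the benchmark value
# (0 here) as "missing" and replaces it with the mean of the nearest valid neighbours above
# and below. The toy frame is hypothetical.
def _example_fillmissing():
    toy = pd.DataFrame({'Price': [10.0, 0.0, 14.0, 0.0, 0.0, 20.0]})
    return fillmissing(toy, 'Price', 0, 0)  # -> 10, 12, 14, 17, 18.5, 20
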
## Data Preprocess
def preprocess(x,name,Date,column,index,benchmark,q):
# select the valid starting day
x = x[x['Date'] > Date].copy()
x = x.reset_index().copy()
x = x.drop('index',axis = 1).copy()
# fill na with benchmark we chose
x[column] = x[column].fillna(benchmark).copy()
# fill missing values
x = fillmissing(x,column,index,benchmark).copy()
# calculate daily return
x['lag_'+column] = x[column].shift(1)
x = x.iloc[1:,:].copy().reset_index()
x = x.drop('index',axis = 1).copy()
x['log_ret'] = np.log(x[column])-np.log(x['lag_'+column])
retm = np.mean(x['log_ret'])
x['retv'] = np.square(x['log_ret']-retm)*100
# estimate volatility
x[name+'_20day_vol'] = np.sqrt(x['retv'].rolling(window=20,win_type="boxcar").mean())/10
# estimate quantiles of the distribution of log-returns
x[name+'_quant_ret'] = np.nan
for r in range(len(x)-20):
R_quant = np.quantile(x['log_ret'][r:r+20],q)
x.loc[r+19,name+'_quant_ret'] = R_quant
return x
# %%%% Main Dataset: csi300
csi = pd.read_csv('/Users/msstark/Desktop/project/Shanghai Shenzhen CSI 300 Historical Data.csv')
# setting date format
csi['Date'] = csi['Date'].apply(lambda x: re.sub(r',',r'',x))
csi['Day'] = csi['Date'].apply(lambda x: x.split(' ')[1]).astype(int)
csi['Month'] = csi['Date'].apply(lambda x: x.split(' ')[0])
csi['Month'].unique()
csi['Month'] = csi['Month'].map({'Jan':1,'Feb':2,'Mar':3,'Apr':4,'May':5,'Jun':6,
'Jul':7,'Aug':8,'Sep':9,'Oct':10,'Nov':11,'Dec':12})
csi['Year'] = csi['Date'].apply(lambda x: x.split(' ')[2]).astype(int)
csi['Date'] = csi['Year'].astype(str) +'-'+csi['Month'].astype(str)+'-'+csi['Day'].astype(str)
csi['Date'] =
|
pd.to_datetime(csi['Date'], format='%Y-%m-%d')
|
pandas.to_datetime
|
import requests
import json
import pandas as pd
from apscheduler.schedulers.blocking import BlockingScheduler
import apscheduler.schedulers.blocking
from datetime import datetime,timedelta
import time
import sqlalchemy
import sys
import numpy as np
# periodicity (in minutes) at which the operation is performed
periodicidade = 1
# indicates whether the application is running inside Docker: True = inside Docker, False = outside Docker
interno = False
# If True, delete the data from the temporary table after processing it
deleteAfterInsert = False
# Database connection string template / engine
database_connection = 'mysql+mysqldb://{user}:{password}@{server}:{port}/{database}'
def perMinuteTicker():
    # Get the current UTC time (candle close time)
dtfechamento = datetime.utcnow() - timedelta(seconds=1)
    # Get the candle open time
dtabertura = dtfechamento - timedelta(minutes=periodicidade)
    # Fetch the rows from the TEMP table in the database
querySelect = "SELECT * \
FROM TEMP WHERE DATETIME BETWEEN '%Y-%m-%d %H:%M:%S' AND '#Y-#m-#d #H:#M:#S'"
querySelect = dtabertura.strftime(querySelect)
querySelect = querySelect.replace('#','%')
querySelect = dtfechamento.strftime(querySelect)
    # Build a DataFrame from the fetched rows
dataframe = pd.read_sql(querySelect,con=database_connection)
dataframe['DATETIME']= pd.to_datetime(dataframe['DATETIME'])
    # Select the candle open prices for all cryptocurrencies
abertura = dataframe[(dataframe['DATETIME'] == dtabertura.strftime('%Y-%m-%d %H:%M:%S'))]
abertura = abertura[['CRIPTOMOEDA', 'PRECO']]
abertura.rename(columns={'PRECO': 'ABERTURA'}, inplace=True)
    # Select the candle close prices for all cryptocurrencies
fechamento = dataframe[(dataframe['DATETIME'] == (dtfechamento - timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S'))]
fechamento = fechamento[['CRIPTOMOEDA', 'PRECO']]
fechamento.rename(columns={'PRECO': 'FECHAMENTO'}, inplace=True)
    # Select the candle high prices for all cryptocurrencies
high = dataframe[['CRIPTOMOEDA', 'PRECO']]
high = high.groupby("CRIPTOMOEDA").max()
high.rename(columns={'PRECO': 'MAXIMO'}, inplace=True)
    # Select the candle low prices for all cryptocurrencies
low = dataframe[['CRIPTOMOEDA', 'PRECO']]
low = low.groupby("CRIPTOMOEDA").min()
low.rename(columns={'PRECO': 'MINIMO'}, inplace=True)
    # Merge the open, close, high and low DataFrames
ticker = pd.merge(pd.merge(
|
pd.merge(abertura,fechamento,on='CRIPTOMOEDA')
|
pandas.merge
|
import requests
import base64
import gzip
import bz2
from pathlib import Path
import pandas as pd
from multiprocessing import Pool
magic_dict = {
b"\x1f\x8b\x08": (gzip.open, 'rb'),
b"\x42\x5a\x68": (bz2.BZ2File, 'r'),
}
max_len = max(len(x) for x in magic_dict)
def open_by_magic(filename):
with open(filename, "rb") as f:
file_start = f.read(max_len)
for magic, (fn, flag) in magic_dict.items():
if file_start.startswith(magic):
return fn(filename, flag)
return open(filename, 'r')
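
# Hedged sketch: open_by_magic inspects the first bytes of a file and transparently picks
# gzip, bz2 or plain-text opening. The assembly path below is a hypothetical example.
def _example_read_assembly(path='genomes/sample1.fasta.gz'):
    handle = open_by_magic(path)
    try:
        return handle.read()
    finally:
        handle.close()
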
equiv = {key.split('\t')[0]: (key.split('\t')[1], key.split('\t')[2].strip()) for key in open('fastmlst_pubmlst_schnum.tsv').readlines()}
def get_st_pubmlst(assembly, scheme):
fasta = open_by_magic(assembly).read()
data = {
'base64': 'true',
'sequence': base64.b64encode(fasta).decode()
}
pasteur = ['kingella', 'staphlugdunensis', 'listeria']
if equiv[scheme][0] in pasteur:
endpoint = f'https://bigsdb.pasteur.fr/api/db/pubmlst_{equiv[scheme][0]}_seqdef/schemes/{equiv[scheme][1]}/sequence'
else:
endpoint = f'https://rest.pubmlst.org/db/pubmlst_{equiv[scheme][0]}_seqdef/schemes/{equiv[scheme][1]}/sequence'
r = requests.post(endpoint, json=data)
if 'fields' in r.json().keys(): # Found a ST, make a tuple (Genome, Scheme, ST, {alleles})
rjson = r.json()
response = {
'Genome':assembly.name,
'Scheme': scheme,
'ST': rjson['fields']['ST'],
}
for allel, var in rjson['exact_matches'].items():
response[allel] = var[0]['allele_id']
return(response)
elif 'exact_matches' in r.json().keys():
rjson = r.json()
response = {
'Genome':assembly.name,
'Scheme': scheme,
'ST': '-',
}
for allel, var in rjson['exact_matches'].items():
response[allel] = var[0]['allele_id']
return(response)
else:
        print('Unexpected pubmlst response for:', assembly, scheme)
print(endpoint)
print(r)
def process_dir(dirname):
directory = Path(str(dirname))
output = Path('pubmlst_output')
fastas = list(directory.glob('*.gz'))
schemename = dirname.name
outfile = f'{schemename}.csv'
datadic = []
i = 1
if not (output/outfile).is_file():
for fasta in fastas:
datadic.append(get_st_pubmlst(fasta, schemename))
            print(f'{schemename}: {i} of {len(fastas)}')
i += 1
dfout =
|
pd.DataFrame(datadic)
|
pandas.DataFrame
|
'''This file holds all relevant functions necessary for starting the data analysis.
An object class for all account data is established, which will hold the raw data after import,
the processed data and all subdata configuration necessary for plotting.
The account data is provided through the account identification process in account_ident.py
Necessary functions for holiday extraction, roundies calculation as well as merging and cashbook linkage are provided in the Accounts class.
An Excel file is exported at the end.'''
import datetime
import locale
import os
import platform
import numpy as np
import pandas as pd
from basefunctions import account_ident
if platform.system() == 'Windows':
locale.setlocale(locale.LC_ALL, 'German')
FOLDER_SEP = '\\'
elif platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'de_DE.utf-8')
FOLDER_SEP = '/'
else:
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
FOLDER_SEP = '/'
#_______________________________________ read in longterm data for training machine learning algorithm _______________
def longtermdata_import(path, decrypt_success):
if decrypt_success:
longterm_data = pd.read_csv(path, sep=';', parse_dates=[0, 1])
else:
empty_dataframe = {'time1':np.datetime64, 'time2':np.datetime64, 'act':str, 'text':str, 'val':float, 'month':str, 'cat':str, 'main cat':str, 'acc_name':str}
longterm_data = pd.DataFrame(columns=empty_dataframe.keys()).astype(empty_dataframe)
#extract saved account names in longterm_data
saved_accnames = list(longterm_data['acc_name'].unique())
saved_dataframe = {} #stored dataframes from import
for account_name in saved_accnames: #iterate through list with indices
saved_dataframe[account_name] = longterm_data.loc[longterm_data['acc_name'] == account_name] #get saved dataframes
return saved_dataframe
def longterm_export(path, saved_dataframe):#needs to be outside class in case program is closed before data integration
longterm_data = pd.DataFrame(columns=['time1', 'time2', 'act', 'text', 'val', 'month', 'cat', 'main cat', 'acc_name'])
for account_name in saved_dataframe.keys():
account_name_concat = saved_dataframe[account_name]
account_name_concat['acc_name'] = account_name #set account name in dataframe to be saved
        longterm_data = pd.concat([longterm_data, account_name_concat]) #concatenated data
longterm_data.to_csv(path, index=False, sep=';') #export data
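
# Minimal sketch (path hypothetical): reload previously exported long-term data and write it
# back out unchanged; decrypt_success=True assumes the CSV was readable.
def _example_longterm_roundtrip(path='longterm_data.csv'):
    saved = longtermdata_import(path, decrypt_success=True)
    longterm_export(path, saved)
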
class AccountsData:
def __init__(self, dir_result, classifier_class, langdict, saved_dataframe):
self.langdict = langdict
## set language variable
if self.langdict['result_pathvars'][0] == 'Ergebnisse_':
self.lang_choice = 'deu'
else:
self.lang_choice = 'eng'
#change locale to English
if platform.system() == 'Windows':
locale.setlocale(locale.LC_ALL, 'English')
elif platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'en_US.utf-8')
else:
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
self.current_date = datetime.datetime.now().strftime("%b'%y")
self.acc_names_rec = {} #longterm excel, excel and csv files
self.folder_sep = FOLDER_SEP
self.dir_result = dir_result+FOLDER_SEP+self.langdict['result_pathvars'][0]+self.current_date+FOLDER_SEP #adjusted complete path
self.folder_res = {}
self.raw_data = {}
self.raw_data_header = {}
self.basis_data = {}
self.month_data = {}
self.cat_data = {}
self.plotting_list = {}
self.error_codes = {}
self.classifier = classifier_class
self.saved_dataframe = saved_dataframe
def process_data(self, raw_fileinfo, import_type):
#unpack tuple with fileinformation
filename, filepath = raw_fileinfo
##read in csv-files from different account_types
##start functions for getting csv account type info and data input & adjustment
if import_type == 'csv_analyse':
#get account information
while True:
try:
acctypename_importedfile, raw_data, account_infos = account_ident.account_info_identifier(filepath)
except:
self.error_codes[filename] = 'Err01'
break
##unpack account read-in info tuple
header_columns, column_join, column_drop, acc_subtype, plot_info = account_infos #acc_subtype ('giro' or 'credit') currently not used, but kept in tuple list for possible later use
self.raw_data[filename] = raw_data
self.raw_data_header[filename] = header_columns
#data preprocess
try:
#select Euro entrys
if "Währung" in header_columns:
self.basis_data[filename] = self.raw_data[filename][self.raw_data[filename]["Währung"] == "EUR"].copy()
elif "currency" in header_columns:
self.basis_data[filename] = self.raw_data[filename][self.raw_data[filename]["currency"] == "EUR"].copy()
else:
self.basis_data[filename] = self.raw_data[filename].copy()
                    ##do adjustment to transaction info (join columns to get more info) for categorization. Output is forced to be string datatype
if column_join[0] == 'yes':
self.basis_data[filename]['text'] = self.basis_data[filename][self.basis_data[filename].columns[column_join[1]]].apply(lambda x: str(' || '.join(x.dropna())), axis=1)
else:
pass
                    ##drop columns if necessary and rearrange columns
if column_drop[0] == 'yes':
self.basis_data[filename].drop(self.basis_data[filename].columns[column_drop[1]], axis=1, inplace=True)
self.basis_data[filename] = self.basis_data[filename].reindex(columns=self.basis_data[filename].columns[column_drop[2]])
else:
pass
##insert "act" column if necessary (currently only dkb_credit)
if len(self.basis_data[filename].columns) == 4:
self.basis_data[filename].insert(2, 'act', self.langdict['act_value'][0])
else:
pass
self.basis_data[filename].columns = ["time1", "time2", "act", "text", "val"]
#delete row with time1&time2 empty
self.basis_data[filename] = self.basis_data[filename].drop(self.basis_data[filename][(self.basis_data[filename]['time1'].isna())&(self.basis_data[filename]['time2'].isna())].index)
#adjust for missing time values in "time1"-columns
self.basis_data[filename]['time1'].mask(self.basis_data[filename]['time1'].isna(), self.basis_data[filename]['time2'], inplace=True) ##new
#make month-column and categorize
self.basis_data[filename]['month'] = self.basis_data[filename]['time1'].apply(lambda dates: dates.strftime('%b %Y'))
except:
self.error_codes[filename] = 'Err02'
break
                #check if all transaction values are null. If yes, abort and give error code '03'
if self.basis_data[filename]['val'].isna().all():
self.error_codes[filename] = 'Err03'
break
else:
pass
#try categorization else give error code '04'
try:
self.basis_data[filename] = self.classifier.categorize_rawdata(self.basis_data[filename], 'csvdata')
except:
self.error_codes[filename] = 'Err04'
break
#add account name to dictionary with imported data files and their respective account names (for cashbook, savecent and long term data saving)
self.acc_names_rec[filename] = acctypename_importedfile
#add variables for subsequent handling and plotting
self.plotting_list[filename] = plot_info
self.folder_res[filename] = self.dir_result+filename
break
#Plot manually manipulated excel files
elif import_type == 'xls_analyse':
#read excel
try:
raw_data = pd.read_excel(filepath, sheet_name=self.langdict['sheetname_basis'][0], engine='openpyxl') #main category is not read-in but separately assigned)
try:
testname = raw_data[raw_data.columns[8]][0] #get account name if existing
#check if testname is nan-value (as nan are not identical it can be checked with !=)
if testname != testname: #if imported account name field is Nan-value use "not assigned"
acctypename_importedfile = self.langdict['accname_labels'][1] #set acctypename to not assigned
else:
acctypename_importedfile = testname #take name which was read in
                except: # if the bank name does not exist, set it to "not assigned"
acctypename_importedfile = self.langdict['accname_labels'][1]
raw_data = raw_data[raw_data.columns[range(0, 7)]].copy()
raw_data.dropna(subset=raw_data.columns[[0, 4]], inplace=True) #delete rows where value in time1 or val column is empty
#check if raw data is in the right data format and contains at least one row
if (raw_data.columns.tolist() == self.langdict['sheetname_basis'][1][:-2]) and (len(raw_data) != 0):
#headers must be identical to those outputted via excel
raw_data.columns = ["time1", "time2", "act", "text", "val", 'month', 'cat']
#save histo data to saved file
histo_data = raw_data[['act', 'text', 'cat']].copy() #get a copy of relevant data for categorization
self.classifier.machineclassifier.adjust_histo_data(histo_data) # add data to existing history dataset
del histo_data
self.basis_data[filename] = raw_data.copy()
self.basis_data[filename] = self.classifier.assign_maincats(self.basis_data[filename]) #assign main categories
self.acc_names_rec[filename] = acctypename_importedfile #add account name to dictionary with imported data files and their respective account names (for cashbook, savecent and long term data saving)
self.plotting_list[filename] = 'normal'
self.folder_res[filename] = self.dir_result+filename
else:
self.error_codes[filename] = 'Err01'
del raw_data
except:
self.error_codes[filename] = 'Err01'
# Excel file for concatenation
elif import_type == 'xls_longterm':
#Longterm analysis: Read-in excel to concat csvs
try:
raw_data = pd.read_excel(filepath, sheet_name=self.langdict['sheetname_basis'][0], engine='openpyxl') #main category is not read-in but separately assigned)
#variabel "assigned_accname" does not need to be checked, as it is always 'use_acctype' for longterm excel concat
try:#try to get the account name from excel
testname = raw_data[raw_data.columns[8]][0] #get account name if existing
#check if testname is nan-value (as nan are not identical it can be checked with !=)
if testname != testname: #if imported account name field is Nan-value use "not assigned"
acctypename_importedfile = self.langdict['accname_labels'][1] #set acctypename to not assigned
else:
acctypename_importedfile = testname #take name which was read in
                except: # if the bank name does not exist, set it to "not assigned"
acctypename_importedfile = self.langdict['accname_labels'][1]
raw_data = raw_data[raw_data.columns[range(0, 7)]].copy()
raw_data.dropna(subset=raw_data.columns[[0, 4]], inplace=True) #delete rows where value in time1 or val column is empty
#check if raw data is in the right data format and contains at least one row
if (raw_data.columns.tolist() == self.langdict['sheetname_basis'][1][:-2]) and (len(raw_data) != 0):
#headers must be identical to those outputted via excel
raw_data.columns = ["time1", "time2", "act", "text", "val", 'month', 'cat']
#save histo data to saved file
histo_data = raw_data[['act', 'text', 'cat']].copy() #get a copy of relevant data for categorization
self.classifier.machineclassifier.adjust_histo_data(histo_data) # add data to existing history dataset
del histo_data
self.basis_data[filename] = raw_data.copy()
self.basis_data[filename] = self.classifier.assign_maincats(self.basis_data[filename]) #assign main categories
                    #account name for excel extra-import is only needed because the concatenation function searches for identical "acc_name" values
self.basis_data[filename]['acc_name'] = None #create acctype column and set values to Nan
                    self.basis_data[filename].at[0, 'acc_name'] = acctypename_importedfile #set account name to basis dataframe for possible concatenation
else:
self.error_codes[filename] = 'Err01'
del raw_data
except:
self.error_codes[filename] = 'Err01'
# Excel file for concatenation
elif import_type == 'xls_cashbook':
#cashbok analysis: Read-in to append info to csvs
try:
raw_data = pd.read_excel(filepath, sheet_name=self.langdict['cashbookvars_name'][0], usecols=[0, 1, 2, 3, 4, 5], engine='openpyxl')
raw_data.columns = ["time1", "text", "val", "cat", "acc_name", "cashcat"]
raw_data = raw_data[raw_data['time1'].isna() == False]
#adjust categories if no value is set
if raw_data[['time1', 'text', 'val']].isnull().values.any() == False:#reject cashbook if there are empty values in date, value or text
raw_data['val'] = -raw_data['val']
raw_data["time2"] = raw_data["time1"]
raw_data['month'] = raw_data['time1'].apply(lambda dates: dates.strftime('%b %Y'))
raw_data['act'] = self.langdict['cashbookvars_name'][1]
#do categorization for empty values in cashbook, get main cats and reorder dataframe
raw_data = self.classifier.categorize_rawdata(raw_data, 'cashbook')
raw_data = raw_data.reindex(columns=raw_data.columns[[0, 6, 8, 1, 2, 7, 3, 9, 4, 5]])
self.basis_data[self.langdict['cashbookvars_name'][0]] = raw_data.copy()
else:
self.error_codes[filename] = 'Err01'
del raw_data
except:
self.error_codes[filename] = 'Err01'
else:#no action needed
pass
def assign_fileaccnames(self, assign_list):
for entry in assign_list:
#entry[0] equals filename / entry[1] account name
#set account name to dictionary holding all account names (needed for cashbook, longterm and savecent)
self.acc_names_rec[entry[0]] = entry[1]
#create account name column for excel export
self.basis_data[entry[0]]['acc_name'] = None #create account name column and set values to Nan
self.basis_data[entry[0]].at[0, 'acc_name'] = entry[1] #set account name to basis dataframe (will be exported to excel)
def sorting_processor(self, element_name, balance_row_name, group_name, value_name):
basis_data_subset = self.basis_data[element_name].copy() #create subset
#make month data
self.month_data[element_name] = basis_data_subset.groupby('month', sort=False)[value_name].sum().reset_index() ##get monthly overview
if element_name == self.langdict['dataname_savecent'][0]: #do sorting of month data differently for savecents
self.month_data[element_name] = self.month_data[element_name].sort_values([value_name], ascending=False)
self.month_data[element_name].columns = ['month', 'val']
else: #sort monthly data for all other dataframes starting with first month up respective left in monthplot
self.month_data[element_name] = self.month_data[element_name][::-1]
month_number = self.month_data[element_name]['month'].nunique()
#process data and aggregate based on sorting type(category/main category)
grouped_data = basis_data_subset.groupby(group_name, sort=False)[value_name].sum().reset_index()
balance_row = pd.DataFrame([[balance_row_name, sum(basis_data_subset[value_name])]], columns=list(grouped_data.columns))
grouped_data = grouped_data.sort_values([value_name], ascending=False).reset_index(drop=True) #sort by values to have all positive values at top (necessary to get indices
income_data = grouped_data.loc[(grouped_data[value_name] > 0)].copy() #get positive valued entries
#get negative valued entries based on length of positive valued dataframe
if len(income_data.index) > 0:
cost_data = grouped_data[income_data.index[-1]+1:].copy()
else:
cost_data = grouped_data[0:].copy()
cost_data = cost_data.sort_values([value_name]) # sort negative valued dataframe, with most negative at top
result_data = income_data.append(cost_data, ignore_index=True) #append negative dataframe to positive dataframe
result_data = result_data.append(balance_row, ignore_index=True) # add balance row
result_data['val_month'] = result_data[value_name]/(month_number) #create value per month
return result_data
def month_cat_maker(self):
##categorize data and sort ascending. Same goes for monthly data
for element_name in list(self.folder_res.keys()):
if element_name == self.langdict['dataname_savecent'][0]:
main_cats = "empty"
subcats = self.sorting_processor(element_name, self.langdict['balance_savecent'][0], 'acc_origin', 'savecent')
subcats.columns = ['cat', 'val', 'val_month']
elif element_name == self.langdict['cashbookvars_name'][0]:
subcats = self.sorting_processor(element_name, self.langdict['balance_cashbook'][1], 'cat', 'val')
main_cats = self.sorting_processor(element_name, self.langdict['balance_cashbook'][1], 'acc_name', 'val') #main cats equals account name sorting
#rename columns maincat cashbook from 'acc_name' to 'cat'
main_cats.columns = ['cat', 'val', 'val_month']
elif element_name == self.langdict['holidayvars_name'][0]:
main_cats = "empty"
subcats = self.sorting_processor(element_name, self.langdict['balance_holiday'][0], 'cat', 'val')
else: #make cat data and month data for all other dataframes
main_cats = self.sorting_processor(element_name, self.langdict['balance_normal'][0], 'main cat', 'val') # create sorted dataframe for main categories
subcats = self.sorting_processor(element_name, self.langdict['balance_normal'][0], 'cat', 'val') # create sorted dataframe for categories
subcats = self.classifier.assign_maincats(subcats) #add main category column to cat data for later use
subcats.loc[subcats['cat'] == self.langdict['balance_normal'][0], 'main cat'] = self.langdict['balance_normal'][0] #adjust main category for balance category
subcats = subcats.reindex(columns=['main cat', 'cat', 'val', 'val_month'])#reorder columns
self.cat_data[element_name] = (subcats, main_cats)
#take saved long term data into data evaluation process
def longterm_evaluate(self): #this function is only called, when user opts for long term data evaluation
for account_name in self.saved_dataframe.keys():
account_dataframe = self.saved_dataframe[account_name].copy()
account_dataframe.reset_index(drop=True, inplace=True) #reset index to be able to place acc_name on index 0
account_dataframe['acc_name'] = None # clear account name
account_dataframe.at[0, 'acc_name'] = account_name#set new account type for this subframe
longterm_data_name = self.langdict['longterm_name'][0]+account_name # "Longterm_"+account fullname
self.basis_data[longterm_data_name] = account_dataframe #add longterm basis data to be analysed
self.acc_names_rec[longterm_data_name] = account_name #add longterm data name to recorded account names list--> makes cashbook evaluation possible
self.plotting_list[longterm_data_name] = 'normal' #set plotting info
self.folder_res[longterm_data_name] = self.dir_result+longterm_data_name # create export folder
# add newly added data to longterm data
def longterm_savedata(self):
#update saved longterm data, evaluate and output if user opted for it
#convert saved dataframes in dict entries into lists
for saved_element in self.saved_dataframe.keys():
self.saved_dataframe[saved_element] = [self.saved_dataframe[saved_element]]
#get basis dataframe for every assigned account name
for element_name in self.acc_names_rec.keys():
if self.acc_names_rec[element_name] != self.langdict['accname_labels'][1]: #check if the name is 'not assigned'. If yes skip, else import basis data
try: #if list with account name already exists, add dataframe
self.saved_dataframe[self.acc_names_rec[element_name]].append(self.basis_data[element_name])
except: #create list for account name with dataframe
self.saved_dataframe[self.acc_names_rec[element_name]] = [self.basis_data[element_name]]
else:
pass #nothing to do
#generate new dataframes
longterm_data_prep = {}
for account_name in self.saved_dataframe.keys():
account_name_concat =
|
pd.concat(self.saved_dataframe[account_name])
|
pandas.concat
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Detect audio recordings with rain using MFCC and logistic regression
Assuming that rain events are stable during a period of 60s or more, the detector analyzes
the first 10 seconds of each recording. It computes the MFCC and uses a trained model to
evaluate the probability of having rain in the recording.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from maad import sound
import glob
import os
from librosa import feature
from utils import find_file
#%% Set variables
fpath_csv = '../../anotaciones_manuales/anotaciones_pkl_consolidado.csv'
path_audio = '/Volumes/PAPAYA/ANH/'
target_fs = 16000
wl = 10
#%% Compute features
df = pd.read_csv(fpath_csv)
df_features = pd.DataFrame()
for idx_row, row in df.iterrows():
print(idx_row+1, '/', len(df))
full_path_audio = find_file(row.fname, path_audio)[0]
s, fs = sound.load(full_path_audio)
# transform
s = sound.trim(s, fs, 0, wl)
mfcc = feature.mfcc(s, sr=target_fs, n_mfcc=20, hop_length=1024, fmax=8000)
mfcc = np.median(mfcc, axis=1)
# format dataframe
idx_names = ['mfcc_' + str(idx).zfill(2) for idx in range(1,mfcc.size+1)]
row = row.append(pd.Series(mfcc, index=idx_names))
row.name = idx_row
df_features = df_features.append(row)
#%% Set train test datasets
# assign variables
X = df_features.loc[:,df_features.columns.str.startswith('mfcc')]
y = (df_features.label=='LLUVIA').astype(int)
y.value_counts()
#%% Tune classifier with cross validation
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import f1_score, confusion_matrix
from classif_fcns import misclassif_idx
import joblib
clf = LogisticRegression(solver='liblinear', max_iter=10000, class_weight='balanced')
clf = LogisticRegressionCV(Cs=1000, penalty='l2', solver='liblinear',
scoring='f1', max_iter=10000, class_weight='balanced',
cv=10, random_state=123)
# fit classifier
clf.fit(X, y)
#%% Final evaluation on test set and save model
# Note: Do not use the test set to select a model.
X = df_features.loc[:,df_features.columns.str.startswith('mfcc')]
y = (df_features.label=='LLUVIA').astype(int)
y_pred = clf.predict(X)
y_prob = clf.predict_proba(X)
df_features['y_prob'] = clf.predict_proba(X)[:,1]
f1_score(y, y_pred)
confusion_matrix(y, y_pred)
misclassified = misclassif_idx(y, y_pred)
df_features.loc[misclassified['fp'], ['fname', 'y_prob', 'label']]
df_features.loc[misclassified['fn'], ['fname', 'y_prob', 'label']]
# save model
joblib.dump(clf, 'clf_rain_logistic_regression.joblib')
#%% Deploy on new data
import glob
flist = pd.read_csv('../../audio_metadata/audio_metadata_lluvias.csv')
flist = flist.fname_audio.tolist()
df_pred = dict()
for idx, fname in enumerate(flist):
print(idx+1, '/', len(flist))
full_path_audio = find_file(fname, path_audio)[0]
s, fs = sound.load(full_path_audio)
# transform - must be the same as in training
s = sound.trim(s, fs, 0, wl)
mfcc = feature.mfcc(s, sr=target_fs, n_mfcc=20, hop_length=1024, fmax=8000)
mfcc = np.median(mfcc, axis=1)
# format dataframe
pred_clf = clf.predict_proba(mfcc.reshape(1,20))[:,1]
df_pred[os.path.basename(fname)] = np.round(pred_clf,2)
df_pred =
|
pd.DataFrame(df_pred, index=['proba_rain'])
|
pandas.DataFrame
|
################################################################
# ---------- Network Gene Name Conversion Functions ---------- #
################################################################
import requests
import re
import time
import pandas as pd
# Determine if id to be input is a valid gene name (does not contain parentheses or quotations or whitespace)
def exclude_id(name, bad_prefixes=None):
excluded_id_regex = re.compile('[(),\'\"\s\/\|\.<>]+')
# Remove genes that may also have prefixes that we do not want (e.g. CHEBI)
if bad_prefixes:
for prefix in bad_prefixes:
if name.startswith(prefix):
return True
return excluded_id_regex.search(name)
# Remove the naming system prefix, if there is one
def get_identifier_without_prefix(string):
elements = string.split(':')
length = len(elements)
    if length == 2:
return str(elements[1])
elif length > 2:
return None
else:
return string
# Construct string for batch query to MyGene.Info v3.0.0 API
def query_constructor(gene_list, exclude_prefixes=None, print_invalid_genes=False):
# Find genes that are valid and return only gene identifiers
valid_query_genes = [get_identifier_without_prefix(
gene) for gene in gene_list if exclude_id(gene, exclude_prefixes) == None]
# Find all genes that have invalid names
invalid_query_genes = [gene for gene in gene_list if exclude_id(
gene, exclude_prefixes) != None]
print(len(valid_query_genes), "Valid Query Genes")
if print_invalid_genes:
print(len(invalid_query_genes), "Invalid Query Genes:")
print(invalid_query_genes)
else:
print(len(invalid_query_genes), "Invalid Query Genes")
# Build string of names to input into MyGene.Info
query_string = ' '.join(valid_query_genes)
return query_string, valid_query_genes, invalid_query_genes
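
# Hedged example: build a space-separated MyGene.info query string from a small, illustrative
# gene list; CHEBI-prefixed ids and names containing whitespace are filtered out as invalid.
def _example_build_query():
    genes = ['TP53', 'BRCA1', 'CHEBI:15377', 'bad name with spaces']
    return query_constructor(genes, exclude_prefixes=['CHEBI'])
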
# Function for posting batch query to MyGene.info v3.0.0 API
def query_batch(query_string, tax_id='9606', scopes="symbol, entrezgene, alias, uniprot", fields="symbol, entrezgene"):
query_split = query_string.split(' ')
query_n = len(query_split)
query_time = time.time()
if query_n <= 1000:
data = {'species': tax_id, # Human Only
                'scopes': scopes, # Default symbol, entrez, alias, uniprot. Alias often returns more genes than needed, return only highest scoring genes
'fields': fields, # Which gene name spaces to convert to
'q': query_string}
res = requests.post('http://mygene.info/v3/query', data)
json = res.json()
else:
# If the query is too long, we will need to break it up into chunks of 1000 query genes (MyGene.info cap)
if query_n % 1000 == 0:
            chunks = query_n // 1000
else:
            chunks = (query_n // 1000) + 1
query_chunks = []
for i in range(chunks):
start_i, end_i = i*1000, (i+1)*1000
query_chunks.append(' '.join(query_split[start_i:end_i]))
json = []
for chunk in query_chunks:
data = {'species': '9606', # Human Only
                    # Default symbol, entrez, alias, uniprot. Alias often returns more genes than needed, return only highest scoring genes
'scopes': "entrezgene, retired",
'fields': "symbol, entrezgene", # Which gene name spaces to convert to
'q': chunk}
res = requests.post('http://mygene.info/v3/query', data)
json = json+res.json()
print(len(json), 'Matched query results')
print('Batch query complete:', round(time.time()-query_time, 2), 'seconds')
return json
# Construct matched queries maps
def construct_query_map_table(query_result, query_genes, display_unmatched_queries=False):
construction_time = time.time()
# Construct DataFrame of matched queries (only keep the results for each query where both symbol and entrez id were mapped)
matched_data, matched_genes = [], []
for match in query_result:
if match.get('entrezgene') and match.get('symbol'):
matched_data.append([match.get('query'), match.get(
'_score'), match.get('symbol'), str(match.get('entrezgene'))])
matched_genes.append(match.get('query'))
# Add all other partial mappings or non-mappings to the list
partial_match_genes = [
gene for gene in query_genes if gene not in matched_genes]
partial_match_results = []
for match in query_result:
if match.get('query') in partial_match_genes:
partial_match_results.append(match)
            # If there is an entrez gene, we want it in string form, otherwise we want None
if match.get('entrezgene'):
matched_data.append([match.get('query'), match.get(
'_score'), match.get('symbol'), str(match.get('entrezgene'))])
else:
matched_data.append([match.get('query'), match.get(
'_score'), match.get('symbol'), match.get('entrezgene')])
print('Queries with partial matching results found:',
len(partial_match_results))
if display_unmatched_queries:
for entry in partial_match_results:
print(entry)
# Convert matched data list into data frame table
match_table = pd.DataFrame(data=matched_data, columns=[
'Query', 'Score', 'Symbol', 'EntrezID'])
match_table = match_table.set_index('Query')
# Some genes will be matched in duplicates (due to alias mapping, generally the highest scoring matches will be correct)
# Therefore we remove duplicate mappings to create 1-to-1 mappings for query to genes.
duplicate_matched_genes = []
for gene in matched_genes:
if type(match_table.loc[gene]) == pd.DataFrame:
duplicate_matched_genes.append(gene)
    print()
    print(len(duplicate_matched_genes), "Queries with multiple matches found")
# Construct mapping table of genes with only one full result
single_match_genes = [
gene for gene in query_genes if gene not in duplicate_matched_genes]
match_table_single = match_table.loc[single_match_genes]
# Keep matches of queries matched only once if there are duplicate matches for genes
if len(duplicate_matched_genes) > 0:
# Keep maximum scored matches of queries matched more than once
max_score_matches = []
for gene in duplicate_matched_genes:
matched_duplicates = match_table.loc[gene]
max_score = max(matched_duplicates['Score'])
max_score_matches.append(
matched_duplicates[matched_duplicates['Score'] == max_score])
match_table_duplicate_max = pd.concat(max_score_matches)
# Construct Query maps for symbol and entrez
match_table_trim = pd.concat(
[match_table_single, match_table_duplicate_max])
else:
match_table_trim = match_table_single.copy(deep=True)
# Construct query map dictionaries
query_to_symbol = match_table_trim['Symbol'].to_dict()
query_to_entrez = match_table_trim['EntrezID'].to_dict()
    print()
print('Query mapping table/dictionary construction complete:',
round(time.time()-construction_time, 2), 'seconds')
return match_table_trim, query_to_symbol, query_to_entrez
# Filter edgelist to remove all genes that contain invalid query names
# This function is only required if there are any invalid genes found by query_constructor()
def filter_query_edgelist(query_edgelist, invalid_genes):
edgelist_filt = []
count = 0
for edge in query_edgelist:
if edge[0] in invalid_genes or edge[1] in invalid_genes:
count += 1
else:
edgelist_filt.append(edge)
print(count, '/', len(query_edgelist), 'edges with invalid nodes removed')
return edgelist_filt
# Convert network edge lists
# Third column is for weights if desired to pass weights forward
def convert_edgelist(query_edgelist, gene_map, weighted=False):
if weighted:
converted_edgelist = [sorted(
[gene_map[edge[0]], gene_map[edge[1]]])+[edge[2]] for edge in query_edgelist]
else:
converted_edgelist = [
sorted([gene_map[edge[0]], gene_map[edge[1]]]) for edge in query_edgelist]
return converted_edgelist
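
# Small sketch (toy data): convert a query-space edge list into symbol space with a
# hypothetical gene_map; each edge is returned with its node names sorted alphabetically.
def _example_convert_edges():
    gene_map = {'7157': 'TP53', '672': 'BRCA1', '675': 'BRCA2'}
    edges = [['7157', '672'], ['672', '675']]
    return convert_edgelist(edges, gene_map)  # [['BRCA1', 'TP53'], ['BRCA1', 'BRCA2']]
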
# Sometimes each node needs to be converted by its best match if there are multiple names per node
# This function uses the match_table constructed earlier to convert genes to either symbol or entrez format only
def convert_custom_namelist(names, field, match_table):
# Keep only mappings defined for field of interest
if field == 'symbol':
# Return match table values that have matched symbol
conversion = match_table.loc[names][~(
match_table.loc[names]['Symbol'].isnull())]
if conversion.shape[0] == 0:
return None
else:
# Return conversion with max score or None if no conversion
max_score = conversion['Score'].max()
converted_namelist = conversion[conversion['Score']
== max_score].loc[0]['Symbol']
elif field == 'entrez':
# Return match table values that have matched symbol
conversion = match_table.loc[names][~(
match_table.loc[names]['EntrezID'].isnull())]
if conversion.shape[0] == 0:
return None
else:
# Return conversion with max score or None if no conversion
max_score = conversion['Score'].max()
converted_namelist = conversion[conversion['Score']
== max_score].loc[0]['EntrezID']
return converted_namelist
# Filter converted edge lists
def filter_converted_edgelist(edgelist, remove_self_edges=True, weighted=False):
filter_time = time.time()
print(len(edgelist), 'input edges')
# Remove self-edges
if remove_self_edges:
edgelist_filt1 = [edge for edge in edgelist if edge[0] != edge[1]]
print(len(edgelist)-len(edgelist_filt1), 'self-edges removed')
else:
edgelist_filt1 = edgelist
print('Self-edges not removed')
if weighted:
# Remove edges where one or both nodes are "None"
edgelist_filt2 = pd.DataFrame(
data=edgelist_filt1).dropna().values.tolist()
print(len(edgelist_filt1)-len(edgelist_filt2),
'edges with un-mapped genes removed')
# Remove duplicates by keeping the max score
edgelist_filt3_scoremap = {}
for edge in edgelist_filt2:
if edge[0]+'+'+edge[1] not in edgelist_filt3_scoremap:
edgelist_filt3_scoremap[edge[0]+'+'+edge[1]] = edge[2]
else:
edgelist_filt3_scoremap[edge[0]+'+'+edge[1]] = max(
edgelist_filt3_scoremap[edge[0]+'+'+edge[1]], edge[2])
# Convert dictionary of scores to list
edgelist_filt3 = []
for edge in edgelist_filt3_scoremap:
edgelist_filt3.append(edge.split(
'+')+[edgelist_filt3_scoremap[edge]])
print(len(edgelist_filt2)-len(edgelist_filt3), 'duplicate edges removed')
else:
# Remove edges where one or both nodes are "None"
edgelist_filt2 =
|
pd.DataFrame(data=edgelist_filt1)
|
pandas.DataFrame
|
import glob
import numpy as np
import pandas as pd
import re
import sys
# generate aspect classes based on the MethodAspect0 template
def generate_aspects(df):
base_path = "./src/main/java/se/kth/castor/pankti/instrument/plugins/MethodAspect"
found_aspects = sorted(glob.glob(base_path + "*.java"), key=lambda x:float(re.findall("(\d+)",x)[0]))
count = int(re.search(r"(\d+)", found_aspects[-1]).group())
aspects = []
template_file_path = base_path + str(0) + ".java"
df.replace(np.nan, '', regex=True, inplace=True)
for index, row in df.iterrows():
with open(template_file_path) as template:
count += 1
aspect_string = "\"se.kth.castor.pankti.instrument.plugins.MethodAspect" + str(count) + "\""
aspects.append(aspect_string)
new_file_path = base_path + str(count) + ".java"
with open(new_file_path, "w") as f:
for line in template:
if ("public class MethodAspect0" in line):
line = line.replace("0", str(count))
if ("@Pointcut(className =" in line):
line = re.sub(r"=\s\"(.+)\",", "= \"" + row['parent-FQN'] + "\",", line)
if ("methodName = " in line):
line = re.sub(r"=\s\"(.+)\",", "= \"" + row['method-name'] + "\",", line)
if ("String rowInCSVFile" in line):
values = []
for col in df.columns:
col_value = str(row[col])
if "," in col_value:
col_value = "\\\"" + col_value.replace(" ", "") + "\\\""
values.append(col_value)
row_as_string = ','.join(values)
line = re.sub(r"\"\"", "\"" + row_as_string + "\"", line)
# Changes needed for void methods
if row['return-type'] == "void":
if "boolean isReturnTypeVoid" in line:
line = line.replace("false", "true")
# if "@OnReturn" in line:
if "onReturn(@BindReturn Object returnedObject," in line:
line = line.replace("@BindReturn Object returnedObject", "@BindReceiver Object receivingObjectPost")
if "writeObjectXMLToFile(returnedObject, returnedObjectFilePath)" in line:
line = line.replace("returnedObject", "receivingObjectPost")
if ("methodParameterTypes = " in line):
if (
|
pd.isnull(row['param-list'])
|
pandas.isnull
|
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from pandas import DataFrame
import numpy as np
from sklearn.datasets import load_digits
import sys
from autoclf import auto_utils as au
from autoclf.classification import eval_utils as eu
from autoclf.classification import evaluate as eva
import autoclf.getargs as ga
import matplotlib.pyplot as plt
# starting program
if __name__ == '__main__':
plt.style.use('ggplot')
print()
print("### Probability Calibration Experiment -- CalibratedClassifierCV "
"with cv=cv (no prefit) ###")
print()
print(ga.get_n_epoch())
d_name = ga.get_name()
if d_name is None:
d_name = 'Digits'
seed = 7
np.random.seed(seed)
# load digits data
digits = load_digits()
df = DataFrame(data=digits.data)
target = 'class'
df_target =
|
DataFrame(data=digits.target, columns=[target])
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import pandas as pd
import pathlib, pickle, json, copy, yaml
from emhass.retrieve_hass import retrieve_hass
from emhass.forecast import forecast
from emhass.optimization import optimization
from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger
from emhass.command_line import dayahead_forecast_optim
# the root folder
root = str(get_root(__file__, num_parent=2))
# create logger
logger, ch = get_logger(__name__, root, save_to_file=False)
class TestForecast(unittest.TestCase):
def setUp(self):
self.get_data_from_file = True
params = None
retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(pathlib.Path(root+'/config_emhass.yaml'), use_secrets=False)
self.retrieve_hass_conf, self.optim_conf, self.plant_conf = \
retrieve_hass_conf, optim_conf, plant_conf
self.rh = retrieve_hass(self.retrieve_hass_conf['hass_url'], self.retrieve_hass_conf['long_lived_token'],
self.retrieve_hass_conf['freq'], self.retrieve_hass_conf['time_zone'],
params, root, logger)
if self.get_data_from_file:
with open(pathlib.Path(root+'/data/test_df_final.pkl'), 'rb') as inp:
self.rh.df_final, self.days_list, self.var_list = pickle.load(inp)
else:
self.days_list = get_days_list(self.retrieve_hass_conf['days_to_retrieve'])
self.var_list = [self.retrieve_hass_conf['var_load'], self.retrieve_hass_conf['var_PV']]
self.rh.get_data(self.days_list, self.var_list,
minimal_response=False, significant_changes_only=False)
self.rh.prepare_data(self.retrieve_hass_conf['var_load'], load_negative = self.retrieve_hass_conf['load_negative'],
set_zero_min = self.retrieve_hass_conf['set_zero_min'],
var_replace_zero = self.retrieve_hass_conf['var_replace_zero'],
var_interp = self.retrieve_hass_conf['var_interp'])
self.df_input_data = self.rh.df_final.copy()
self.fcst = forecast(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
params, root, logger, get_data_from_file=self.get_data_from_file)
self.df_weather_scrap = self.fcst.get_weather_forecast(method='scrapper')
self.P_PV_forecast = self.fcst.get_power_from_weather(self.df_weather_scrap)
self.P_load_forecast = self.fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
self.df_input_data_dayahead = pd.concat([self.P_PV_forecast, self.P_load_forecast], axis=1)
self.df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
self.opt = optimization(retrieve_hass_conf, optim_conf, plant_conf,
self.fcst.var_load_cost, self.fcst.var_prod_price,
'profit', root, logger)
self.input_data_dict = {
'root': root,
'retrieve_hass_conf': self.retrieve_hass_conf,
'df_input_data': self.df_input_data,
'df_input_data_dayahead': self.df_input_data_dayahead,
'opt': self.opt,
'rh': self.rh,
'fcst': self.fcst,
'P_PV_forecast': self.P_PV_forecast,
'P_load_forecast': self.P_load_forecast,
'params': params
}
def test_get_weather_forecast(self):
self.assertTrue(self.df_input_data.isnull().sum().sum()==0)
self.assertIsInstance(self.df_weather_scrap, type(
|
pd.DataFrame()
|
pandas.DataFrame
|
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# a tz mismatch between tz-aware values raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should result in a DatetimeIndex; the tz mismatch then raises
# ValueError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="UTC")
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and it's not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = date_range(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
result = date_range(freq='D', start=start, end=end, tz=tz)
expected = DatetimeIndex(['2013-01-01 06:00:00',
'2013-01-02 06:00:00'],
tz='America/Los_Angeles')
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone('America/Los_Angeles') is result.tz
@pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp('2010', tz=tz).tz
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(),
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
# TODO(GH-24559): Remove the xfail for the tz-aware case.
@pytest.mark.parametrize('klass', [Index, DatetimeIndex])
@pytest.mark.parametrize('box', [
np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize('tz, dtype', [
pytest.param('US/Pacific', 'datetime64[ns, US/Pacific]',
marks=[pytest.mark.xfail(),
pytest.mark.filterwarnings(
"ignore:\\n Passing:FutureWarning")]),
[None, 'datetime64[ns]'],
])
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp('2018-01-01', tz=tz)
import streamlit as st # streamlit run Location100_RF_streamlit.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn import preprocessing
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from scipy import stats
from sklearn.neighbors import KNeighborsRegressor
# import config
st.title('Pepper ML for Chicago area(location 100) by using random forest')
# df = pd.read_csv("C:\PepperPepper\pepperProject.csv", encoding = 'unicode_escape', engine ='python')
url = f'https://raw.githubusercontent.com/LeonZly90/myData/main/pepperProject.csv?token=<KEY>'
df = pd.read_csv(url, encoding='unicode_escape', engine='python')
df_data = df.copy()
new_sheet = pd.DataFrame(df_data,
columns=['OMOP_COMP_CODE', 'CTRL_JOB', 'STAGE_CODE', 'MARKET_TYPE', 'POTENTIAL_REV_AMT',
'TOTAL_HOURS'])
new_sheet = new_sheet[~new_sheet['MARKET_TYPE'].isin(['Select Market', 'Self Performed Work', 'Self Performed Direct'])]
new_sheet = new_sheet[new_sheet['POTENTIAL_REV_AMT'] > 0]
location_100 = new_sheet[new_sheet.OMOP_COMP_CODE == 100]
location_100 = location_100.drop('OMOP_COMP_CODE', axis=1)
# st.write('location_100:\n', location_100)
JobHour_by_StageMarket = location_100.groupby(['CTRL_JOB', 'STAGE_CODE', 'MARKET_TYPE'])[
    ['POTENTIAL_REV_AMT', 'TOTAL_HOURS']].sum().reset_index()
# st.write('JobHour_by_StageMarket:\n', JobHour_by_StageMarket) # [474 rows x 5 columns]
revAmt_Hour0 = JobHour_by_StageMarket.iloc[:, -2:].abs()
# st.write(revAmt_Hour0)
# with st.echo(code_location='below'):
# fig1 = plt.figure(1)
# plt.scatter(revAmt_Hour0['POTENTIAL_REV_AMT'], revAmt_Hour0['TOTAL_HOURS'])
# plt.xlabel('POTENTIAL_REV_AMT')
# plt.ylabel('TOTAL_HOURS')
# plt.show()
# st.write(fig1)
# clean outlier [469 rows x 5 columns]
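# Rows where any column has an absolute z-score of 3 or more are treated as outliers and dropped (roughly 474 -> 469 rows here).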
z_scores = stats.zscore(revAmt_Hour0)
abs_z_scores = np.abs(z_scores)
revAmt_Hour1 = revAmt_Hour0[(abs_z_scores < 3).all(axis=1)]
# st.write(revAmt_Hour1)
# with st.echo(code_location='below'):
# fig2=plt.figure(2)
# plt.scatter(revAmt_Hour1['POTENTIAL_REV_AMT'], revAmt_Hour1['TOTAL_HOURS'])
# plt.xlabel('POTENTIAL_REV_AMT1')
# plt.ylabel('TOTAL_HOURS1')
# plt.show()
# st.write(fig2)
rest = JobHour_by_StageMarket.iloc[:, :-2]
JobHour_by_StageMarket = rest.join(revAmt_Hour1, how='outer')
# @st.cache # 👈 This function will be cached
JobHour_by_StageMarket = JobHour_by_StageMarket.dropna()
# st.write('Now JobHour_by_StageMarket:\n', JobHour_by_StageMarket) # [469 rows x 5 columns]
# @st.cache # 👈 This function will be cached
standardscaler = preprocessing.StandardScaler()
numer_feature = standardscaler.fit_transform(JobHour_by_StageMarket["POTENTIAL_REV_AMT"].values.reshape(-1, 1))
numer_feature = pd.DataFrame(numer_feature, columns=["POTENTIAL_REV_AMT"])
#views.py
from flask import abort, jsonify, send_from_directory, render_template, request, redirect, url_for, send_file, make_response
from app import app
from models import *
import os
import csv
import json
import uuid
import pandas as pd
import requests
import requests_cache
import metadata_validator
import config
import pandas as pd
import ast
from ccmsproteosafepythonapi import proteosafe
requests_cache.install_cache('demo_cache', allowable_codes=(200, 404, 500))
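# Responses (including 404s and 500s) are cached locally, so repeated ontology and dataset lookups do not hit the remote APIs on every request.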
black_list_attribute = ["SubjectIdentifierAsRecorded", "UniqueSubjectID", "UBERONOntologyIndex", "DOIDOntologyIndex", "ComorbidityListDOIDIndex"]
"""Resolving ontologies only if they need to be"""
def resolve_ontology(attribute, term):
if attribute == "ATTRIBUTE_BodyPart":
url = "https://www.ebi.ac.uk/ols/api/ontologies/uberon/terms?iri=http://purl.obolibrary.org/obo/%s" % (term.replace(":", "_"))
try:
requests.get(url)
ontology_json = json.loads(requests.get(url).text)
#print(json.dumps(ontology_json))
return ontology_json["_embedded"]["terms"][0]["label"]
except KeyboardInterrupt:
raise
except:
return term
if attribute == "ATTRIBUTE_Disease":
url = "https://www.ebi.ac.uk/ols/api/ontologies/doid/terms?iri=http://purl.obolibrary.org/obo/%s" % (term.replace(":", "_"))
try:
ontology_json = requests.get(url).json()
return ontology_json["_embedded"]["terms"][0]["label"]
except KeyboardInterrupt:
raise
except:
return term
if attribute == "ATTRIBUTE_DatasetAccession":
try:
url = f"https://massive.ucsd.edu/ProteoSAFe//proxi/v0.1/datasets?filter={term}&function=datasets"
dataset_information = requests.get(url).json()
return dataset_information["title"]
except:
raise
#raise Exception(url)
return term
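# Usage sketch (the term and returned label are illustrative; a live EBI OLS lookup is needed):
#   resolve_ontology("ATTRIBUTE_BodyPart", "UBERON:0000178")  # -> e.g. "blood", or the raw term if the lookup fails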
def count_compounds_in_files(filelist1, filelist2, filelist3, filelist4, filelist5, filelist6):
output_list = []
input_fileset1 = set(filelist1)
input_fileset2 = set(filelist2)
input_fileset3 = set(filelist3)
input_fileset4 = set(filelist4)
input_fileset5 = set(filelist5)
input_fileset6 = set(filelist6)
all_compounds = Compound.select()
for my_compound in all_compounds:
my_files = Filename.select().join(CompoundFilenameConnection).where(CompoundFilenameConnection.compound==my_compound)
my_files_set = set([one_file.filepath for one_file in my_files])
intersection_set1 = input_fileset1.intersection(my_files_set)
intersection_set2 = input_fileset2.intersection(my_files_set)
intersection_set3 = input_fileset3.intersection(my_files_set)
intersection_set4 = input_fileset4.intersection(my_files_set)
intersection_set5 = input_fileset5.intersection(my_files_set)
intersection_set6 = input_fileset6.intersection(my_files_set)
output_dict = {}
output_dict["compound"] = my_compound.compoundname
include_row = False
output_dict["count1"] = len(intersection_set1)
if len(filelist1) > 0:
output_dict["count1_norm"] = int(float(len(intersection_set1)) / float(len(filelist1)) * 100.0)
else:
output_dict["count1_norm"] = 0
output_dict["count2"] = len(intersection_set2)
if len(filelist2) > 0:
output_dict["count2_norm"] = int(float(len(intersection_set2)) / float(len(filelist2)) * 100.0)
else:
output_dict["count2_norm"] = 0
output_dict["count3"] = len(intersection_set3)
if len(filelist3) > 0:
output_dict["count3_norm"] = int(float(len(intersection_set3)) / float(len(filelist3)) * 100.0)
else:
output_dict["count3_norm"] = 0
output_dict["count4"] = len(intersection_set4)
if len(filelist4) > 0:
output_dict["count4_norm"] = int(float(len(intersection_set4)) / float(len(filelist4)) * 100.0)
else:
output_dict["count4_norm"] = 0
output_dict["count5"] = len(intersection_set5)
if len(filelist5) > 0:
output_dict["count5_norm"] = int(float(len(intersection_set5)) / float(len(filelist5)) * 100.0)
else:
output_dict["count5_norm"] = 0
output_dict["count6"] = len(intersection_set6)
if len(filelist6) > 0:
output_dict["count6_norm"] = int(float(len(intersection_set6)) / float(len(filelist6)) * 100.0)
else:
output_dict["count6_norm"] = 0
counts_total = output_dict["count1"] + output_dict["count2"] + output_dict["count3"] + output_dict["count4"] + output_dict["count5"] + output_dict["count6"]
if counts_total > 0:
output_list.append(output_dict)
return output_list
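# Usage sketch (file paths are illustrative): pass one file list per comparison group G1..G6;
# each returned dict holds, per group, the number of that group's files containing the compound
# ("countN") and that number as a percentage of the group's size ("countN_norm"), e.g.
#   count_compounds_in_files(["a.mzXML", "b.mzXML"], [], [], [], [], [])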
def count_tags_in_files(filelist1, filelist2, filelist3, filelist4, filelist5, filelist6):
output_list = []
input_fileset1 = set(filelist1)
input_fileset2 = set(filelist2)
input_fileset3 = set(filelist3)
input_fileset4 = set(filelist4)
input_fileset5 = set(filelist5)
input_fileset6 = set(filelist6)
all_tags = CompoundTag.select()
for my_tag in all_tags:
my_files = Filename.select().join(CompoundTagFilenameConnection).where(CompoundTagFilenameConnection.compoundtag==my_tag)
my_files_set = set([one_file.filepath for one_file in my_files])
intersection_set1 = input_fileset1.intersection(my_files_set)
intersection_set2 = input_fileset2.intersection(my_files_set)
intersection_set3 = input_fileset3.intersection(my_files_set)
intersection_set4 = input_fileset4.intersection(my_files_set)
intersection_set5 = input_fileset5.intersection(my_files_set)
intersection_set6 = input_fileset6.intersection(my_files_set)
output_dict = {}
output_dict["compound"] = my_tag.tagname
include_row = False
output_dict["count1"] = len(intersection_set1)
if len(filelist1) > 0:
output_dict["count1_norm"] = int(float(len(intersection_set1)) / float(len(filelist1)) * 100.0)
else:
output_dict["count1_norm"] = 0
output_dict["count2"] = len(intersection_set2)
if len(filelist2) > 0:
output_dict["count2_norm"] = int(float(len(intersection_set2)) / float(len(filelist2)) * 100.0)
else:
output_dict["count2_norm"] = 0
output_dict["count3"] = len(intersection_set3)
if len(filelist3) > 0:
output_dict["count3_norm"] = int(float(len(intersection_set3)) / float(len(filelist3)) * 100.0)
else:
output_dict["count3_norm"] = 0
output_dict["count4"] = len(intersection_set4)
if len(filelist4) > 0:
output_dict["count4_norm"] = int(float(len(intersection_set4)) / float(len(filelist4)) * 100.0)
else:
output_dict["count4_norm"] = 0
output_dict["count5"] = len(intersection_set5)
if len(filelist5) > 0:
output_dict["count5_norm"] = int(float(len(intersection_set5)) / float(len(filelist5)) * 100.0)
else:
output_dict["count5_norm"] = 0
output_dict["count6"] = len(intersection_set6)
if len(filelist6) > 0:
output_dict["count6_norm"] = int(float(len(intersection_set6)) / float(len(filelist6)) * 100.0)
else:
output_dict["count6_norm"] = 0
counts_total = output_dict["count1"] + output_dict["count2"] + output_dict["count3"] + output_dict["count4"] + output_dict["count5"] + output_dict["count6"]
if counts_total > 0:
output_list.append(output_dict)
return output_list
@app.route('/filename', methods=['GET'])
def getfilename():
query_filename = request.args["query"].replace("/spectrum/", "/ccms_peak/")
expanded_attributes = request.args.get("expanded", "false")
filepath_db = Filename.select().where(Filename.filepath == query_filename)
if len(filepath_db) == 0:
return "[]"
all_connections = FilenameAttributeConnection.select().where(FilenameAttributeConnection.filename == filepath_db)
resolved_terms = []
for connection in all_connections:
attribute_name = connection.attribute.categoryname
attribute_term = connection.attributeterm.term
resolved_term = resolve_ontology(attribute_name, attribute_term)
if expanded_attributes == "false" and attribute_name:
resolved_terms.append(resolved_term)
if expanded_attributes == "true" and not(attribute_name):
resolved_terms.append(resolved_term)
return json.dumps(resolved_terms)
@app.route('/filenamedict', methods=['GET'])
def queryfilename():
query_filename = request.args["query"].replace("/spectrum/", "/ccms_peak/")
expanded_attributes = request.args.get("expanded", "false")
all_attributes = request.args.get("allattributes", "false")
filepath_db = Filename.select().where(Filename.filepath == query_filename)
if len(filepath_db) == 0:
return "[]"
all_connections = FilenameAttributeConnection.select().where(FilenameAttributeConnection.filename == filepath_db)
resolved_terms = []
for connection in all_connections:
attribute_name = connection.attribute.categoryname
attribute_term = connection.attributeterm.term
resolved_term = resolve_ontology(attribute_name, attribute_term)
if all_attributes == "true":
resolved_terms.append({"attribute_name": attribute_name, "attribute_term" : resolved_term})
else:
if expanded_attributes == "false" and attribute_name:
resolved_terms.append({"attribute_name": attribute_name, "attribute_term" : resolved_term})
if expanded_attributes == "true" and not(attribute_name):
resolved_terms.append({"attribute_name": attribute_name, "attribute_term" : resolved_term})
return json.dumps(resolved_terms)
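# Example response body (values are illustrative):
#   [{"attribute_name": "ATTRIBUTE_BodyPart", "attribute_term": "blood"},
#    {"attribute_name": "ATTRIBUTE_Disease", "attribute_term": "no disease reported"}]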
@app.route('/attributes', methods=['GET'])
def viewattributes():
all_attributes = Attribute.select()
output_list = []
for attribute in all_attributes:
all_terms = AttributeTerm.select().join(FilenameAttributeConnection).join(Attribute).where(Attribute.categoryname == attribute.categoryname).group_by(AttributeTerm.term)
output_dict = {}
output_dict["attributename"] = attribute.categoryname
output_dict["attributedisplay"] = attribute.categoryname.replace("ATTRIBUTE_", "").replace("Analysis_", "").replace("Subject_", "").replace("Curated_", "")
output_dict["countterms"] = len(all_terms)
if attribute.categoryname in black_list_attribute:
continue
else:
output_list.append(output_dict)
output_list = sorted(output_list, key=lambda x: x["attributedisplay"], reverse=False)
return json.dumps(output_list)
#Returns all the terms given an attribute along with file counts for each term
@app.route('/attribute/<attribute>/attributeterms', methods=['GET'])
def viewattributeterms(attribute):
attribute_db = Attribute.select().where(Attribute.categoryname == attribute)
all_terms_db = AttributeTerm.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attribute == attribute_db).group_by(AttributeTerm.term)
filters_list = json.loads(request.values.get('filters', "[]"))
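# Each filter entry is expected to look like (illustrative values):
#   {"attributename": "ATTRIBUTE_BodyPart", "attributeterm": "UBERON:0000178"}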
output_list = []
for attribute_term_db in all_terms_db:
all_files_db = Filename.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attributeterm == attribute_term_db).where(FilenameAttributeConnection.attribute == attribute)
all_files = set([file_db.filepath for file_db in all_files_db])
#Adding the filter
all_filtered_files_list = [all_files]
for filterobject in filters_list:
new_db = Filename.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attributeterm == filterobject["attributeterm"]).where(FilenameAttributeConnection.attribute == filterobject["attributename"])
all_filtered_files_list.append(set([file_db.filepath for file_db in new_db]))
intersection_set = set.intersection(*all_filtered_files_list)
if len(intersection_set) > 0:
output_dict = {}
output_dict["attributename"] = attribute
output_dict["attributeterm"] = attribute_term_db.term
output_dict["ontologyterm"] = resolve_ontology(attribute, attribute_term_db.term)
output_dict["countfiles"] = len(intersection_set)
output_list.append(output_dict)
return json.dumps(output_list)
#Returns all the files for a given attribute and attribute term
@app.route('/attribute/<attribute>/attributeterm/<term>/files', methods=['GET'])
def viewfilesattributeattributeterm(attribute, term):
all_files_db = Filename.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attributeterm == term).where(FilenameAttributeConnection.attribute == attribute)
all_files = set([file_db.filepath for file_db in all_files_db])
filters_list = json.loads(request.args['filters'])
all_filtered_files_list = [all_files]
for filterobject in filters_list:
new_db = Filename.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attributeterm == filterobject["attributeterm"]).where(FilenameAttributeConnection.attribute == filterobject["attributename"])
all_filtered_files_list.append(set([file_db.filepath for file_db in new_db]))
intersection_set = set.intersection(*all_filtered_files_list)
output_list = []
for filepath in intersection_set:
output_dict = {}
output_dict["attribute"] = attribute
output_dict["attributeterm"] = term
output_dict["filename"] = filepath
output_list.append(output_dict)
return json.dumps(output_list)
#Summarize Files Per Comparison Group
@app.route('/explorer', methods=['POST'])
def summarizefiles():
all_files_G1 = json.loads(request.values.get("G1", "[]"))
all_files_G2 = json.loads(request.values.get("G2", "[]"))
all_files_G3 = json.loads(request.values.get("G3", "[]"))
all_files_G4 = json.loads(request.values.get("G4", "[]"))
all_files_G5 = json.loads(request.values.get("G5", "[]"))
all_files_G6 = json.loads(request.values.get("G6", "[]"))
output = count_compounds_in_files(all_files_G1, all_files_G2, all_files_G3, all_files_G4, all_files_G5, all_files_G6)
return json.dumps(output)
# Lists all Compounds
@app.route('/compounds', methods=['GET'])
def querycompounds():
file_list = []
try:
file_list = ast.literal_eval(request.args["files"])["filenames"]
except:
pass
all_compounds = []
#in the case we display all compounds from all files
if len(file_list) == 0:
all_compounds_db = CompoundFilenameConnection.select(CompoundFilenameConnection.compound, fn.COUNT(CompoundFilenameConnection.compound).alias('count')).join(Compound).group_by(CompoundFilenameConnection.compound).dicts()
for compound in all_compounds_db:
compound_dict = {}
compound_dict["compound"]= compound["compound"]
compound_dict["count"] = compound["count"]
all_compounds.append(compound_dict)
#in the case of file filtration based on metadata
else:
all_compounds_db = CompoundFilenameConnection.select(CompoundFilenameConnection.compound, fn.COUNT(CompoundFilenameConnection.compound).alias('count')).where(CompoundFilenameConnection.filename.in_(file_list)).join(Compound).group_by(CompoundFilenameConnection.compound).dicts()
for compound in all_compounds_db:
compound_dict = {}
compound_dict["compound"] = compound["compound"]
compound_dict["count"] = compound["count"]
all_compounds.append(compound_dict)
return json.dumps(all_compounds)
@app.route('/compoundfilename', methods=['GET'])
def queryfilesbycompound():
compoundname = request.args['compoundname']
compound_db = Compound.select().where(Compound.compoundname == compoundname)
filenames_db = Filename.select().join(CompoundFilenameConnection).where(CompoundFilenameConnection.compound==compound_db)
output_filenames = []
for filename in filenames_db:
output_filenames.append({"filepath" : filename.filepath})
return json.dumps(output_filenames)
@app.route('/compoundenrichment', methods=['POST'])
def compoundenrichment():
blacklist_attributes = ["ATTRIBUTE_DatasetAccession", "ATTRIBUTE_Curated_BodyPartOntologyIndex", "filename", "UniqueSubjectID", "UBERONOntologyIndex", "SubjectIdentifierAsRecorded", "SampleCollectionDateandTime", "LatitudeandLongitude", "InternalStandardsUsed", "DepthorAltitudeMeters", "DOIDOntologyIndex", "Country", "ComorbidityListDOIDIndex", "AgeInYears"]
compoundname = request.form['compoundname']
compound_db = Compound.select().where(Compound.compoundname == compoundname)
compound_filenames = [filename.filepath for filename in Filename.select().join(CompoundFilenameConnection).where(CompoundFilenameConnection.compound==compound_db)]
enrichment_list = []
if "filenames" in request.form:
filter_filenames = set(json.loads(request.form["filenames"]))
if len(filter_filenames) == 0:
filter_filenames = set([filename.filepath for filename in Filename.select()])
else:
filter_filenames = set([filename.filepath for filename in Filename.select()])
all_metadata = FilenameAttributeConnection.select(Attribute.categoryname, AttributeTerm.term, fn.COUNT(FilenameAttributeConnection.filename).alias('ct')).join(Attribute).switch(FilenameAttributeConnection).join(AttributeTerm).group_by(Attribute.categoryname, AttributeTerm.term).dicts()
for attribute_term_pair in all_metadata:
# if attribute_term_pair["categoryname"].find("ATTRIBUTE_") == -1:
# continue
if attribute_term_pair["categoryname"] in blacklist_attributes:
continue
attribute_files_db = Filename.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attributeterm == attribute_term_pair["term"]).where(FilenameAttributeConnection.attribute == attribute_term_pair["categoryname"])
attribute_filenames = set([filename.filepath for filename in attribute_files_db]).intersection(filter_filenames)
if len(attribute_filenames) > 0:
intersection_filenames = set(compound_filenames).intersection(set(attribute_filenames)).intersection(filter_filenames)
enrichment_dict = {}
enrichment_dict["attribute_name"] = attribute_term_pair["categoryname"]
enrichment_dict["attribute_term"] = attribute_term_pair["term"]
enrichment_dict["totalfiles"] = len(attribute_filenames)
enrichment_dict["compoundfiles"] = len(intersection_filenames)
enrichment_dict["percentage"] = len(intersection_filenames)/float(len(attribute_filenames))
enrichment_list.append(enrichment_dict)
enrichment_list = sorted(enrichment_list, key=lambda list_object: list_object["percentage"], reverse=True)
# Creating Bokeh Plot Here
enrichment_df = pd.DataFrame(enrichment_list)
# Finding all non-zero entries
enrichment_df = enrichment_df[enrichment_df["totalfiles"] != 0]
all_attributes = list(set(list(enrichment_df["attribute_name"])))
from bokeh.models import Panel, Tabs
from bokeh.plotting import figure
from bokeh.embed import components
all_tabs = []
for attribute in all_attributes:
filtered_df = enrichment_df[enrichment_df["attribute_name"] == attribute]
filtered_df = filtered_df[filtered_df["percentage"] > 0]
all_terms = list(filtered_df["attribute_term"])
all_percentage = list(filtered_df["percentage"])
plot = figure(x_range=all_terms, plot_height=300, plot_width=1200, sizing_mode="scale_width", title="{} Percentage of Terms".format(attribute))
plot.vbar(x=all_terms, top=all_percentage, width=0.9)
tab = Panel(child=plot, title=attribute)
all_tabs.append(tab)
tabs = Tabs(tabs=all_tabs)
script, div = components(tabs)
drawing_dict = {}
drawing_dict["div"] = div
drawing_dict["script"] = script
return_dict = {}
return_dict["enrichment_list"] = enrichment_list
return_dict["drawings"] = drawing_dict
return json.dumps(return_dict)
@app.route('/filesenrichment', methods=['POST'])
def filesenrichment():
blacklist_attributes = ["ATTRIBUTE_DatasetAccession", "ATTRIBUTE_Curated_BodyPartOntologyIndex"]
compound_filenames = set(json.loads(request.form["filenames"]))
enrichment_list = []
filter_filenames = set([filename.filepath for filename in Filename.select()])
all_metadata = FilenameAttributeConnection.select(Attribute.categoryname, AttributeTerm.term, fn.COUNT(FilenameAttributeConnection.filename).alias('ct')).join(Attribute).switch(FilenameAttributeConnection).join(AttributeTerm).group_by(Attribute.categoryname, AttributeTerm.term).dicts()
for attribute_term_pair in all_metadata:
if attribute_term_pair["categoryname"].find("ATTRIBUTE_") == -1:
continue
if attribute_term_pair["categoryname"] in blacklist_attributes:
continue
attribute_files_db = Filename.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attributeterm == attribute_term_pair["term"]).where(FilenameAttributeConnection.attribute == attribute_term_pair["categoryname"])
attribute_filenames = set([filename.filepath for filename in attribute_files_db]).intersection(filter_filenames)
if len(attribute_filenames) > 0:
intersection_filenames = set(compound_filenames).intersection(set(attribute_filenames)).intersection(filter_filenames)
enrichment_dict = {}
enrichment_dict["attribute_name"] = attribute_term_pair["categoryname"]
enrichment_dict["attribute_term"] = attribute_term_pair["term"]
enrichment_dict["totalfiles"] = len(attribute_filenames)
enrichment_dict["compoundfiles"] = len(intersection_filenames)
enrichment_dict["percentage"] = len(intersection_filenames)/float(len(attribute_filenames))
enrichment_list.append(enrichment_dict)
enrichment_list = sorted(enrichment_list, key=lambda list_object: list_object["percentage"], reverse=True)
return json.dumps(enrichment_list)
@app.route('/tagexplorer', methods=['POST'])
def summarizetagfiles():
all_files_G1 = json.loads(request.form["G1"])
all_files_G2 = json.loads(request.form["G2"])
all_files_G3 = json.loads(request.form["G3"])
all_files_G4 = json.loads(request.form["G4"])
all_files_G5 = json.loads(request.form["G5"])
all_files_G6 = json.loads(request.form["G6"])
output = count_tags_in_files(all_files_G1, all_files_G2, all_files_G3, all_files_G4, all_files_G5, all_files_G6)
return json.dumps(output)
@app.route('/plottags', methods=['POST'])
def plottags():
import os
uuid_to_use = str(uuid.uuid4())
input_filename = os.path.join("static", "temp", uuid_to_use + ".tsv")
all_counts = json.loads(request.form["tagcounts"])
sourcelevel = request.form["sourcelevel"]
with open(input_filename, 'w') as csvfile:
field_name = ["source information", "G1 number", "G1 percent", "G2 number", "G2 percent", "G3 number", "G3 percent", "G4 number", "G4 percent", "G5 number", "G5 percent", "G6 number", "G6 percent"]
writer = csv.DictWriter(csvfile, fieldnames=field_name, delimiter="\t")
writer.writeheader()
for row in all_counts:
new_dict = {}
new_dict["source information"] = row["compound"]
new_dict["G1 number"] = row["count1"]
new_dict["G1 percent"] = row["count1_norm"]
new_dict["G2 number"] = row["count2"]
new_dict["G2 percent"] = row["count2_norm"]
new_dict["G3 number"] = row["count3"]
new_dict["G3 percent"] = row["count3_norm"]
new_dict["G4 number"] = row["count4"]
new_dict["G4 percent"] = row["count4_norm"]
new_dict["G5 number"] = row["count5"]
new_dict["G6 percent"] = row["count5_norm"]
new_dict["G6 number"] = row["count6"]
new_dict["G6 percent"] = row["count6_norm"]
writer.writerow(new_dict)
output_counts_png = os.path.join("static", "temp", uuid_to_use + "_count.png")
output_percent_png = os.path.join("static", "temp", uuid_to_use + "_percent.png")
cmd = "Rscript %s %s %s %s %s" % ("Meta_Analysis_Plot_Example.r", input_filename, output_counts_png, output_percent_png, sourcelevel)
os.system(cmd)
return json.dumps({"uuid" : uuid_to_use})
""" Production Views """
@app.route('/', methods=['GET'])
def homepage():
total_files = Filename.select().count()
total_identifications = CompoundFilenameConnection.select().count()
total_compounds = Compound.select().count()
return render_template('homepage.html', total_files=total_files, total_identifications=total_identifications, total_compounds=total_compounds)
@app.route('/globalmultivariate', methods=['GET'])
def globalmultivariate():
return render_template('globalmultivariate.html')
@app.route('/comparemultivariate', methods=['GET', 'POST'])
def comparemultivariate():
return render_template('comparemultivariate.html')
@app.route('/compoundslist', methods=['GET'])
def compoundslist():
return render_template('compoundslist.html')
@app.route('/compoundfilenamelist', methods=['GET'])
def compoundfilenamelist():
return render_template('compoundfilelist.html')
#Explorer dashboard page
@app.route('/explorerdashboard', methods=['GET'])
def explorerdashboard():
return render_template('explorerdashboard.html')
@app.route('/compoundenrichmentdashboard', methods=['GET'])
def compoundenrichmentview():
return render_template('compoundenrichment.html')
@app.route('/metadataselection', methods=['GET'])
def metadataselection():
return render_template('metadataselection.html')
@app.route('/heartbeat', methods=['GET'])
def heartbeat():
return "{'status' : 'up'}"
@app.route('/datalookup', methods=['GET'])
def datalookup():
return render_template('datalookup.html')
@app.route('/dump', methods=['GET'])
def dump():
return send_file(config.PATH_TO_ORIGINAL_MAPPING_FILE, cache_timeout=1, as_attachment=True, attachment_filename="all_sampleinformation.tsv")
@app.route('/ReDUValidator', methods = ["GET"])
def ReDUValidator():
return render_template('ReDUValidator.html')
# API End Points
@app.route('/metabatchdump', methods=['GET'])
def metabatchdump():
df = pd.read_table(config.PATH_TO_ORIGINAL_MAPPING_FILE)
filenames = df["filename"].tolist()
batch_size = 1000
batch_num = len(filenames) // batch_size
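# Note: floor division means any trailing partial batch (fewer than batch_size files) is not emitted.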
output_list = []
for x in range(batch_num):
files = filenames[(batch_size * x):(batch_size * (x+1))]
string_temp = ';'.join(files)
output_dict = {}
output_dict["filename"] = string_temp
output_dict["id"] = x
output_list.append(output_dict)
new_file = pd.DataFrame(output_list)
return new_file.to_csv(sep="\t", index=False)
def allowed_file_metadata(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ["tsv"]
@app.route('/validate', methods=['POST'])
def validate():
request_file = request.files['file']
#Invalid File Types
if not allowed_file_metadata(request_file.filename):
error_dict = {}
error_dict["header"] = "Incorrect File Type"
error_dict["line_number"] = "N/A"
error_dict["error_string"] = "Please provide a tab separated file"
validation_dict = {}
validation_dict["status"] = False
validation_dict["errors"] = [error_dict]
validation_dict["stats"] = []
validation_dict["stats"].append({"type":"total_rows", "value": 0})
validation_dict["stats"].append({"type":"valid_rows", "value": 0})
return json.dumps(validation_dict)
local_filename = os.path.join(app.config['UPLOAD_FOLDER'], str(uuid.uuid4()))
request_file.save(local_filename)
"""Trying stuff out with pandas"""
metadata_df = pd.read_csv(local_filename, sep="\t")
metadata_df.to_csv(local_filename, index=False, sep="\t")
metadata_validator.rewrite_metadata(local_filename)
pass_validation, failures, errors_list, valid_rows, total_rows = metadata_validator.perform_validation(local_filename)
validation_dict = {}
validation_dict["status"] = pass_validation
validation_dict["errors"] = errors_list
validation_dict["stats"] = []
validation_dict["stats"].append({"type":"total_rows", "value":total_rows})
validation_dict["stats"].append({"type":"valid_rows", "value":len(valid_rows)})
"""Try to find datasets in public data"""
try:
dataset_success, result_string, valid_items = metadata_validator.perform_validation_against_massive(local_filename)
validation_dict["stats"].append({"type":"massive_files_founds", "value": valid_items})
except:
print("Massive validation error")
try:
os.remove(local_filename)
except:
print("Cannot Remove File")
return json.dumps(validation_dict)
import uuid
import redu_pca
import config
#This displays global PCoA of public data as a web url
@app.route("/displayglobalmultivariate", methods = ["GET"])
def displayglobalmultivariate():
if not (os.path.isfile(config.PATH_TO_ORIGINAL_PCA) and os.path.isfile(config.PATH_TO_EIGS)):
print("Missing Global PCA Calculation, Calculating")
if not os.path.isfile(config.PATH_TO_GLOBAL_OCCURRENCES):
#Get the actual all identifictions file
import urllib.request as request
from contextlib import closing
import shutil
with closing(request.urlopen('ftp://massive.ucsd.edu/MSV000084206/other/ReDU_all_identifications.tsv')) as r:
with open(config.PATH_TO_GLOBAL_OCCURRENCES, 'wb') as f:
shutil.copyfileobj(r, f)
redu_pca.calculate_master_projection(config.PATH_TO_GLOBAL_OCCURRENCES)
print("Begin Getting Global PCA")
df_temp = pd.read_csv(config.PATH_TO_ORIGINAL_PCA)
full_file_list = df_temp["Unnamed: 0"].tolist()
df_temp.drop("Unnamed: 0", axis = 1, inplace = True)
sklearn_output = df_temp.values
component_matrix = pd.read_csv(config.PATH_TO_COMPONENT_MATRIX)
eig_var_df = pd.read_csv(config.PATH_TO_EIGS)
eigenvalues = eig_var_df["eigenvalues"].tolist()
percent_variance = eig_var_df["percent_variance"].tolist()
output_file = ("./tempuploads/global")
redu_pca.emperor_output(sklearn_output, full_file_list, eigenvalues, percent_variance, output_file)
return send_file("./tempuploads/global/index.html")
###This takes the file selected PCA and redirects it to a new page for user viewing
@app.route('/fileselectedpcaviews', methods=['GET'])
def selectedpcaviews():
pcaid = str(request.args['pcaid'])
return(send_file(os.path.join('./tempuploads', pcaid, 'index.html')))
###This is the backend function for re-calculation of PCA based on the files selected by the user
@app.route('/fileselectedpca', methods=['POST'])
def fileselectedpca():
files_of_interest = json.loads(request.form["files"])
files_of_interest = [item[2:] for item in files_of_interest]
#making sure the abbreviated metadata is available
if os.path.isfile(config.PATH_TO_PARSED_GLOBAL_OCCURRENCES):
print("Parsed Global Occurrences File Found")
full_occ_table = pd.read_table(config.PATH_TO_PARSED_GLOBAL_OCCURRENCES)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 5 15:33:50 2019
@author: luc
"""
#%% Import Libraries
import numpy as np
import pandas as pd
import itertools
from stimuli_dictionary import cued_stim, free_stim, cued_stim_prac, free_stim_prac
def randomize(ID, Age, Gender, Handedness):
'''
Create a randomized and counterbalanced stimulus list for the current participant
Parameters
----------
ID : INT
The subject ID. Based on the subject ID the correct counterbalancing is determined
Returns
-------
design : Pandas DataFrame
The dataframe containing the complete stimulus list (including practice trials)
keys: Dictionary
the response keys for the free phase
'''
#%% Variables
# experiment variables
nBlocks = 6
Phases = ['prac_cued', 'prac_free', 'cued', 'free']
nstim = 60 # sample 60 stim from each target_type
# sample from main stimulus set without replacement
# randomize word targets to avoid relationship reward - stimulus
for idx, name in enumerate(['lism','lila','nosm','nola']):
cued_stim[name] = np.random.choice(cued_stim[name], size = nstim, replace = False)
wide_cued = pd.DataFrame(cued_stim)
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def df_checks():
"""fixture dataframe"""
return pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
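# A minimal passing call is sketched below for contrast with the error cases in this module;
# it assumes the pivot_longer registration used by the other tests here, and the expected
# shape (18 rows; columns famid/birth/age/ht) follows from the df_checks fixture.
def test_pivot_longer_valid_call_example(df_checks):
    result = df_checks.pivot_longer(
        index=["famid", "birth"],
        names_to=(".value", "age"),
        names_pattern=r"(ht)(\d)",
    )
    assert set(result.columns) == {"famid", "birth", "age", "ht"}
    assert len(result) == 18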
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for column_level."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the index."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for column_names."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for names_to."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if names_to is a sequence
and the wrong type is provided for entries
in names_to.
"""
with pytest.raises(TypeError, match="1 in names_to.+"):
df_checks.pivot_longer(names_to=[1])
def test_duplicate_names_to(df_checks):
"""Raise error if names_to contains duplicates."""
with pytest.raises(ValueError, match="y is duplicated in names_to."):
df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)")
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both names_sep
and names_pattern is provided.
"""
with pytest.raises(
ValueError,
match="Only one of names_pattern or names_sep should be provided.",
):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)"
)
def test_name_pattern_wrong_type(df_checks):
"""Raise TypeError if the wrong type provided for names_pattern."""
with pytest.raises(TypeError, match="names_pattern should be one of.+"):
df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007)
def test_name_pattern_no_names_to(df_checks):
"""Raise ValueError if names_pattern and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)")
def test_name_pattern_groups_len(df_checks):
"""
Raise ValueError if names_pattern
and the number of groups
differs from the length of names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of groups in names_pattern.+",
):
df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)")
def test_names_pattern_wrong_subtype(df_checks):
"""
Raise TypeError if names_pattern is a list/tuple
and wrong subtype is supplied.
"""
with pytest.raises(TypeError, match="1 in names_pattern.+"):
df_checks.pivot_longer(
names_to=["ht", "num"], names_pattern=[1, "\\d"]
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple
and wrong number of items in names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["^ht", ".+i.+"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise Error if names_pattern is a list/tuple and
.value in names_to.
"""
with pytest.raises(
ValueError,
match=".value is not accepted in names_to "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"]
)
def test_name_sep_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for names_sep."""
with pytest.raises(TypeError, match="names_sep should be one of.+"):
df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"])
def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError, match="values_to should be one of.+"):
df_checks.pivot_longer(values_to={"salvo"})
def test_values_to_wrong_type_names_pattern(df_checks):
"""
Raise TypeError if `values_to` is a list,
and names_pattern is not.
"""
with pytest.raises(
TypeError,
match="values_to can be a list/tuple only "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_names_pattern_unequal_length(df_checks):
"""
Raise ValueError if `values_to` is a list,
and the length of names_pattern
does not match the length of values_to.
"""
with pytest.raises(
ValueError,
match="The length of values_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
values_to=["salvo"],
names_pattern=["ht", r"\d"],
names_to=["foo", "bar"],
)
def test_values_to_names_seq_names_to(df_checks):
"""
Raise ValueError if `values_to` is a list,
and intersects with names_to.
"""
with pytest.raises(
ValueError, match="salvo in values_to already exists in names_to."
):
df_checks.pivot_longer(
values_to=["salvo"], names_pattern=["ht"], names_to="salvo"
)
def test_sub_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains non strings."""
with pytest.raises(TypeError, match="1 in values_to.+"):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=[1, "salvo"],
)
def test_duplicate_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains duplicates."""
with pytest.raises(ValueError, match="salvo is duplicated in values_to."):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=["salvo", "salvo"],
)
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if values_to already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="birth", values_to="birth")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if values_to is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="num", names_to="num")
def test_column_multiindex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_multiindex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_index_tuple_multiindex(df_multi):
"""
Raise ValueError if index is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_multiindex(df_multi):
"""
Raise ValueError if column_names is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_sort_by_appearance(df_checks):
"""Raise error if sort_by_appearance is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(df_checks):
"""Raise error if ignore_index is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_names_to_index(df_checks):
"""
Raise ValueError if there is no names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to="famid",
index="famid",
)
def test_names_sep_pattern_names_to_index(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to=["dim", "famid"],
names_sep="_",
index="famid",
)
def test_dot_value_names_to_columns_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist in the new dataframe\'s columns.+",
):
df_checks.pivot_longer(
index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)"
)
def test_values_to_seq_index_intersect(df_checks):
"""
Raise ValueError if values_to is a sequence,
and intersects with the index
"""
match = ".+values_to already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(ValueError, match=rf"{match}"):
df_checks.pivot_longer(
index="famid",
names_to=("value", "ht"),
names_pattern=["ht", r"\d"],
values_to=("famid", "foo"),
)
def test_dot_value_names_to_index_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the index
"""
match = ".+already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(
ValueError,
match=rf"{match}",
):
df_checks.rename(columns={"famid": "ht"}).pivot_longer(
index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)"
)
def test_names_pattern_list_empty_any(df_checks):
"""
Raise ValueError if names_pattern is a list,
and not all matches are returned.
"""
with pytest.raises(
ValueError, match="No match was returned for the regex.+"
):
df_checks.pivot_longer(
index=["famid", "birth"],
names_to=["ht"],
names_pattern=["rar"],
)
def test_names_pattern_no_match(df_checks):
"""Raise error if names_pattern is a regex and returns no matches."""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(rar)(.)",
)
def test_names_pattern_incomplete_match(df_checks):
"""
Raise error if names_pattern is a regex
and returns incomplete matches.
"""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(ht)(.)",
)
def test_names_sep_len(df_checks):
"""
Raise error if names_sep,
and the number of matches returned
is not equal to the length of names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=".value", names_sep="(\\d)")
def test_pivot_index_only(df_checks):
"""Test output if only index is passed."""
result = df_checks.pivot_longer(
index=["famid", "birth"],
names_to="dim",
values_to="num",
)
actual = df_checks.melt(
["famid", "birth"], var_name="dim", value_name="num"
)
assert_frame_equal(result, actual)
def test_pivot_column_only(df_checks):
"""Test output if only column_names is passed."""
result = df_checks.pivot_longer(
column_names=["ht1", "ht2"],
names_to="dim",
values_to="num",
ignore_index=False,
)
actual = df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
assert_frame_equal(result, actual)
def test_pivot_sort_by_appearance(df_checks):
"""Test output if sort_by_appearance is True."""
result = df_checks.pivot_longer(
column_names="ht*",
names_to="dim",
values_to="num",
sort_by_appearance=True,
)
actual = (
df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
.sort_index()
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pat_str(df_checks):
"""
Test output when names_pattern is a string,
and .value is present.
"""
result = (
df_checks.pivot_longer(
column_names="ht*",
names_to=(".value", "age"),
names_pattern="(.+)(.)",
sort_by_appearance=True,
)
.reindex(columns=["famid", "birth", "age", "ht"])
.astype({"age": int})
)
actual = pd.wide_to_long(
df_checks, stubnames="ht", i=["famid", "birth"], j="age"
).reset_index()
assert_frame_equal(result, actual)
def test_multiindex_column_level(df_multi):
"""
Test output from MultiIndex column,
when column_level is provided.
"""
result = df_multi.pivot_longer(
index="name", column_names="names", column_level=0
)
expected_output = df_multi.melt(
id_vars="name", value_vars="names", col_level=0
)
assert_frame_equal(result, expected_output)
def test_multiindex(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
and there is no names_sep/names_pattern.
"""
result = df_multi.pivot_longer(index=[("name", "a")])
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
there is no names_sep/names_pattern,
and names_to is provided as a sequence.
"""
result = df_multi.pivot_longer(
index=[("name", "a")], names_to=["variable_0", "variable_1"]
)
expected_output = df_multi.melt(id_vars=[("name", "a")])
|
assert_frame_equal(result, expected_output)
|
pandas.testing.assert_frame_equal
|
#!/usr/bin/env python3
import click
import pandas as pd
from Bio import Phylo
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option("-i", "--newick-tree-input", type=click.Path(exists=True), required=False, default='')
@click.option("-m", "--metadata-output", type=click.Path(exists=False), required=False, default='')
@click.option("-ma", "--metadata-aa-change", type=click.Path(exists=False), required=False, default='')
@click.option("-r", "--lineage-report", help="Pangolin report of input sequences", type=click.Path(exists=False),
required=False, default='')
@click.option("-l", "--leaflist", help="Leaves list", type=click.Path(exists=False), required=False)
def main(newick_tree_input, metadata_output, lineage_report, leaflist, metadata_aa_change):
df_lineage_report = pd.read_table(lineage_report, sep=',')
df_aa_change =
|
pd.read_table(metadata_aa_change)
|
pandas.read_table
|
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from functools import reduce
def asset_class_heatmap(df, period):
df_period = df[-period:]
mask = np.triu(df_period.corr())
plt.figure(figsize=(12.8, 12.8))
return sns.heatmap(
df_period.corr(),
annot=True,
mask=mask,
cmap='coolwarm',
square=True,
linewidths=3,
cbar_kws={"shrink": .5}
)
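# Minimal usage sketch for asset_class_heatmap (added for illustration, not
# part of the original workflow): a small synthetic daily-returns frame with
# hypothetical asset-class names, plotted over the last 60 observations.
def _demo_asset_class_heatmap(n_days=250, period=60):
    rng = np.random.default_rng(0)
    synthetic_returns = pd.DataFrame(
        rng.normal(0, 0.01, size=(n_days, 3)),
        columns=['Equities', 'Bonds', 'Cash'],
    )
    ax = asset_class_heatmap(synthetic_returns, period=period)
    plt.show()
    return ax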
def jpm_wide_to_long(df, set_date_name, set_index_name, set_values_name):
"""
:param df:
:param set_date_name:
:param set_index_name:
:param set_values_name:
:return:
"""
return (
pd.melt(
(
df
.replace('-', np.NaN)
.rename(columns={'Unnamed: 0': set_date_name})
                .set_index(set_date_name)
.transpose()
.reset_index(drop=False)
.rename(columns={'index': set_index_name})
),
id_vars=[set_index_name],
value_name=set_values_name)
.sort_values([set_index_name, set_date_name])
.reset_index(drop=True)
)
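# Hypothetical example of jpm_wide_to_long (illustration only, not part of
# the original workflow): a tiny wide extract with dates under 'Unnamed: 0'
# and one column per JPM account id, reshaped to one row per account per date.
def _demo_jpm_wide_to_long():
    wide = pd.DataFrame({
        'Unnamed: 0': ['2021-03-31', '2021-04-30'],
        'ACC1': [100.0, 101.5],
        'ACC2': ['-', 250.0],  # '-' is treated as missing
    })
    return jpm_wide_to_long(
        df=wide,
        set_date_name='Date',
        set_index_name='JPM Account Id',
        set_values_name='MV_a_m',
    )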
if __name__ == "__main__":
folder_path = "C:/Users/mnguyen/LGSS/Investments Team - SandPits - SandPits/data/input/vendors/jpm/markets/performance/2021/05/"
lgs_dictionary_path = 'C:/Users/mnguyen/LGSS/Investments Team - SandPits - SandPits/data/archive/input/dictionary/2021/05/New New Dictionary_v21.xlsx'
jpm_main_mv_path = folder_path + 'Historical Time Series - Monthly - Main Market Values.xlsx'
jpm_alts_mv_path = folder_path + 'Historical Time Series - Monthly - Alts Market Values.xlsx'
jpm_main_returns_path = folder_path + 'Historical Time Series - Monthly - Main Returns.xlsx'
jpm_alts_returns_path = folder_path + 'Historical Time Series - Monthly - Alts Returns.xlsx'
jpm_main_benchmarks_path = folder_path + 'Historical Time Series - Monthly - Main Benchmarks.xlsx'
jpm_alts_benchmarks_path = folder_path + 'Historical Time Series - Monthly - Alts Benchmarks.xlsx'
use_manager_id = [0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12]
use_account_id = [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 12]
footnote_rows = 28
df_lgs_dictionary = pd.read_excel(pd.ExcelFile(lgs_dictionary_path), sheet_name='Sheet1', header=0)
df_lgs_dictionary = df_lgs_dictionary[~df_lgs_dictionary['LGS Name'].isin(['Australian Fixed Interest', 'International Fixed Interest', 'Inflation Linked Bonds', 'Liquid Alternatives', 'Short Term Fixed Interest'])].reset_index(drop=True)
df_jpm_main_mv = jpm_wide_to_long(
df=pd.read_excel(
pd.ExcelFile(jpm_main_mv_path),
sheet_name='Sheet1',
skiprows=use_account_id,
skipfooter=footnote_rows,
header=1
),
set_date_name='Date',
set_index_name='JPM Account Id',
set_values_name='MV_a_m'
)
df_jpm_alts_mv = jpm_wide_to_long(
df=pd.read_excel(
pd.ExcelFile(jpm_alts_mv_path),
sheet_name='Sheet1',
skiprows=use_account_id,
skipfooter=footnote_rows,
header=1
),
set_date_name='Date',
set_index_name='JPM Account Id',
set_values_name='MV_a_m'
)
df_jpm_main_returns = jpm_wide_to_long(
df=pd.read_excel(
pd.ExcelFile(jpm_main_returns_path),
sheet_name='Sheet1',
skiprows=use_account_id,
skipfooter=footnote_rows,
header=1
),
set_date_name='Date',
set_index_name='JPM Account Id',
set_values_name='R_a_m_p'
)
df_jpm_alts_returns = jpm_wide_to_long(
df=pd.read_excel(
pd.ExcelFile(jpm_alts_returns_path),
sheet_name='Sheet1',
skiprows=use_account_id,
skipfooter=footnote_rows,
header=1
),
set_date_name='Date',
set_index_name='JPM Account Id',
set_values_name='R_a_m_p'
)
df_jpm_main_benchmarks = jpm_wide_to_long(
df=pd.read_excel(
pd.ExcelFile(jpm_main_benchmarks_path),
sheet_name='Sheet1',
skiprows=use_account_id,
skipfooter=footnote_rows,
header=1
),
set_date_name='Date',
set_index_name='JPM Account Id',
set_values_name='R_a_m_b'
)
df_jpm_alts_benchmarks = jpm_wide_to_long(
df=pd.read_excel(
pd.ExcelFile(jpm_alts_benchmarks_path),
sheet_name='Sheet1',
skiprows=use_account_id,
skipfooter=footnote_rows,
header=1
),
set_date_name='Date',
set_index_name='JPM Account Id',
set_values_name='R_a_m_b'
)
df_jpms = [
pd.concat([df_jpm_main_mv, df_jpm_alts_mv]),
pd.concat([df_jpm_main_returns, df_jpm_alts_returns]),
pd.concat([df_jpm_main_benchmarks, df_jpm_alts_benchmarks])
]
df_jpm = reduce(lambda x, y:
|
pd.merge(left=x, right=y, on=['JPM Account Id', 'Date'], how='inner')
|
pandas.merge
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 14:48:16 2020
@author: <NAME>
"""
import json
import requests
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fbprophet import Prophet
import math
import time
import calendar
from datetime import date, datetime
def india_world_pred():
plt.close('all')
plt.rcParams.update({'figure.max_open_warning': 0})
#**** INDIA ****
response = requests.get("https://corona.lmao.ninja/v3/covid-19/historical/india?lastdays=all")
india = json.loads(response.text)
cases=[]
deaths=[]
rec=[]
for i in india['timeline']:
for j in india['timeline'][i].items():
if i == 'cases':
cases.append(j)
elif i == 'deaths':
deaths.append(j)
else:
rec.append(j)
#creating dataframe in the structure acceptable by fb prophet
cases = pd.DataFrame(cases,columns = ['ds','y'])
deaths = pd.DataFrame(deaths,columns = ['ds','y'])
rec=pd.DataFrame(rec,columns = ['ds','y'])
#modifying the time
#year = (datetime.now()).strftime('%Y')
dates_list = []
for i in range(len(cases)):
a = cases['ds'][i].split("/")
b = a[1]+' '+calendar.month_abbr[int(a[0])]+' 20'+ a[2]
dates_list.append(b)
dates_list = pd.DataFrame(dates_list,columns = ['Date'])
#creating csv file for india
original = pd.concat([dates_list['Date'],cases['y'],deaths['y'],rec['y']],
axis=1,sort=False)
original.columns = ['Date','Cases','Deaths','Recoveries']
original.to_csv("data/india_original.csv")
#converting values to log
cases['y'] = np.log(cases['y'])
deaths['y'] = np.log(deaths['y'])
rec['y'] = np.log(rec['y'])
    #replacing infinite values (from log(0)) with 0; use .loc to avoid chained assignment
    for i in range(len(cases)):
        if math.isinf(cases['y'][i]):
            cases.loc[i, 'y'] = 0
        if math.isinf(deaths['y'][i]):
            deaths.loc[i, 'y'] = 0
        if math.isinf(rec['y'][i]):
            rec.loc[i, 'y'] = 0
###predicting cases using fb prophet
m = Prophet()
m.add_seasonality(name='daily', period=40.5, fourier_order=5)
m.fit(cases)
future = m.make_future_dataframe(periods=7)
# future.tail()
forecast = m.predict(future)
# forecast[['ds', 'yhat']].tail()
#plotting the model and saving it for cases
plot_locations = []
fig = m.plot(forecast)
location = "static/img/plot/india_plot_cases" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/india_components_cases" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
#final dataframe for cases
final_cases = pd.DataFrame()
final_cases['ds'] = forecast['ds']
final_cases['y'] = np.exp(forecast['yhat'])
final_cases = final_cases.iloc[(len(final_cases)-7):,:].reset_index()
final_cases.drop(columns = 'index',inplace=True)
###predicting deaths using fb prophet model
m = Prophet()
m.add_seasonality(name='daily', period=40.5, fourier_order=5)
m.fit(deaths)
future = m.make_future_dataframe(periods=7)
# future.tail()
forecast = m.predict(future)
# forecast[['ds', 'yhat']].tail()
#plotting the model and saving it for deaths
fig = m.plot(forecast)
location = "static/img/plot/india_plot_deaths" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/india_component_deaths" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
#final dataframe for deaths
final_deaths = pd.DataFrame()
final_deaths['ds'] = forecast['ds']
final_deaths['y'] = np.exp(forecast['yhat'])
final_deaths = final_deaths.iloc[(len(final_deaths)-7):,:].reset_index()
final_deaths.drop(columns = 'index',inplace = True)
###predicting recoveries using fb prophet model
m = Prophet()
m.add_seasonality(name='daily', period=40.5, fourier_order=5)
m.fit(rec)
future = m.make_future_dataframe(periods=7)
# future.tail()
forecast = m.predict(future)
# forecast[['ds', 'yhat']].tail()
#plotting the model and saving it for recoveries
fig = m.plot(forecast)
location = "static/img/plot/india_plot_recovered" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/india_component_recovered" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
#creating final dataframe for recoveries
final_rec = pd.DataFrame()
final_rec['ds'] = forecast['ds']
final_rec['y'] = np.exp(forecast['yhat'])
final_rec = final_rec.iloc[(len(final_rec)-7):,:].reset_index()
final_rec.drop(columns = 'index',inplace = True)
dates_list = []
for i in range(len(final_cases)):
c = final_cases['ds'][i].strftime("%m/%d/%Y")
a = c.split("/")
b = a[1]+' '+calendar.month_abbr[int(a[0])]+' '+ a[2]
dates_list.append(b)
dates_list = pd.DataFrame(dates_list,columns = ['Date'])
###creating the csv for fitted values for India
fitted = pd.concat([dates_list['Date'],final_cases['y'],final_deaths['y'],final_rec['y']],
axis=1,sort=False)
fitted.columns = ['Date','Cases','Deaths','Recoveries']
fitted.to_csv("data/india_fitted.csv")
#**** WORLD ****
url_cases = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
url_death = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
url_recovered = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
df_cases = pd.read_csv(url_cases)
df_death = pd.read_csv(url_death)
df_recovered = pd.read_csv(url_recovered)
#creating dataframe for cases
data_cases = df_cases.groupby(['Country/Region']).sum()
data_deaths = df_death.groupby(['Country/Region']).sum()
data_rec = df_recovered.groupby(['Country/Region']).sum()
data_cases.drop(columns = ['Lat','Long'],inplace=True)
data_deaths.drop(columns = ['Lat','Long'],inplace=True)
data_rec.drop(columns = ['Lat','Long'],inplace=True)
#year = (datetime.now()).strftime('%Y')
values_cases=[]
for (i,k) in zip(range(data_cases.shape[1]),data_cases.columns):
s = 0
for j in range(data_cases.shape[0]):
s = s + data_cases.iloc[j][i]
values_cases.append(( k , s ))
values_deaths=[]
for (i,k) in zip(range(data_deaths.shape[1]),data_deaths.columns):
s = 0
for j in range(data_deaths.shape[0]):
s = s + data_deaths.iloc[j][i]
values_deaths.append(( k , s ))
values_rec=[]
for (i,k) in zip(range(data_rec.shape[1]),data_rec.columns):
s = 0
for j in range(data_rec.shape[0]):
s = s + data_rec.iloc[j][i]
values_rec.append(( k , s ))
cases = pd.DataFrame(values_cases,columns = ['ds','y'])
deaths = pd.DataFrame(values_deaths,columns = ['ds','y'])
rec = pd.DataFrame(values_rec,columns = ['ds','y'])
cases_list = list(cases['y'])
deaths_list = list(deaths['y'])
rec_list = list(rec['y'])
dates_list = []
for i in range(len(cases)):
a = cases['ds'][i].split("/")
b = a[1]+' '+calendar.month_abbr[int(a[0])]+' 20'+ a[2]
dates_list.append(b)
#creating csv for original data for world
world_original = pd.DataFrame(list(zip(dates_list,cases_list,deaths_list,rec_list)),
columns = ['Date','Total Confirmed','Total Deaths','Total Recoveries'])
world_original.to_csv('data/world_original.csv')
#predicting world cases
m = Prophet()
m.add_seasonality(name='daily', period=30.5, fourier_order=5)
m.fit(cases)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
forecast[['ds', 'yhat']]
#plotting and saving figure for world cases
fig = m.plot(forecast)
location = "static/img/plot/world_plot_cases" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/world_component_cases" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
fitted_dates = []
fitted_cases = []
for i in range(len(forecast)):
a = forecast['ds'][i].strftime('%d %b %Y')
b = a.split(" ")
c = b[0]+' '+b[1]+' '+b[2]
fitted_dates.append(c)
if forecast['yhat'][i]<0:
d = 0
else:
d = forecast['yhat'][i]
fitted_cases.append(d)
#predicting world deaths
m = Prophet()
m.add_seasonality(name='daily', period=30.5, fourier_order=5)
m.fit(deaths)
future = m.make_future_dataframe(periods=7)
# future.tail()
forecast = m.predict(future)
# forecast[['ds', 'yhat']].tail()
#plotting and saving the figure for world deaths
fig = m.plot(forecast)
location = "static/img/plot/world_plot_deaths" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/world_component_deaths" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
fitted_deaths = []
for i in range(len(forecast)):
if forecast['yhat'][i]<0:
d = 0
else:
d = forecast['yhat'][i]
fitted_deaths.append(d)
#predicting world recovery
m = Prophet()
m.add_seasonality(name='daily', period=30.5, fourier_order=5)
m.fit(rec)
future = m.make_future_dataframe(periods=7)
# future.tail()
forecast = m.predict(future)
# forecast[['ds', 'yhat']].tail()
#plotting and saving for world recoveries
fig = m.plot(forecast)
location = "static/img/plot/world_plot_recovered" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/world_component_recovered" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
fitted_rec = []
for i in range(len(forecast)):
if forecast['yhat'][i]<0:
d = 0
else:
d = forecast['yhat'][i]
fitted_rec.append(d)
#creating the csv for fitted values for world
world_fitted = pd.DataFrame(list(zip(fitted_dates,fitted_cases,fitted_deaths,fitted_rec)),
columns = ['Date','Total Confirmed','Total Deaths','Total Recoveries'])
world_fitted = world_fitted.iloc[(len(world_fitted)-7):,:].reset_index()
world_fitted.drop(columns = ['index'],inplace=True)
world_fitted.to_csv('data/world_fitted.csv')
with open('json_data/plot_locations.json', 'w') as json_file:
json.dump(plot_locations, json_file)
def country_wise():
url_cases = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
url_death = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
url_recovered = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
df_cases = pd.read_csv(url_cases)
df_death = pd.read_csv(url_death)
df_recovered = pd.read_csv(url_recovered)
#year = (datetime.now()).strftime('%Y')
data_cases = df_cases.groupby(['Country/Region']).sum()
data_death = df_death.groupby(['Country/Region']).sum().reset_index()
data_recovered = df_recovered.groupby(['Country/Region']).sum().reset_index()
data_cases.drop(columns=['Lat','Long'],inplace=True)
data_death.drop(columns=['Lat','Long'],inplace=True)
data_recovered.drop(columns=['Lat','Long'],inplace=True)
date = []
for i in data_cases.columns:
a = i.split('/')
b = a[1]+ ' '+ calendar.month_abbr[int(a[0])] + ' 20'+ a[2]
date.append(b)
date.insert(0,'Country')
data_cases = data_cases.reset_index()
X_cases = data_cases.iloc[:,:].values
X_deaths = data_death.iloc[:,:].values
X_rec = data_recovered.iloc[:,:].values
new_cases = pd.DataFrame(X_cases,columns = [date])
new_deaths = pd.DataFrame(X_deaths,columns = [date])
new_rec = pd.DataFrame(X_rec,columns = [date])
new_cases.to_csv('data/country_wise_cases.csv')
new_deaths.to_csv('data/country_wise_deaths.csv')
new_rec.to_csv('data/country_wise_recovered.csv')
def read_country_list():
df = pd.read_csv("data/country_wise_cases.csv")
country = list(df['Country'])
return country
def read_country_data(country_index):
df_cases = pd.read_csv("data/country_wise_cases.csv")
df_deaths = pd.read_csv("data/country_wise_deaths.csv")
df_rec = pd.read_csv("data/country_wise_recovered.csv")
cases = list(df_cases.iloc[country_index,:].values)[2:]
deaths = list(df_deaths.iloc[country_index,:].values)[2:]
rec = list(df_rec.iloc[country_index,:].values)[2:]
dates = list(df_cases.columns)[2:]
final = pd.DataFrame(list(zip(dates,cases,deaths,rec)),columns = ['Date','Total Confirmed','Total Deaths','Total Recovered'])
final_dict={}
cases_list=[]
deaths_list=[]
rec_list=[]
for i in range(len(final)):
cases_list.append({final['Date'][i]: int(final['Total Confirmed'][i])})
deaths_list.append({final['Date'][i]: int(final['Total Deaths'][i])})
rec_list.append({final['Date'][i]: int(final['Total Recovered'][i])})
final_dict['country'] = df_cases['Country'][country_index]
final_dict['cases'] = cases_list
final_dict['deaths'] = deaths_list
final_dict['recovered'] = rec_list
return final_dict
def world_original():
final = pd.read_csv("data/world_original.csv")
final_dict={}
cases_list=[]
deaths_list=[]
rec_list=[]
for i in range(len(final)):
cases_list.append({final['Date'][i]: int(final['Total Confirmed'][i])})
deaths_list.append({final['Date'][i]: int(final['Total Deaths'][i])})
rec_list.append({final['Date'][i]: int(final['Total Recoveries'][i])})
final_dict['cases'] = cases_list
final_dict['deaths'] = deaths_list
final_dict['recovered'] = rec_list
return final_dict
def world_fitted():
final =
|
pd.read_csv("data/world_fitted.csv")
|
pandas.read_csv
|
from dagster_pandas.constraints import (
ColumnAggregateConstraintWithMetadata,
ColumnConstraintWithMetadata,
ColumnRangeConstraintWithMetadata,
ColumnWithMetadataException,
ConstraintWithMetadata,
ConstraintWithMetadataException,
DataFrameWithMetadataException,
MultiAggregateConstraintWithMetadata,
MultiColumnConstraintWithMetadata,
MultiConstraintWithMetadata,
StrictColumnsWithMetadata,
)
from pandas import DataFrame
def basic_validation_function(inframe):
if isinstance(inframe, DataFrame):
return (True, {})
else:
return (
False,
{'expectation': "a " + DataFrame.__name__, 'actual': "a " + type(inframe).__name__},
)
basic_confirmation_function = ConstraintWithMetadata(
description='this constraint confirms that table is correct type',
validation_fn=basic_validation_function,
resulting_exception=DataFrameWithMetadataException,
raise_or_typecheck=False,
)
basic_multi_constraint = MultiConstraintWithMetadata(
description='this constraint confirms that table is correct type',
validation_fn_arr=[basic_validation_function],
resulting_exception=DataFrameWithMetadataException,
raise_or_typecheck=False,
)
def test_failed_basic():
assert not basic_confirmation_function.validate([]).success
def test_basic():
assert basic_confirmation_function.validate(DataFrame())
def test_failed_multi():
mul_val = basic_multi_constraint.validate([]).metadata_entries[0].entry_data.data
assert mul_val["expected"] == {'basic_validation_function': 'a DataFrame'}
assert mul_val["actual"] == {'basic_validation_function': 'a list'}
def test_success_multi():
mul_val = basic_multi_constraint.validate(DataFrame())
    assert mul_val.success is True
assert mul_val.metadata_entries == []
def test_failed_strict():
strict_column = StrictColumnsWithMetadata(["base_test"], raise_or_typecheck=False)
assert not strict_column.validate(DataFrame()).success
def test_successful_strict():
strict_column = StrictColumnsWithMetadata([], raise_or_typecheck=False)
assert strict_column.validate(DataFrame()).success
def test_column_constraint():
def column_num_validation_function(value):
return (isinstance(value, int), {})
df = DataFrame({'foo': [1, 2], 'bar': ['a', 2], 'baz': [1, 'a']})
column_val = ColumnConstraintWithMetadata(
"Confirms type of column values",
column_num_validation_function,
ColumnWithMetadataException,
raise_or_typecheck=False,
)
val = column_val.validate(df, *df.columns).metadata_entries[0].entry_data.data
assert {'bar': ['row 0'], 'baz': ['row 1']} == val["offending"]
assert {'bar': ['a'], 'baz': ['a']} == val["actual"]
def test_multi_val_constraint():
def column_num_validation_function(value):
return (value >= 3, {})
df = DataFrame({'foo': [1, 2], 'bar': [3, 2], 'baz': [1, 4]})
column_val = ColumnConstraintWithMetadata(
"Confirms values greater than 3",
column_num_validation_function,
ColumnWithMetadataException,
raise_or_typecheck=False,
)
val = column_val.validate(df, *df.columns).metadata_entries[0].entry_data.data
assert {'foo': ['row 0', 'row 1'], 'bar': ['row 1'], 'baz': ['row 0']} == val["offending"]
assert {'foo': [1, 2], 'bar': [2], 'baz': [1]} == val["actual"]
def test_multi_column_constraint():
def col_val_three(value):
"""
returns values greater than or equal to 3
"""
return (value >= 2, {})
def col_val_two(value):
"""
returns values less than 2
"""
return (value < 2, {})
df =
|
DataFrame({'foo': [1, 2, 3], 'bar': [3, 2, 1], 'baz': [1, 4, 5]})
|
pandas.DataFrame
|
#%%
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
import random
#%%
df_train =
|
pd.read_csv("data/train_ohe.csv")
|
pandas.read_csv
|
#
# Copyright (C) 2014 Xinguard Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -*- coding: utf-8 -*-
import pandas as pd
import os
import re
from flask import Flask
from flask import render_template, jsonify, request, g, abort, redirect, url_for
import json
#from xinui.rest import Rest
from rest import Rest
app = Flask(__name__)
global_flow_table = {}
# Summary page
@app.route("/")
def summary():
sw_desc = Rest.get_switch_desc()
if sw_desc is False:
abort(404)
return render_template("summary.html", sw_desc=sw_desc)
# Policy page
@app.route("/policy/")
def policy():
sw_dpid_list = Rest.get_switch_list()
if sw_dpid_list is False:
abort(404)
return render_template("policy.html", sw_dpid_list=sw_dpid_list)
@app.route("/policy/<dpid>")
def dpid_policy(dpid):
sw_dpid_list = Rest.get_switch_list()
if sw_dpid_list is False or int(dpid) not in sw_dpid_list:
abort(404)
flow_table = None
flow_table = Rest.get_flow_table(dpid)
global global_flow_table
global_flow_table = flow_table
port = Rest.get_switch_port(dpid)
return render_template("policy.html", sw_dpid_list=sw_dpid_list, dpid=dpid, port=port, flow_table=flow_table)
# Topology page
@app.route("/topology")
def topology():
topo = Rest.get_topology()
return render_template("topology.html", topo=topo)
@app.route("/_query_flow/")
def query_flow(dpid):
#dpid = request.args.get("dpid")
flow_table = Rest.get_flow_table(dpid)
#print flow_table
return render_template("policy.html", flow_table=flow_table)
#return jsonify(flow_table)
@app.route("/_query_port")
def query_port():
dpid = request.args.get("dpid")
port = Rest.get_switch_port(dpid)
return jsonify(port)
@app.route("/_add_flow", methods=["POST"])
def add_flow():
req = json.loads(request.form["flow_cmd"])
flow_cmd = {}
match_dict = {}
action_list = []
flow_cmd["dpid"] = req["common"]["dpid"]
flow_cmd["priority"] = req["common"]["priority"]
flow_cmd["idle_timeout"] = req["common"]["idle"]
flow_cmd["hard_timeout"] = req["common"]["hard"]
if req["match"]["input"] != "Any":
match_dict["in_port"] = int(req["match"]["input"])
if req["match"]["dl_saddr"] != "None":
match_dict["dl_src"] = req["match"]["dl_saddr"]
if req["match"]["dl_daddr"] != "None":
match_dict["dl_dst"] = req["match"]["dl_daddr"]
if req["match"]["nw_saddr"] != "None":
match_dict["nw_src"] = req["match"]["nw_saddr"]
if req["match"]["nw_daddr"] != "None":
match_dict["nw_dst"] = req["match"]["nw_daddr"]
if req["match"]["nw_saddr"] != "None" or req["match"]["nw_daddr"] != "None" or req["match"]["l4_proto"]:
match_dict["dl_type"] = 2048
if req["match"]["l4_proto"] != "None":
if req["match"]["l4_proto"] == "TCP":
match_dict["nw_proto"] = 6
elif req["match"]["l4_proto"] == "UDP":
match_dict["nw_proto"] = 17
if req["match"]["sport"] != "None":
match_dict["tp_src"] = int(req["match"]["sport"])
if req["match"]["dport"] != "None":
match_dict["tp_dst"] = int(req["match"]["dport"])
if req["match"]["vlan_id"] != "None":
match_dict["dl_vlan"] = int(req["match"]["vlan_id"])
if req["action"]["output"] != "Drop":
for value in req["action"]["output"].split(" "):
action_dict = {}
action_dict["port"] = int(value)
action_dict["type"] = "OUTPUT"
action_list.append(action_dict)
if req["action"]["vlan_action"] != "None":
if "Strip" in req["action"]["vlan_action"]:
action_dict = {}
action_dict["type"] = "POP_VLAN"
action_list.append(action_dict)
elif "Swap" in req["action"]["vlan_action"]:
action_dict = {}
action_dict["field"] = "vlan_vid"
action_dict["value"] = req["action"]["vlan_action"].split(" ")[-1]
action_dict["type"] = "SET_FIELD"
action_list.append(action_dict)
elif "New" in req["action"]["vlan_action"]:
push_vlan_dict = {}
push_vlan_dict["ethertype"] = 33024
push_vlan_dict["type"] = "PUSH_VLAN"
action_list.append(push_vlan_dict)
action_dict = {}
action_dict["field"] = "vlan_vid"
action_dict["value"] = req["action"]["vlan_action"].split(" ")[-1]
action_dict["type"] = "SET_FIELD"
action_list.append(action_dict)
flow_cmd["match"] = match_dict
flow_cmd["actions"] = action_list
#print json.dumps(flow_cmd)
Rest.add_flow(json.dumps(flow_cmd))
return "foo"
@app.route("/_del_flow", methods=["POST"])
def del_flow():
index = request.form["index"]
flow_cmd = {}
match_dict = {}
    for key, flow_list in global_flow_table.items():
        flow_cmd["dpid"] = key
        match_dict = flow_list[int(index)]["match"]
flow_cmd["match"] = match_dict
#print json.dumps(flow_cmd)
Rest.del_flow(json.dumps(flow_cmd))
return "foo"
@app.errorhandler(404)
def page_not_found(error):
return render_template("page_not_found.html"), 404
#CRUD
@app.route("/get")
def show_tables():
filename = 'example2.xlsx'
    data = pd.read_excel(filename, sheet_name='Sheet1')
data = data.fillna('')
return render_template('index.html',tables=[re.sub(' mytable', '" id="example', data.to_html(classes='mytable'))],
titles = ['Excel Data to Flask'])
@app.route('/insert', methods= ['POST','GET'])
def insert():
q1 = request.form['num1']
q2 = request.form['num2']
print(q1,q2)
df = pd.DataFrame({'a': [q1],
'b': [q2]})
book = pd.read_excel('example2.xlsx')
writer = pd.ExcelWriter('example2.xlsx', engine='openpyxl')
book.to_excel(writer, startrow=0, index=False)
df.to_excel(writer, startrow=len(book) + 1, header=False, index=False)
writer.save()
return redirect('/')
@app.route('/save', methods= ['POST','GET'])
def save():
url = 'http://127.0.0.1:5000/'
urll = request.get_data()
print(urll)
data =
|
pd.read_html(urll)
|
pandas.read_html
|
# Heavily influenced by: https://www.kaggle.com/opanichev/lightgbm-and-tf-idf-starter?login=true#
import pandas as pd
import lightgbm as lgbm
import numpy as np
import os
import scripts.donorchoose_functions as fn
import re
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from datetime import datetime
from tqdm import tqdm
# Reading in data
dtype = {
'id': str,
'teacher_id': str,
'teacher_prefix': str,
'school_state': str,
'project_submitted_datetime': str,
'project_grade_category': str,
'project_subject_categories': str,
'project_subject_subcategories': str,
'project_title': str,
'project_essay_1': str,
'project_essay_2': str,
'project_essay_3': str,
'project_essay_4': str,
'project_resource_summary': str,
'teacher_number_of_previously_posted_projects': int,
'project_is_approved': np.uint8,
}
data_dir = "F:/Nerdy Stuff/Kaggle/DonorsChoose"
sub_path = "F:/Nerdy Stuff/Kaggle submissions/DonorChoose"
train = pd.read_csv(os.path.join(data_dir, "data/train.csv"), dtype=dtype)
test = pd.read_csv(os.path.join(data_dir, "data/test.csv"), dtype=dtype)
print("Extracting text features")
train = fn.extract_text_features(train)
test = fn.extract_text_features(test)
print("Extracting datetime features")
train = fn.extract_timestamp_features(train)
test = fn.extract_timestamp_features(test)
print("Joining together essays")
train['project_essay'] = fn.join_essays(train)
test['project_essay'] = fn.join_essays(test)
train = train.drop([
'project_essay_1', 'project_essay_2',
'project_essay_3', 'project_essay_4'
], axis=1)
test = test.drop([
'project_essay_1', 'project_essay_2',
'project_essay_3', 'project_essay_4'
], axis=1)
sample_sub = pd.read_csv(os.path.join(data_dir, "data/sample_submission.csv"))
res = pd.read_csv(os.path.join(data_dir, "data/resources.csv"))
id_test = test['id'].values
# Rolling up resources to one row per application
print("Rolling up resource requirements to one line and creating aggregate feats")
res = (res
.groupby('id').apply(fn.price_quantity_agg)
.reset_index())
res['mean_price'] = res['price_sum']/res['quantity_sum']
print("Train has %s rows and %s cols" % (train.shape[0], train.shape[1]))
print("Test has %s rows and %s cols" % (test.shape[0], test.shape[1]))
print("Res has %s rows and %s cols" % (res.shape[0], res.shape[1]))
print("Train has %s more rows than test" % (train.shape[0] / test.shape[0]))
train = pd.merge(left=train, right=res, on="id", how="left")
test = pd.merge(left=test, right=res, on="id", how="left")
print("Train after merge has %s rows and %s cols" % (train.shape[0], train.shape[1]))
print("Test after merge has %s rows and %s cols" % (test.shape[0], test.shape[1]))
print("Concatenating datasets so I can build the label encoders")
df_all =
|
pd.concat([train, test], axis=0)
|
pandas.concat
|
import logging
import os
import warnings
from pathlib import Path
from typing import Dict, Iterable, Union
import nibabel as nib
import numpy as np
import pandas as pd
import tqdm
from nilearn.image import resample_to_img
from nipype.interfaces.ants import ApplyTransforms
from nipype.interfaces.freesurfer import (
CALabel,
MRIsCALabel,
ParcellationStats,
SegStats,
)
from connecticity.parcellation import messages
warnings.filterwarnings("ignore")
#: Default parcellation logging configuration.
LOGGER_CONFIG = dict(
filemode="w",
format="%(asctime)s - %(message)s",
level=logging.INFO,
)
#: Command template to be used to run dwi2tensor.
DWI2TENSOR_COMMAND_TEMPLATE: str = "dwi2tensor -grad {grad} {dwi} {out_file}"
#: Custom mapping of dwi2tensor keyword arguments.
TENSOR2METRIC_KWARGS_MAPPING: Dict[str, str] = {"eval": "value"}
#: QSIPrep DWI file template.
QSIPREP_DWI_TEMPLATE = "{qsiprep_dir}/sub-{participant_label}/ses-{session}/dwi/sub-{participant_label}_ses-{session}_space-T1w_desc-preproc_dwi.{extension}" # noqa
#: Tensor metric file template.
TENSOR_METRICS_FILES_TEMPLATE = "{dmriprep_dir}/sub-{participant_label}/ses-{session}/dwi/sub-{participant_label}_ses-{session}_dir-FWD_space-anat_desc-{metric}_epiref.nii.gz" # noqa
#: Parcellated tensor metrics file template.
TENSOR_METRICS_OUTPUT_TEMPLATE = "{dmriprep_dir}/sub-{participant_label}/ses-{session}/dwi/sub-{participant_label}_ses-{session}_space-anat_desc-TensorMetrics_atlas-{parcellation_scheme}_meas-{measure}.csv" # noqa: E501
#: Command template to be used to run aparcstats2table.
APARCTSTATS2TABLE_TEMPLATE = "aparcstats2table --subjects {subjects} --parc={parcellation_scheme} --hemi={hemi} --measure={measure} --tablefile={out_file}" # noqa: E501
#: Hemisphere labels in file name templates.
HEMISPHERE_LABELS: Iterable[str] = ["lh", "rh"]
#: Surface labels in file name templates.
SURFACES: Iterable[str] = ["smoothwm", "curv", "sulc"]
#: Data types in file name templates.
DATA_TYPES: Iterable[str] = ["pial", "white"]
#: Registered file name template.
REG_FILE_NAME_TEMPLATE: str = "{hemisphere_label}.sphere.reg"
#: FreeSurfer's surfaces directory name.
SURFACES_DIR_NAME: str = "surf"
#: FreeSurfer's MRI directory name.
MRI_DIR_NAME: str = "mri"
#: FreeSurfer's labels directory name.
LABELS_DIR_NAME: str = "label"
#: FreeSurfer's stats directory name.
STATS_DIR_NAME: str = "stats"
#: mris_anatomical_stats parameter keys.
STATS_KEYS: Iterable[Union[str, bool]] = [
"brainmask",
"aseg",
"ribbon",
"wm",
"transform",
"tabular_output",
]
#: mris_anatomical_stats parameter values.
STATS_VALUES: Iterable[str] = [
"brainmask.mgz",
"aseg.presurf.mgz",
"ribbon.mgz",
"wm.mgz",
"transforms/talairach.xfm",
True,
]
STATS_MEASURES: Iterable[str] = [
"area",
"volume",
"thickness",
"thicknessstd",
"meancurv",
]
STATS_NAME_TEMPLATE: str = (
"{hemisphere_label}_{parcellation_scheme}_{measure}.csv"
)
SUBCORTICAL_STATS_NAME_TEMPLATE: str = "subcortex.{parcellation_scheme}.stats"
def generate_annotation_file(
subject_dir: Path,
hemisphere_label: str,
parcellation_scheme: str,
gcs_template: str,
):
surfaces_dir = subject_dir / SURFACES_DIR_NAME
labels_dir = subject_dir / LABELS_DIR_NAME
# Check for existing file in expected output path.
out_file_name = f"{hemisphere_label}.{parcellation_scheme}.annot"
out_file = labels_dir / out_file_name
if out_file.exists():
return out_file
# If no existing output is found, create mris_ca_label input
# configuration.
reg_file_name = REG_FILE_NAME_TEMPLATE.format(
hemisphere_label=hemisphere_label
)
reg_file = surfaces_dir / reg_file_name
curv, smoothwm, sulc = [
surfaces_dir / f"{hemisphere_label}.{surface_label}"
for surface_label in SURFACES
]
hemi_gcs = gcs_template.format(hemi=hemisphere_label)
# Log annotation file generation start.
message = messages.ANNOTATION_FILE_GENERATION_START.format(
parcellation_scheme=parcellation_scheme,
subject_label=subject_dir.name,
hemisphere_label=hemisphere_label,
)
logging.info(message)
# Create interface instance, run, and return the result.
ca_label = MRIsCALabel(
canonsurf=reg_file,
subjects_dir=subject_dir.parent,
curv=curv,
smoothwm=smoothwm,
sulc=sulc,
subject_id=subject_dir.parent.name,
hemisphere=hemisphere_label,
out_file=out_file,
classifier=hemi_gcs,
seed=42,
)
ca_label.run()
return out_file
def generate_annotations(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
gcs_template: str,
):
"""
For a single subject, produces an annotation file, in which each cortical
surface vertex is assigned a neuroanatomical label.
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme
gcs_template : str
A freesurfer's .gcs template file
Returns
-------
dict
A dictionary with keys of hemispheres and values as corresponding
*.annot* files
"""
subject_dir = freesurfer_dir / subject_label
return {
hemisphere_label: generate_annotation_file(
subject_dir, hemisphere_label, parcellation_scheme, gcs_template
)
for hemisphere_label in HEMISPHERE_LABELS
}
def generate_default_args(freesurfer_dir: Path, subject_label: str) -> dict:
"""
Gather default required arguments for nipype's implementation of
FreeSurfer's *mris_anatomical_stats*.
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
Returns
-------
dict
        A dictionary with keys that map to nipype's required arguments
"""
subject_dir = freesurfer_dir / subject_label
surface_dir = subject_dir / SURFACES_DIR_NAME
mri_dir = subject_dir / MRI_DIR_NAME
args = {"subject_id": subject_label, "subjects_dir": freesurfer_dir}
for hemi in HEMISPHERE_LABELS:
for datatype in DATA_TYPES:
key = f"{hemi}_{datatype}"
file_name = f"{hemi}.{datatype}"
args[key] = surface_dir / file_name
for key, value in zip(STATS_KEYS, STATS_VALUES):
args[key] = mri_dir / value
return args
def map_subcortex(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
    gcs_subcortex: str,
):
"""
    For a single subject, produces a labeled volume in which each
    sub-cortical voxel is assigned a neuroanatomical label.
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
    subject_label : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme.
gcs_subcortex : str
A freesurfer's .gcs template file.
Returns
-------
    Path
        Path to the subcortical labels volume (*.mgz* file).
"""
# Check for an existing annotations file.
subject_dir = freesurfer_dir / subject_label
mri_dir = subject_dir / MRI_DIR_NAME
out_file = mri_dir / f"{parcellation_scheme}_subcortex.mgz"
if out_file.exists():
return out_file
# Create a subcortical annotations file if none was found.
target = mri_dir / "brain.mgz"
transform = mri_dir / "transforms" / "talairach.m3z"
# Log subcortical annotations file generation start.
message = messages.SUBCORTICAL_ANNOTATION_FILE_GENERATION_START.format(
parcellation_scheme=parcellation_scheme,
subject_label=subject_label,
)
logging.info(message)
# Create interface instance, run, and return result.
ca_label = CALabel(
subjects_dir=freesurfer_dir,
in_file=target,
transform=transform,
out_file=out_file,
        template=gcs_subcortex,
)
ca_label.run()
return out_file
def freesurfer_subcortical_parcellation(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
gcs_subcortex: str,
color_table: str,
):
"""
Calculates different Freesurfer-derived metrics according to subcortical
parcellation
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme
gcs_subcortex : str
A freesurfer's .gcs template file
    Returns
    -------
    Path
        Path to the subcortical statistics summary file computed according
        to *parcellation_scheme*
"""
mapped_subcortex = map_subcortex(
freesurfer_dir, subject_label, parcellation_scheme, gcs_subcortex
)
subject_dir = freesurfer_dir / subject_label
stats_dir = subject_dir / STATS_DIR_NAME
file_name = SUBCORTICAL_STATS_NAME_TEMPLATE.format(
parcellation_scheme=parcellation_scheme
)
summary_file = stats_dir / file_name
if not summary_file.exists():
ss = SegStats(
segmentation_file=mapped_subcortex,
subjects_dir=freesurfer_dir,
summary_file=summary_file,
color_table_file=color_table,
exclude_id=0,
)
ss.run()
return summary_file
def freesurfer_anatomical_parcellation(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
gcs: str,
):
"""
Calculates different Freesurfer-derived metrics according to .annot files
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme.
gcs : str
A freesurfer's .gcs template file.
Returns
-------
    dict
        A dictionary keyed by hemisphere, holding the paths to the
        per-hemisphere stats table and color lookup table for
        *parcellation_scheme*
"""
annotations = generate_annotations(
freesurfer_dir, subject_label, parcellation_scheme, gcs
)
args = generate_default_args(freesurfer_dir, subject_label)
stats = {}
subject_dir = freesurfer_dir / subject_label
labels_dir = subject_dir / LABELS_DIR_NAME
stats_dir = subject_dir / STATS_DIR_NAME
surfaces_dir = subject_dir / SURFACES_DIR_NAME
for hemisphere_label, annotations_path in annotations.items():
stats[hemisphere_label] = {}
out_color = labels_dir / f"aparc.annot.{parcellation_scheme}.ctab"
out_table = (
stats_dir / f"{hemisphere_label}.{parcellation_scheme}.stats"
)
args["hemisphere"] = hemisphere_label
args["in_annotation"] = annotations_path
args["thickness"] = surfaces_dir / f"{hemisphere_label}.thickness"
if not out_table.exists() or not out_color.exists():
parcstats = ParcellationStats(**args)
parcstats.run()
stats[hemisphere_label]["table"] = out_table
stats[hemisphere_label]["color"] = out_color
return stats
def group_freesurfer_metrics(
subjects: list,
destination: Path,
parcellation_scheme: str,
force=True,
):
"""
Utilizes FreeSurfer's aparcstats2table to group different
FreeSurfer-derived across subjects according to *parcellation_scheme*.
Parameters
----------
subjects : list
A list of subjects located under *SUBJECTS_DIR*
destination : Path
        The destination under which group-wise files will be stored
parcellation_scheme : str
The parcellation scheme (subjects must have
*stats/{hemi}.{parcellation_scheme}.stats* file for this to work)
"""
destination.mkdir(exist_ok=True, parents=True)
data = {}
for hemisphere_label in HEMISPHERE_LABELS:
data[hemisphere_label] = {}
for measure in STATS_MEASURES:
out_file_name = STATS_NAME_TEMPLATE.format(
hemisphere_label=hemisphere_label,
parcellation_scheme=parcellation_scheme,
measure=measure,
)
out_file = destination / out_file_name
if not out_file.exists() or force:
cmd = APARCTSTATS2TABLE_TEMPLATE.format(
subjects=" ".join(subjects),
parcellation_scheme=parcellation_scheme,
hemi=hemisphere_label,
measure=measure,
out_file=out_file,
)
os.system(cmd)
data[hemisphere_label][measure] = out_file
return data
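# Illustrative sketch (hypothetical subjects and paths): the shell command
# that group_freesurfer_metrics formats from APARCTSTATS2TABLE_TEMPLATE for
# one hemisphere/measure combination before passing it to os.system.
_EXAMPLE_APARCSTATS2TABLE_CMD = APARCTSTATS2TABLE_TEMPLATE.format(
    subjects=" ".join(["sub-01", "sub-02"]),
    parcellation_scheme="Schaefer2018_100Parcels",
    hemi="lh",
    measure="thickness",
    out_file="/tmp/lh_Schaefer2018_100Parcels_thickness.csv",
)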
def parcellate_image(
atlas: Path, image: Path, parcels: pd.DataFrame, np_operation="nanmean"
) -> pd.Series:
"""
Parcellates an image according to *atlas*.
Parameters
----------
atlas : Path
A parcellation atlas in *image* space
image : Path
An image to be parcellated
parcels : pd.DataFrame
A dataframe for *atlas* parcels
Returns
-------
pd.Series
The mean value of *image* in each *atlas* parcel
"""
    out = pd.Series(index=parcels.index, dtype=float)
    try:
        # Assumption: the values in *parcels.index* correspond to the integer
        # labels stored in the atlas image.
        atlas_img = nib.load(atlas) if isinstance(atlas, (str, Path)) else atlas
        atlas_data = np.asarray(atlas_img.dataobj)
        image_data = np.asarray(nib.load(image).dataobj)
        aggregate = getattr(np, np_operation)
        for i in parcels.index:
            out.loc[i] = aggregate(image_data[atlas_data == i])
        return out
except IndexError:
atlas = resample_to_img(
nib.load(atlas),
nib.load(image),
interpolation="nearest",
)
return parcellate_image(atlas, image, parcels, np_operation)
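# Self-contained sketch of the parcel-wise aggregation performed by
# parcellate_image, using small synthetic arrays in place of NIfTI files;
# the label values and intensities below are arbitrary assumptions.
def _demo_parcelwise_aggregation():
    atlas_data = np.array([[1, 1, 2], [2, 0, 1]])  # 0 = background
    image_data = np.array([[0.2, 0.4, 1.0], [2.0, 9.9, 0.6]])
    parcels = pd.DataFrame(index=[1, 2])  # one row per parcel label
    out = pd.Series(index=parcels.index, dtype=float)
    for label in parcels.index:
        out.loc[label] = np.nanmean(image_data[atlas_data == label])
    return out  # parcel 1 -> mean(0.2, 0.4, 0.6) = 0.4, parcel 2 -> 1.5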
def parcellate_subject_tensors(
dmriprep_dir: Path,
participant_label: str,
image: Path,
multi_column: pd.MultiIndex,
parcels: pd.DataFrame,
parcellation_scheme: str,
cropped_to_gm: bool = True,
force: bool = False,
np_operation: str = "nanmean",
):
"""
Parcellates available data for *participant_label*, declared by
*multi_column* levels.
Parameters
----------
dmriprep_dir : Path
Path to *dmriprep* outputs' directory
participant_label : str
A label referring to an existing subject
image : Path
Path to subject's native-space parcellation
multi_column : pd.MultiIndex
A multi-column constructed by ROI * metrics
parcels : pd.DataFrame
A dataframe describing the parcellation scheme
parcellation_scheme : str
The name of the parcellation scheme
Returns
-------
pd.DataFrame
A dataframe containing all of *participant_label*'s data, parcellated
by *parcellation_scheme*
"""
sessions = [
s.name.split("-")[-1]
for s in dmriprep_dir.glob(f"sub-{participant_label}/ses-*")
]
multi_index = pd.MultiIndex.from_product([[participant_label], sessions])
subj_data = pd.DataFrame(index=multi_index, columns=multi_column)
for session in sessions:
out_file = Path(
TENSOR_METRICS_OUTPUT_TEMPLATE.format(
dmriprep_dir=dmriprep_dir,
participant_label=participant_label,
session=session,
parcellation_scheme=parcellation_scheme,
measure=np_operation.replace("nan", ""),
)
)
if cropped_to_gm:
out_name = out_file.name.split("_")
out_name.insert(3, "label-GM")
out_file = out_file.parent / "_".join(out_name)
if out_file.exists() and not force:
data = pd.read_csv(out_file, index_col=[0, 1], header=[0, 1])
subj_data.loc[(participant_label, session)] = data.T.loc[
(participant_label, session)
]
else:
for metric in multi_column.levels[-1]:
logging.info(metric)
metric_file = TENSOR_METRICS_FILES_TEMPLATE.format(
dmriprep_dir=dmriprep_dir,
participant_label=participant_label,
session=session,
metric=metric.lower(),
)
subj_data.loc[
(participant_label, session), (slice(None), metric)
] = parcellate_image(
image, metric_file, parcels, np_operation
).values
subj_data.loc[(participant_label, session)].to_csv(out_file)
return subj_data
def dwi2tensor(in_file: Path, grad: Path, out_file: Path):
"""
Estimate diffusion's tensor via *mrtrix3*'s *dwi2tensor*.
Parameters
----------
in_file : Path
DWI series
grad : Path
DWI gradient table in *mrtrix3*'s format.
out_file : Path
Output template
Returns
-------
Path
Refined output in *mrtrix3*'s .mif format.
"""
out_name = out_file.name.split(".")[0]
out_file = out_file.with_name("." + out_name + ".mif")
if not out_file.exists():
cmd = DWI2TENSOR_COMMAND_TEMPLATE.format(
grad=grad, dwi=in_file, out_file=out_file
)
os.system(cmd)
return out_file
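# Illustrative sketch (hypothetical paths): the mrtrix3 command string that
# dwi2tensor() builds from DWI2TENSOR_COMMAND_TEMPLATE and hands to os.system.
_EXAMPLE_DWI2TENSOR_CMD = DWI2TENSOR_COMMAND_TEMPLATE.format(
    grad="/derivatives/qsiprep/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-preproc_dwi.b",
    dwi="/derivatives/qsiprep/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-preproc_dwi.nii.gz",
    out_file="/derivatives/dmriprep/sub-01/ses-01/dwi/.sub-01_ses-01_desc-tensor_epiref.mif",
)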
def tensor2metric(
tensor: Path,
derivatives: Path,
participant_label: str,
session: str,
metrics: list,
):
"""[summary]
Parameters
----------
tensor : Path
[description]
derivatives : Path
[description]
participant_label : str
[description]
session : str
[description]
metrics : list
[description]
"""
cmd = "tensor2metric"
flag = []
for metric in metrics:
metric_file = TENSOR_METRICS_FILES_TEMPLATE.format(
dmriprep_dir=derivatives,
participant_label=participant_label,
session=session,
metric=metric.lower(),
)
if Path(metric_file).exists():
flag.append(False)
else:
flag.append(True)
metric = metric.lower()
cmd += f" -{TENSOR2METRIC_KWARGS_MAPPING.get(metric, metric)} {metric_file}" # noqa: E501
cmd += f" {tensor}"
if any(flag):
os.system(cmd)
def estimate_tensors(
parcellations: dict,
derivatives_dir: Path,
multi_column: pd.MultiIndex,
):
"""
Parameters
----------
parcellation_scheme : str
A string representing a parcellation atlas
parcellations : dict
A dictionary with subjects as keys and their corresponding
*parcellation_scheme* in native space
derivatives_dir : Path
Path to derivatives, usually *qsiprep*'s
Returns
-------
[type]
[description]
"""
metrics = multi_column.levels[-1]
for participant_label, image in tqdm.tqdm(parcellations.items()):
logging.info(
f"Estimating tensor-derived metrics in subject {participant_label} anatomical space." # noqa: E501
)
for ses in derivatives_dir.glob(f"sub-{participant_label}/ses-*"):
ses_id = ses.name.split("-")[-1]
dwi, grad = [
QSIPREP_DWI_TEMPLATE.format(
qsiprep_dir=derivatives_dir,
participant_label=participant_label,
session=ses_id,
extension=extension,
)
for extension in ["nii.gz", "b"]
]
tensor = TENSOR_METRICS_FILES_TEMPLATE.format(
dmriprep_dir=derivatives_dir,
participant_label=participant_label,
session=ses_id,
metric="tensor",
)
tensor = dwi2tensor(dwi, grad, Path(tensor))
tensor2metric(
tensor, derivatives_dir, participant_label, ses_id, metrics
)
def parcellate_tensors(
dmriprep_dir: Path,
multi_column: pd.MultiIndex,
parcellations: dict,
parcels: pd.DataFrame,
parcellation_scheme: str,
cropped_to_gm: bool = True,
force: bool = False,
np_operation: str = "nanmean",
) -> pd.DataFrame:
"""
Parcellate *dmriprep* derived tensor's metrics according to ROI stated by
*df*.
Parameters
----------
dmriprep_dir : Path
Path to *dmriprep* outputs
multi_column : pd.MultiIndex
A multi-level column with ROI/tensor metrics combinations
parcellations : dict
A dictionary with representing subjects, and values containing paths
to subjects-space parcellations
Returns
-------
pd.DataFrame
An updated *df*
"""
data =
|
pd.DataFrame()
|
pandas.DataFrame
|
'''
The main driving code
1. CML/FL Training
2. Compute/Approximate Cosine Gradient Shapley
3. Calculate and realize the fair gradient reward
'''
import os, sys, json
from os.path import join as oj
import copy
from copy import deepcopy as dcopy
import time, datetime, random, pickle
from collections import defaultdict
from itertools import product
import numpy as np
import pandas as pd
import torch
from torch import nn, optim
from torch.linalg import norm
from torchtext.data import Batch
import torch.nn.functional as F
from utils.Data_Prepper import Data_Prepper
from utils.arguments import mnist_args, cifar_cnn_args, mr_args, sst_args
from utils.utils import cwd, train_model, evaluate, cosine_similarity, mask_grad_update_by_order, \
compute_grad_update, add_update_to_model, add_gradient_updates,\
flatten, unflatten, compute_distance_percentage
import argparse
parser = argparse.ArgumentParser(description='Process which dataset to run')
parser.add_argument('-D', '--dataset', help='Pick the dataset to run.', type=str, required=True)
parser.add_argument('-N', '--n_agents', help='The number of agents.', type=int, default=5)
parser.add_argument('-nocuda', dest='cuda', help='Not to use cuda even if available.', action='store_false')
parser.add_argument('-cuda', dest='cuda', help='Use cuda if available.', action='store_true')
parser.add_argument('-split', '--split', dest='split', help='The type of data splits.', type=str, default='all', choices=['all', 'uni', 'cla', 'pow'])
cmd_args = parser.parse_args()
print(cmd_args)
N = cmd_args.n_agents
if torch.cuda.is_available() and cmd_args.cuda:
device = torch.device('cuda')
else:
device = torch.device('cpu')
if cmd_args.dataset == 'mnist':
args = copy.deepcopy(mnist_args)
if N > 0:
agent_iterations = [[N, N*600]]
else:
agent_iterations = [[5,3000], [10, 6000], [20, 12000]]
if cmd_args.split == 'uni':
splits = ['uniform']
elif cmd_args.split == 'pow':
splits = ['powerlaw']
elif cmd_args.split == 'cla':
splits = ['classimbalance']
elif cmd_args.split == 'all':
splits = ['uniform', 'powerlaw', 'classimbalance',]
args['iterations'] = 200
args['E'] = 3
args['lr'] = 1e-3
args['num_classes'] = 10
args['lr_decay'] = 0.955
elif cmd_args.dataset == 'cifar10':
args = copy.deepcopy(cifar_cnn_args)
if N > 0:
agent_iterations = [[N, N*2000]]
else:
agent_iterations = [[10, 20000]]
if cmd_args.split == 'uni':
splits = ['uniform']
elif cmd_args.split == 'pow':
splits = ['powerlaw']
elif cmd_args.split == 'cla':
splits = ['classimbalance']
elif cmd_args.split == 'all':
splits = ['uniform', 'powerlaw', 'classimbalance']
args['iterations'] = 200
args['E'] = 3
args['num_classes'] = 10
elif cmd_args.dataset == 'sst':
args = copy.deepcopy(sst_args)
agent_iterations = [[5, 8000]]
splits = ['powerlaw']
args['iterations'] = 200
args['E'] = 3
args['num_classes'] = 5
elif cmd_args.dataset == 'mr':
args = copy.deepcopy(mr_args)
agent_iterations = [[5, 8000]]
splits = ['powerlaw']
args['iterations'] = 200
args['E'] = 3
args['num_classes'] = 2
E = args['E']
ts = time.time()
time_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H:%M')
for N, sample_size_cap in agent_iterations:
args.update(vars(cmd_args))
args['n_agents'] = N
args['sample_size_cap'] = sample_size_cap
# args['momentum'] = 1.5 / N
for beta in [0.5, 1, 1.2, 1.5, 2, 1e7]:
args['beta'] = beta
for split in splits:
args['split'] = split
optimizer_fn = args['optimizer_fn']
loss_fn = args['loss_fn']
print(args)
print("Data Split information for the agents:")
data_prepper = Data_Prepper(
args['dataset'], train_batch_size=args['batch_size'], n_agents=N, sample_size_cap=args['sample_size_cap'],
train_val_split_ratio=args['train_val_split_ratio'], device=device, args_dict=args)
# valid_loader = data_prepper.get_valid_loader()
test_loader = data_prepper.get_test_loader()
train_loaders = data_prepper.get_train_loaders(N, args['split'])
shard_sizes = data_prepper.shard_sizes
# shard sizes refer to the sizes of the local data of each agent
shard_sizes = torch.tensor(shard_sizes).float()
relative_shard_sizes = torch.div(shard_sizes, torch.sum(shard_sizes))
print("Shard sizes are: ", shard_sizes.tolist())
if args['dataset'] in ['mr', 'sst']:
server_model = args['model_fn'](args=data_prepper.args).to(device)
else:
server_model = args['model_fn']().to(device)
D = sum([p.numel() for p in server_model.parameters()])
init_backup = dcopy(server_model)
# ---- init the agents ----
agent_models, agent_optimizers, agent_schedulers = [], [], []
for i in range(N):
model = copy.deepcopy(server_model)
# try:
# optimizer = optimizer_fn(model.parameters(), lr=args['lr'], momentum=args['momentum'])
# except:
optimizer = optimizer_fn(model.parameters(), lr=args['lr'])
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 200, 300], gamma=0.1)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = args['lr_decay'])
agent_models.append(model)
agent_optimizers.append(optimizer)
agent_schedulers.append(scheduler)
# ---- book-keeping variables
rs_dict, qs_dict = [], []
rs = torch.zeros(N, device=device)
past_phis = []
# for performance analysis
valid_perfs, local_perfs, fed_perfs = defaultdict(list), defaultdict(list), defaultdict(list)
# for gradient/model parameter analysis
dist_all_layer, dist_last_layer = defaultdict(list), defaultdict(list)
reward_all_layer, reward_last_layer= defaultdict(list), defaultdict(list)
# ---- CML/FL begins ----
for iteration in range(args['iterations']):
gradients = []
for i in range(N):
loader = train_loaders[i]
model = agent_models[i]
optimizer = agent_optimizers[i]
scheduler = agent_schedulers[i]
model.train()
model = model.to(device)
backup = copy.deepcopy(model)
model = train_model(model, loader, loss_fn, optimizer, device=device, E=E, scheduler=scheduler)
gradient = compute_grad_update(old_model=backup, new_model=model, device=device)
# NOTE: agents are assumed NOT to top up their local models with their own gradients
model.load_state_dict(backup.state_dict())
# add_update_to_model(model, gradient, device=device)
# append the normalized gradient
flattened = flatten(gradient)
norm_value = norm(flattened) + 1e-7 # to prevent division by zero
gradient = unflatten(torch.multiply(torch.tensor(args['Gamma']), torch.div(flattened, norm_value)), gradient)
gradients.append(gradient)
# ---- Server Aggregate ----
aggregated_gradient = [torch.zeros(param.shape).to(device) for param in server_model.parameters()]
# aggregate and update server model
if iteration == 0:
# first iteration use FedAvg
weights = torch.div(shard_sizes, torch.sum(shard_sizes))
else:
weights = rs
for gradient, weight in zip(gradients, weights):
add_gradient_updates(aggregated_gradient, gradient, weight=weight)
add_update_to_model(server_model, aggregated_gradient)
# update reputation and calculate reward gradients
flat_aggre_grad = flatten(aggregated_gradient)
# phis = torch.zeros(N, device=device)
phis = torch.tensor([F.cosine_similarity(flatten(gradient), flat_aggre_grad, 0, 1e-10) for gradient in gradients], device=device)
past_phis.append(phis)
rs = args['alpha'] * rs + (1 - args['alpha']) * phis
rs = torch.clamp(rs, min=1e-3) # make sure the rs do not go negative
rs = torch.div(rs, rs.sum()) # normalize the weights to 1
# --- altruistic degree function
q_ratios = torch.tanh(args['beta'] * rs)
q_ratios = torch.div(q_ratios, torch.max(q_ratios))
qs_dict.append(q_ratios)
rs_dict.append(rs)
for i in range(N):
reward_gradient = mask_grad_update_by_order(aggregated_gradient, mask_percentile=q_ratios[i], mode='layer')
add_update_to_model(agent_models[i], reward_gradient)
''' Analysis of rewarded gradients in terms of cosine similarity and L2 distance to the aggregated gradient '''
reward_all_layer[str(i)+'cos'].append(F.cosine_similarity(flatten(reward_gradient), flat_aggre_grad, 0, 1e-10).item() )
reward_all_layer[str(i)+'l2'].append(norm(flatten(reward_gradient) - flat_aggre_grad).item())
reward_last_layer[str(i)+'cos'].append(F.cosine_similarity(flatten(reward_gradient[-2]), flatten(aggregated_gradient[-2]), 0, 1e-10).item() )
reward_last_layer[str(i)+'l2'].append(norm(flatten(reward_gradient[-2])- flatten(aggregated_gradient[-2])).item())
weights = torch.div(shard_sizes, torch.sum(shard_sizes)) if iteration == 0 else rs
for i, model in enumerate(agent_models + [server_model]):
loss, accuracy = evaluate(model, test_loader, loss_fn=loss_fn, device=device)
valid_perfs[str(i)+'_loss'].append(loss.item())
valid_perfs[str(i)+'_accu'].append(accuracy.item())
fed_loss, fed_accu = 0, 0
for j, train_loader in enumerate(train_loaders):
loss, accuracy = evaluate(model, train_loader, loss_fn=loss_fn, device=device)
fed_loss += weights[j] * loss.item()
fed_accu += weights[j] * accuracy.item()
if j == i:
local_perfs[str(i)+'_loss'].append(loss.item())
local_perfs[str(i)+'_accu'].append(accuracy.item())
fed_perfs[str(i)+'_loss'].append(fed_loss.item())
fed_perfs[str(i)+'_accu'].append(fed_accu.item())
# ---- Record model distance to the server model ----
for i, model in enumerate(agent_models + [init_backup]) :
percents, dists = compute_distance_percentage(model, server_model)
dist_all_layer[str(i)+'dist'].append(np.mean(dists))
dist_last_layer[str(i)+'dist'].append(dists[-1])
dist_all_layer[str(i)+'perc'].append(np.mean(percents))
dist_last_layer[str(i)+'perc'].append(percents[-1])
# Saving results into csvs
agent_str = '{}-{}'.format(args['split'][:3].upper(), 'A'+str(N), )
folder = oj('RESULTS', args['dataset'], time_str, agent_str,
'beta-{}'.format(str(args['beta'])[:4]) )
os.makedirs(folder, exist_ok=True)
with cwd(folder):
# distance to the full gradient: all layers and only last layer of the model parameters
pd.DataFrame(reward_all_layer).to_csv(('all_layer.csv'), index=False)
pd.DataFrame(reward_last_layer).to_csv(('last_layer.csv'), index=False)
# distance to server model parameters: all layers and only last layer of the model parameters
pd.DataFrame(dist_all_layer).to_csv(('dist_all_layer.csv'), index=False)
pd.DataFrame(dist_last_layer).to_csv(('dist_last_layer.csv'), index=False)
# importance coefficients rs
rs_dict = torch.stack(rs_dict).detach().cpu().numpy()
df = pd.DataFrame(rs_dict)
df.to_csv(('rs.csv'), index=False)
# q values
qs_dict = torch.stack(qs_dict).detach().cpu().numpy()
df = pd.DataFrame(qs_dict)
df.to_csv(('qs.csv'), index=False)
# federated performance (local objectives weighted w.r.t the importance coefficient rs)
df =
|
pd.DataFrame(fed_perfs)
|
pandas.DataFrame
|
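The training loop above weights each agent's gradient by a reputation score: the cosine similarity of the agent's normalized gradient to the aggregate is smoothed with a moving average (alpha), clamped, renormalized, and mapped through tanh(beta * r) to decide what fraction of the aggregate each agent receives back. A minimal standalone sketch of just that update on random flattened gradients follows; alpha, beta, N and D here are placeholder values, not the settings used in the experiments above.
import torch
import torch.nn.functional as F
torch.manual_seed(0)
N, D = 5, 100                    # hypothetical: 5 agents, 100 flattened parameters
alpha, beta = 0.95, 1.0          # placeholder smoothing / altruism coefficients
grads = torch.randn(N, D)
grads = grads / grads.norm(dim=1, keepdim=True)    # normalized gradients, as in the loop above
rs = torch.full((N,), 1.0 / N)                     # start from uniform reputations
agg = (rs.unsqueeze(1) * grads).sum(dim=0)         # reputation-weighted aggregate
phis = F.cosine_similarity(grads, agg.unsqueeze(0), dim=1)
rs = alpha * rs + (1 - alpha) * phis               # moving-average reputation update
rs = torch.clamp(rs, min=1e-3)                     # keep reputations positive
rs = rs / rs.sum()                                 # normalize to a weight vector
q = torch.tanh(beta * rs)
q = q / q.max()                                    # fraction of the aggregate rewarded to each agent
print(rs.tolist(), q.tolist())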
import pandas as pd
import pathlib
from utils import load_from_file, morse_potential, fit_morse_potential
import matplotlib.pyplot as plt
import numpy as np
paths = pathlib.Path("/home/mscherbela/runs/forces/atoms/").glob("*/results.bz2")
data = []
for p in paths:
data_content = load_from_file(p)
config = data_content['config']
metrics = data_content['metrics']
forces_mean = 1e3*metrics['forces_mean']
forces_std = 1e3*np.std(data_content['metrics']['forces'], axis=0)
data_dict = dict(dirname=p.parent.name,
molecule=config['physical.name'],
Fx=forces_mean[0][0],
Fy=forces_mean[0][1],
Fz=forces_mean[0][2],
FxStd=forces_std[0][0],
FyStd=forces_std[0][1],
FzStd=forces_std[0][2],
energy=data_content['metrics']['E_mean'])
for k in config:
if k.startswith('evaluation.forces'):
data_dict[k.replace('evaluation.forces.', '')] = config[k]
data.append(data_dict)
df_full =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from datasets.dataset import Dataset
from backport.utils import RepresentationTranslator
from competitor.actions.feature import CategoricFeature, Feature
class DatasetWrapper(Dataset):
def __init__(self, name, legacy_dataset, feature_objects):
self.legacy_dataset = legacy_dataset
self.feature_objects = feature_objects.copy()
self.translator = RepresentationTranslator(self.feature_objects.copy())
super().__init__(name=name, description="Competitor dataset wrapper.")
def extract_info(self):
columns = np.array(list(self.feature_objects.keys()))
order = np.argsort([f.idx for _, f in self.feature_objects.items()])
columns = columns[order]
real_features = []
cat_features = []
for i, feature_key in enumerate(columns):
feature = self.feature_objects[feature_key]
if isinstance(feature, Feature):
real_features.append(i)
elif isinstance(feature, CategoricFeature):
cat_features.append(i)
return columns, self.legacy_dataset.labels, real_features, cat_features
def load(self) -> pd.DataFrame:
(
self.zs,
self.xs,
self.train_xs,
self.test_xs,
self.train_zs,
self.test_zs,
) = self.legacy_dataset.get_relevant_data()
self.label_encoder = LabelEncoder()
labels = self.label_encoder.fit_transform(self.train_xs[:, -1])
# ! train_zs[:, :-2], -2 because the last two columns are actually the labels
return self.train_zs[:, :-2], labels
def transform_to_original(self, X: np.array) -> np.array:
translated = np.array([self.translator.instance_to_x(x) for x in X])
return translated
# raise Exception("Function not defined")
"""Like one-hot encoding etc."""
def encode_features(self, X: np.array) -> np.array:
translated = np.array([self.translator.instance_to_z(x) for x in X])
return translated
# raise Exception("Function not defined")
def decode_features(self, X: np.array) -> np.array:
return X
# raise Exception("Function not defined")
"""Filter out certain variables before etc.
Also, transform cat to numerical"""
def preprocess(self, X: pd.DataFrame) -> pd.DataFrame:
return X
# raise Exception("Function not defined")
def get_numpy_representation(self) -> np.array:
return self.train_xs[:, :-1]
def numpy_to_df(self, X: np.array) -> pd.DataFrame:
return
|
pd.DataFrame(X)
|
pandas.DataFrame
|
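load() above relies on scikit-learn's LabelEncoder to map the string labels in the last column to integer targets; a tiny sketch of that call on made-up labels:
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
print(le.fit_transform(["no", "yes", "no", "maybe"]))  # -> [1 2 1 0]
print(le.classes_)                                     # -> ['maybe' 'no' 'yes']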
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from img2vec_pytorch import Img2Vec
import pandas as pd
from PIL import Image
from tqdm import tqdm
import numpy as np
import os
def most_similar(train_path, test_path, images_path, results_path, cuda=False):
"""
Nearest Neighbor Baseline: Img2Vec library (https://github.com/christiansafka/img2vec/) is used to obtain
image embeddings, extracted from ResNet-18. For each test image the cosine similarity with all the training images
is computed in order to retrieve similar training images.
The caption of the most similar retrieved image is returned as the generated caption of the test image.
:param train_path: The path to the train data tsv file with the form: "image \t caption"
:param test_path: The path to the test data tsv file with the form: "image \t caption"
:param images_path: The path to the images folder
:param results_path: The folder in which to save the results file
:param cuda: Boolean value of whether to use cuda for image embeddings extraction. Default: False
If a GPU is available pass True
:return: Dictionary with the results
"""
img2vec = Img2Vec(cuda=cuda)
# Load train data
train_data = pd.read_csv(train_path, sep="\t", header=None)
train_data.columns = ["id", "caption"]
train_images = dict(zip(train_data.id, train_data.caption))
# Get embeddings of train images
print("Calculating visual embeddings from train images")
train_images_vec = {}
print("Extracting embeddings for all train images...")
for train_image in tqdm(train_data.id):
image = Image.open(os.path.join(images_path, train_image))
image = image.convert('RGB')
vec = img2vec.get_vec(image)
train_images_vec[train_image] = vec
print("Got embeddings for train images.")
# Load test data
test_data = pd.read_csv(test_path, sep="\t", header=None)
test_data.columns = ["id", "caption"]
# Save IDs and raw image vectors separately but aligned
ids = [i for i in train_images_vec]
raw = np.array([train_images_vec[i] for i in train_images_vec])
# Normalize image vectors (by their element sum) so similarity can be computed with a plain dot product
raw = raw / np.array([np.sum(raw,1)] * raw.shape[1]).transpose()
sim_test_results = {}
for test_image in tqdm(test_data.id):
# Get test image embedding
image = Image.open(os.path.join(images_path, test_image))
image = image.convert('RGB')
vec = img2vec.get_vec(image)
# Compute cosine similarity with every train image
vec = vec / np.sum(vec)
# Tile the test vector so the similarity reduces to an element-wise multiply and row sum
test_mat = np.array([vec] * raw.shape[0])
sims = np.sum(test_mat * raw, 1)
top1 = np.argmax(sims)
# Assign the caption of the most similar train image
sim_test_results[test_image] = train_images[ids[top1]]
# Save test results to tsv file
df =
|
pd.DataFrame.from_dict(sim_test_results, orient="index")
|
pandas.DataFrame.from_dict
|
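The docstring above describes ranking training images by the cosine similarity of their embeddings; the code normalizes each vector by its element sum rather than its L2 norm before taking dot products, which approximates that ranking. For reference, here is a minimal numpy sketch of the L2-normalized variant, which yields exact cosine similarities in a single matrix-vector product; the embedding dimension and counts are placeholders.
import numpy as np
rng = np.random.default_rng(0)
train = rng.standard_normal((1000, 512))    # hypothetical 512-d train embeddings
query = rng.standard_normal(512)            # hypothetical test embedding
train_unit = train / np.linalg.norm(train, axis=1, keepdims=True)
query_unit = query / np.linalg.norm(query)
cosines = train_unit @ query_unit           # all cosine similarities at once
best = int(np.argmax(cosines))              # index of the most similar train image
print(best, float(cosines[best]))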
from pathlib import Path
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from message_ix import Scenario, macro
from message_ix.models import MACRO
from message_ix.testing import SCENARIO, make_westeros
W_DATA_PATH = Path(__file__).parent / "data" / "westeros_macro_input.xlsx"
MR_DATA_PATH = Path(__file__).parent / "data" / "multiregion_macro_input.xlsx"
class MockScenario:
def __init__(self):
self.data = pd.read_excel(MR_DATA_PATH, sheet_name=None, engine="openpyxl")
for name, df in self.data.items():
if "year" in df:
df = df[df.year >= 2030]
self.data[name] = df
def has_solution(self):
return True
def var(self, name, **kwargs):
df = self.data["aeei"]
# Add extra commodity to be removed
extra_commod = df[df.sector == "i_therm"].copy()
extra_commod["sector"] = "bar"
# Add extra region to be removed
extra_region = df[df.node == "R11_AFR"].copy()
extra_region["node"] = "foo"
df = pd.concat([df, extra_commod, extra_region])
if name == "DEMAND":
df = df.rename(columns={"sector": "commodity"})
elif name in ["COST_NODAL_NET", "PRICE_COMMODITY"]:
df = df.rename(columns={"sector": "commodity", "value": "lvl"})
df["lvl"] = 1e3
return df
@pytest.fixture(scope="class")
def westeros_solved(test_mp):
yield make_westeros(test_mp, solve=True, quiet=True)
@pytest.fixture(scope="class")
def westeros_not_solved(westeros_solved):
yield westeros_solved.clone(keep_solution=False)
def test_calc_valid_data_file(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
def test_calc_invalid_data(westeros_solved):
with pytest.raises(TypeError, match="neither a dict nor a valid path"):
macro.Calculate(westeros_solved, list())
with pytest.raises(ValueError, match="not an Excel data file"):
macro.Calculate(westeros_solved, Path(__file__).joinpath("other.zip"))
def test_calc_valid_data_dict(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
c = macro.Calculate(s, data)
c.read_data()
# Test for selecting desirable years specified in config from the Excel input
def test_calc_valid_years(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
# Adding an arbitrary year
arbitrary_yr = 2021
gdp_extra_yr = data["gdp_calibrate"].iloc[0, :].copy()
gdp_extra_yr["year"] = arbitrary_yr
data["gdp_calibrate"] = data["gdp_calibrate"].append(gdp_extra_yr)
# Check the arbitrary year is not in config
assert arbitrary_yr not in data["config"]["year"]
# But it is in gdp_calibrate
assert arbitrary_yr in set(data["gdp_calibrate"]["year"])
# And macro does calibration without error
c = macro.Calculate(s, data)
c.read_data()
def test_calc_no_solution(westeros_not_solved):
s = westeros_not_solved
pytest.raises(RuntimeError, macro.Calculate, s, W_DATA_PATH)
def test_config(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
assert "config" in c.data
assert "sector" in c.data["config"]
# Removing a column from config and testing
data = c.data.copy()
data["config"] = c.data["config"][["node", "sector"]]
try:
macro.Calculate(s, data)
except KeyError as error:
assert 'Missing config data for "level"' in str(error)
# Removing config completely and testing
data.pop("config")
try:
macro.Calculate(s, data)
except KeyError as error:
assert "Missing config in input data" in str(error)
c.read_data()
assert c.nodes == set(["Westeros"])
assert c.sectors == set(["light"])
def test_calc_data_missing_par(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
data.pop("gdp_calibrate")
c = macro.Calculate(s, data)
pytest.raises(ValueError, c.read_data)
def test_calc_data_missing_column(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
# drop a required column ("year")
data["gdp_calibrate"] = data["gdp_calibrate"].drop("year", axis=1)
c = macro.Calculate(s, data)
pytest.raises(ValueError, c.read_data)
def test_calc_data_missing_datapoint(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl")
# skip first data point
data["gdp_calibrate"] = data["gdp_calibrate"][1:]
c = macro.Calculate(s, data)
pytest.raises(ValueError, c.read_data)
#
# Regression tests: these tests were compiled upon moving from R to Python,
# values were confirmed correct at the time and thus are tested explicitly here
#
@pytest.mark.parametrize(
"method, test, expected",
(
("_growth", "allclose", [0.02658363, 0.04137974, 0.04137974, 0.02918601]),
("_rho", "equal", [-4.0]),
("_gdp0", "equal", [500.0]),
("_k0", "equal", [1500.0]),
(
"_total_cost",
"allclose",
1e-3
* np.array(
[9.17583, 11.653576414880422, 12.446804289049216, 15.369457033651215]
),
),
("_price", "allclose", [211, 511.02829331, 162.03953933, 161.0026274]),
("_demand", "allclose", [27, 55, 82, 104]),
("_bconst", "allclose", [9.68838201e-08]),
("_aconst", "allclose", [26.027323]),
),
)
def test_calc(westeros_solved, method, test, expected):
calc = macro.Calculate(westeros_solved, W_DATA_PATH)
calc.read_data()
function = getattr(calc, method)
assertion = getattr(npt, f"assert_{test}")
assertion(function().values, expected)
# Testing how macro handles zero values in PRICE_COMMODITY
def test_calc_price_zero(westeros_solved):
s = westeros_solved
clone = s.clone(scenario="low_demand", keep_solution=False)
clone.check_out()
# Lowering demand in the first year
clone.add_par("demand", ["Westeros", "light", "useful", 700, "year"], 10, "GWa")
# Making investment and var cost zero for delivering light
# TODO: these units are based on testing.make_westeros: needs improvement
clone.add_par("inv_cost", ["Westeros", "bulb", 700], 0, "USD/GWa")
for y in [690, 700]:
clone.add_par(
"var_cost", ["Westeros", "grid", y, 700, "standard", "year"], 0, "USD/GWa"
)
clone.commit("demand reduced and zero cost for bulb")
clone.solve()
price = clone.var("PRICE_COMMODITY")
# Assert that at least one zero price exists (to make sure MACRO receives a 0 price)
assert np.isclose(0, price["lvl"]).any()
c = macro.Calculate(clone, W_DATA_PATH)
c.read_data()
try:
c._price()
except RuntimeError as err:
# To make sure the right error message is raised in macro.py
assert "0-price found in MESSAGE variable PRICE_" in str(err)
else:
raise Exception("No error in macro.read_data() for zero price(s)")
def test_init(message_test_mp):
scen = Scenario(message_test_mp, **SCENARIO["dantzig"])
scen = scen.clone("foo", "bar")
scen.check_out()
MACRO.initialize(scen)
scen.commit("foo")
scen.solve(quiet=True)
assert np.isclose(scen.var("OBJ")["lvl"], 153.675)
assert "mapping_macro_sector" in scen.set_list()
assert "aeei" in scen.par_list()
assert "DEMAND" in scen.var_list()
assert "COST_ACCOUNTING_NODAL" in scen.equ_list()
def test_add_model_data(westeros_solved):
base = westeros_solved
clone = base.clone("foo", "bar", keep_solution=False)
clone.check_out()
MACRO.initialize(clone)
macro.add_model_data(base, clone, W_DATA_PATH)
clone.commit("finished adding macro")
clone.solve(quiet=True)
obs = clone.var("OBJ")["lvl"]
exp = base.var("OBJ")["lvl"]
assert np.isclose(obs, exp)
def test_calibrate(westeros_solved):
base = westeros_solved
clone = base.clone(base.model, "test macro calibration", keep_solution=False)
clone.check_out()
MACRO.initialize(clone)
macro.add_model_data(base, clone, W_DATA_PATH)
clone.commit("finished adding macro")
start_aeei = clone.par("aeei")["value"]
start_grow = clone.par("grow")["value"]
macro.calibrate(clone, check_convergence=True)
end_aeei = clone.par("aeei")["value"]
end_grow = clone.par("grow")["value"]
# calibration should have changed some/all of these values and none should
# be NaNs
assert not np.allclose(start_aeei, end_aeei, rtol=1e-2)
assert not np.allclose(start_grow, end_grow, rtol=1e-2)
assert not end_aeei.isnull().any()
assert not end_grow.isnull().any()
def test_calibrate_roundtrip(westeros_solved):
# this is a regression test with values observed on Aug 9, 2019
with_macro = westeros_solved.add_macro(W_DATA_PATH, check_convergence=True)
aeei = with_macro.par("aeei")["value"].values
npt.assert_allclose(
aeei,
1e-3 * np.array([20, -7.5142879, 43.6256281, 21.1434631]),
)
grow = with_macro.par("grow")["value"].values
npt.assert_allclose(
grow,
1e-3
* np.array(
[
26.5836313,
69.1417640,
79.1435807,
24.5225556,
]
),
)
#
# These are a series of tests to guarantee multiregion/multisector
# behavior is as expected.
#
def test_multiregion_valid_data():
s = MockScenario()
c = macro.Calculate(s, MR_DATA_PATH)
c.read_data()
def test_multiregion_derive_data():
s = MockScenario()
c = macro.Calculate(s, MR_DATA_PATH)
c.read_data()
c.derive_data()
nodes = ["R11_AFR", "R11_CPA"]
sectors = ["i_therm", "rc_spec"]
# make sure no extraneous data is there
check = c.data["demand"].reset_index()
assert (check["node"].unique() == nodes).all()
assert (check["sector"].unique() == sectors).all()
obs = c.data["aconst"]
exp = pd.Series(
[3.74767687, 0.00285472], name="value", index=
|
pd.Index(nodes, name="node")
|
pandas.Index
|
import mechanize
import pandas as pd
import bs4
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
from math import ceil
from time import sleep
import re
import os
import sys
import unicodedata
def strip_special_latin_char(string):
"""
Method that either transliterates selected latin characters, or maps unexpected characters to empty string, ''.
Example::
strip_special_latin_char('thƝis ƛis a teōst: ÆæœƔþðßøÞĿØijƧaÐŒƒ¿Ǣ')
:param str string: String to be striped of special latin characters
:return: String striped of special latin characters
:rtype: str
"""
latin_char_map = {'Æ': 'AE', 'Ð': 'D', 'Ø': 'O', 'Þ': 'TH', 'ß': 'ss', 'æ': 'ae',
'ð': 'd', 'ø': 'o', 'þ': 'th', 'Œ': 'OE', 'œ': 'oe', 'ƒ': 'f'}
string_ascii = ''
for s in string:
# Check if string character is ascii
if ord(s) < 128:
string_ascii += s
# If not ascii but is a special latin character, transliterate
elif s in latin_char_map.keys():
string_ascii += latin_char_map[s]
# Otherwise, remove the unexpected character
else:
string_ascii += ''
return string_ascii
def strip_accents(string):
"""
Method that transliterates accented characters.
Example::
strip_accents('thîš ìŝ ã tëśt')
:param str string: String to be striped of accented characters
:return: String striped of accented characters
:rtype: str
"""
# Taken from here:
# https://stackoverflow.com/a/518232
return ''.join(c for c in unicodedata.normalize('NFD', string)
if unicodedata.category(c) != 'Mn')
def convert_to_ascii(string):
"""
Method that ensures a given string object is converted into ascii format by removing accented characters
and transliterating special latin characters.
Example::
convert_to_ascii('thƝîš ƛìŝ ã tëśt: ÆæœƔþðßøÞĿØijƧaÐŒƒ¿Ǣ')
:param str string: String to be converted into ascii
:return: String converted into ascii
:rtype: str
"""
string = strip_accents(string)
string = strip_special_latin_char(string)
return string
def row_count_csv(input_file):
"""
Method to return the number of populated rows in a text file.
Example::
row_count_csv('example_file.txt')
:param str input_file: File path
:return: Number of populated rows in input_file
:rtype: int
"""
# Modified from https://stackoverflow.com/a/44144945/3905509
with open(input_file, errors='ignore') as f:
for i, l in enumerate(f):
pass
return i + 1
def get_last_line_csv(input_file):
"""
Method to return the last populated line in a text file.
Example::
get_last_line_csv('example_file.txt')
:param str input_file: File path
:return: Last populated line of input_file
:rtype: str
"""
if row_count_csv(input_file) == 1:
with open(input_file, 'r') as f:
return f.readline()
# Modified from https://www.quora.com/How-can-I-read-the-last-line-from-a-log-file-in-Python
with open(input_file, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
last_line = f.readline().decode('utf-8')
return last_line
def delete_last_line_csv(input_file):
"""
Method to delete the last line in a text file.
Example::
delete_last_line_csv('example_file.txt')
:param str input_file: File path
"""
# If there's 1 line left, clear the file. This is hack-y, but needed since the code below doesn't appear to work
# when there's 1 remaining populated row followed by 1 blank line.
if row_count_csv(input_file) == 1:
open(input_file, 'w').close()
# Modified from https://stackoverflow.com/a/10289740/3905509
with open(input_file, "r+", errors='ignore', encoding="utf-8") as file:
# Move the pointer (similar to a cursor in a text editor) to the end of the file
file.seek(0, os.SEEK_END)
# Skip the very last character in the file so that, if the last line is empty,
# we delete both that empty line and the penultimate line's newline
pos = file.tell() - 1
# Read each character in the file one at a time from the penultimate
# character going backwards, searching for a newline character
# If we find a new line, exit the search
while pos > 0 and file.read(1) != "\n":
pos -= 1
file.seek(pos, os.SEEK_SET)
# So long as we're not at the start of the file, delete all the characters ahead
# of this position
if pos > 0:
file.seek(pos, os.SEEK_SET)
file.truncate()
def scrape_chicago_marathon_urls(url='http://chicago-history.r.mikatiming.de/2015/', year=2016,
event="MAR_999999107FA30900000000A1", gender='M', num_results_per_page=1000,
unit_test_ind=False):
"""
Method to scrape all URLs of each Chicago Marathon runner returned from a specified web form.
Example::
scrape_chicago_marathon_urls(url='http://chicago-history.r.mikatiming.de/2015/', year=2017,
event="MAR_999999107FA30900000000A1", gender='M',
num_results_per_page=1000, unit_test_ind=True)
:param str url: URL to Chicago Marathon web form
:param int year: Year of marathon (supported values: 2014, 2015, 2016, 2017)
:param str event: Internal label used to specify the type of marathon participants (varies by year)
:param str gender: Gender of runner ('M' for male, 'W' for female)
:param int num_results_per_page: Number of results per page to return from the web form (use default value only)
:param bool unit_test_ind: Logical value to specify if only the first URL should be returned (True) or all (False)
:return: DataFrame containing URLs, City, and State for all runners found in results page
:rtype: pandas.DataFrame
"""
# Setup backend browser via mechanize package
br = mechanize.Browser()
# Ignore robots.txt
# Note: I have not found any notice on the Chicago Marathon website that prohibits web scraping.
br.set_handle_robots(False)
br.open(url)
# Select the overall results, not individual runner results
br.select_form(nr=2)
# Set year
br.form['event_main_group'] = [str(year)]
# Manually add an option for the 'event' drop-down, which is expected to be populated when the the
# 'event_main_group' drop-down is modified.
mechanize.Item(
br.form.find_control(name='event'),
{'contents': event, 'value': event, 'label': event}
)
# Set event
br.form['event'] = [event]
# Set gender
br.form['search[sex]'] = gender
# Set age group
br.form['search[age_class]'] = "%"
# Set number of results per page
br.form['num_results'] = [str(num_results_per_page)]
# Submit form
resp = br.submit()
# Retrieve selected tags via SoupStrainer
html = resp.read()
strainer = SoupStrainer(["ul", "h4"])
soup = BeautifulSoup(html, "lxml", parse_only=strainer)
# The first instance of ul with class = list-group appears to always
# contain the total expected number of results
first_list_group_item = soup.select_one('li.list-group-item').text
total_expected_num_results = int(str.split(first_list_group_item)[0])
total_expected_num_pages = ceil(total_expected_num_results / num_results_per_page)
print('Finding URLs for Year = ' + str(year) + ' and Gender = ' + str(gender))
print('Total expected results: ' + str(total_expected_num_results))
# Define lists to store data
result_urls = []
result_cities = []
result_states = []
# Starting with 1 page returned since the form was submitted
total_returned_num_pages = 1
total_returned_num_results = 0
while total_returned_num_pages <= total_expected_num_pages:
print('Progress: Page ' + str(total_returned_num_pages) + ' of ' + str(total_expected_num_pages), end='\r')
if total_returned_num_pages > 1:
# Note: No delay is included here because the current code
# takes longer than 1 second to complete per page. We feel
# that this is enough to avoid hammering the server and hogging
# resources.
# The link with text ">" appears to always point to the next
# results page.
next_page_link = br.find_link(text='>')
resp = br.follow_link(next_page_link)
html = resp.read()
soup = BeautifulSoup(html, "lxml", parse_only=strainer)
# Store URLs for individual runners
runner_links = soup.select('h4.type-fullname a[href]')
result_per_page_count = 0
for link in runner_links:
runner_url = url + link['href']
result_urls.extend([runner_url])
result_per_page_count += 1
if unit_test_ind:
break
# Grab city & state data since it's only fully populated on the
# form results page, not the individual runners' pages.
# The 'type-eval' label appears to always store this info.
# The first element in the CSS select statement is the table
# header "City, State", which we don't want. Manually removing
# this element via indexing.
location_table = soup.select('div.type-eval')[1:]
for location_row in location_table:
# The text of each location_row includes a sub-header "City, State",
# which we don't want. Using re.sub to remove it.
location = re.sub('City, State', '', location_row.text).split(', ')
# Some runners have no location listed, which is presented as an en dash ('–').
if location[0] == '–':
result_cities.append(None)
result_states.append(None)
else:
city = convert_to_ascii(location[0].replace('"', ''))
result_cities.append(city)
# Store state info if present in the 2nd element of location.
if len(location) == 2:
state = convert_to_ascii(location[1].replace('"', ''))
result_states.append(state)
else:
result_states.append(None)
if unit_test_ind:
break
# Tracking returned results for printing to console.
total_returned_num_results += result_per_page_count
total_returned_num_pages += 1
if unit_test_ind:
break
print('')
print('URL scraping complete!')
# Combining all results into one pd.DataFrame.
dict_urls = {'urls': result_urls}
dict_city = {'city': result_cities}
dict_state = {'state': result_states}
dict_results = {}
for dictionary in [dict_urls, dict_city, dict_state]:
dict_results.update(dictionary)
df = pd.DataFrame.from_dict(dict_results)
df = df.reindex(['urls', 'city', 'state'], axis='columns')
return df
def scrape_chicago_runner_details(url, gender, city, state):
"""
Method to scrape relevant information about a given runner in Chicago Marathon. The scraped details include:
* **year**: Year in which the runner participated in the marathon
* **bib**: Bib number to specify the runner for the particular marathon
* **age_group**: Age of runner at time of marathon. Expressed in 5 year bands, except for 16-19.
* **gender**: Gender of runner
* **city**: Runner's home city
* **state**: Runner's home state
* **country**: Runner's home country
* **overall**: Overall rank for runner based on the finish time
* **rank_gender**: Rank for runner within gender category based on the finish time
* **rank_age_group**: Rank for runner within age category based on the finish time
* **5k**: Split time at 5 km in seconds
* **10k**: Split time at 10 km in seconds
* **15k**: Split time at 15 km in seconds
* **20k**: Split time at 20 km in seconds
* **half**: Split time at half marathon in seconds
* **25k**: Split time at 25 km in seconds
* **30k**: Split time at 30 km in seconds
* **35k**: Split time at 35 km in seconds
* **40k**: Split time at 40 km in seconds
* **finish**: Split time to complete marathon in seconds
Example::
scrape_chicago_runner_details(url=('http://chicago-history.r.mikatiming.de/2015/?content=detail&fpid='
'search&pid=search&idp=999999107FA309000019D3BA&lang=EN_CAP&event='
'MAR_999999107FA309000000008D&lang=EN_CAP&search%5Bstart_no%5D='
'54250&search_event=ALL_EVENT_GROUP_2016'), gender='M', city='Portland',
state='OR')
:param str url: URL for an individual runner's results
:param str gender: Gender of runner ('M' for male, 'W' for female)
:param str city: City specified by runner
:param str state: State specified by runner
:return: DataFrame
:rtype: pandas.DataFrame
"""
br = mechanize.Browser()
# Ignore robots.txt
br.set_handle_robots(False)
# Link to results of a single runner
# Use try/except in case of unexpected internet/URL issues
try:
br.open(url)
except (mechanize.HTTPError, mechanize.URLError) as e:
if hasattr(e, 'code'):
if int(e.code) == 500:
print('Following URL has HTTP Error 500:')
print(url)
else:
print('Following URL has unexpected connection issue:')
print(url)
return 'Connection error'
html = br.response().read()
strainer = SoupStrainer(['tr', 'thead'])
soup = BeautifulSoup(html, "lxml", parse_only=strainer)
# Only process runners having event = 'Marathon', for the purposes of this project.
# Note: This is precautionary, as early exploration showed that non-runners were included among the results.
# This issue no longer appears to be present.
event_name = soup.select('td.f-event_name')[0].text
if event_name != 'Marathon':
return None
# Grab runner's info
marathon_year = int(soup.select('td.f-event_date')[0].text)
full_name = soup.select('td.f-__fullname')[0].text
age_group = soup.select('td.f-age_class')[0].text
bib_number = soup.select('td.f-start_no')[0].text
# Derive country name from runner's name.
# Modified from here:
# https://stackoverflow.com/a/4894156/3905509
country = convert_to_ascii(full_name[full_name.find("(")+1:full_name.find(")")])
rank_gender = soup.select('td.f-place_all')[0].text
rank_age_group = soup.select('td.f-place_age')[0].text
rank_overall = soup.select('td.f-place_nosex')[0].text
headers_index = ['year', 'bib', 'age_group', 'gender', 'city', 'state', 'country', 'overall', 'rank_gender',
'rank_age_group']
headers = headers_index.copy()
cols_constant = [marathon_year, bib_number, age_group, gender, city, state, country, rank_overall, rank_gender,
rank_age_group]
headers_splits = soup.select('thead th')
for header_split in headers_splits:
headers.append(header_split.text)
headers = [header.lower() for header in headers]
df = pd.DataFrame(columns=headers)
split_string_bs4 = 'tr.f-time_'
splits = ['05', '10', '15', '20', '52', '25', '30', '35', '40', 'finish_netto']
splits_select_list = [split_string_bs4 + split for split in splits]
for split_select in splits_select_list:
splits_row = soup.select(split_select)
# Take union of text in each column of splits_row
splits_row_union = [cell.text for cell in splits_row]
# Expecting the 'Finish' split time to share CSS tag with 'Finish Time',
# which excludes other info. In this case, only keep the former data.
if len(splits_row_union) > 1:
cols = cols_constant + splits_row_union[1].split('\n')[1:-1]
else:
cols = cols_constant + splits_row_union[0].split('\n')[1:-1]
# Convert results into a single row pd.DataFrame
cols = [dict(zip(headers, cols))]
df_row = pd.DataFrame(cols, columns=headers)
df = df.append(df_row)
# Reset index of dataframe since each row was individually appended
df.reset_index(drop=True, inplace=True)
# Only keep relevant fields
df = df[headers_index + ['split', 'time']]
# Convert split times into numeric seconds from strings.
# Modified from the following:
# https://stackoverflow.com/a/44445488/3905509
df['time'] =
|
pd.to_timedelta(df['time'])
|
pandas.to_timedelta
|
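The completion above hands the split-time strings to pandas.to_timedelta; a tiny sketch of how "hh:mm:ss" strings become numeric seconds (the split values here are made up):
import pandas as pd
splits = pd.Series(["00:21:10", "00:42:45", "01:29:59"])   # hypothetical 5k/10k/half splits
seconds = pd.to_timedelta(splits).dt.total_seconds()
print(seconds.tolist())                                    # [1270.0, 2565.0, 5399.0]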
import pandas as pd
import lightgbm
from sklearn.linear_model import Lasso
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_regression
from sklearn.preprocessing import StandardScaler
def GBoostingFeatureSelection(X, y, random_state=0):
__trashhold__ = 2
# Scale the data
scaler = StandardScaler()
X_Scaled = scaler.fit_transform(X)
X_Scaled = pd.DataFrame(X_Scaled, columns=X.columns, index=X.index)
# Init Regressor
lightgbmRegressor = lightgbm.LGBMRegressor(random_state=random_state)
# Fit Regressor
lightgbmRegressor.fit(X_Scaled, y)
# Create DataFrame with feature importance
importance_dict = {'feature_name': lightgbmRegressor.feature_name_ ,
'feature_importance': lightgbmRegressor.feature_importances_,
'method': 'lightgbm.LGBMRegressor',
'random_state': random_state
}
feature_importance = pd.DataFrame(importance_dict)
feature_importance = feature_importance.loc[feature_importance['feature_importance']>=__trashhold__, :]
feature_importance.sort_values('feature_importance', ascending=False, inplace=True)
return feature_importance
def LassoFeatureSelection(X, y, random_state=0):
__trashhold__ = 0.1
# Scale the data
scaler = StandardScaler()
X_Scaled = scaler.fit_transform(X)
X_Scaled = pd.DataFrame(X_Scaled, columns=X.columns, index=X.index)
# Init Regressor
LassoRegressor = Lasso(alpha=0.00001, random_state=random_state)
# Fit Regressor
LassoRegressor.fit(X_Scaled, y)
# Create DataFrame with feature importance
importance_dict ={'feature_name': X_Scaled.columns,
'feature_importance': abs(LassoRegressor.coef_),
'method': 'LassoRegressor',
'random_state': random_state
}
feature_importance = pd.DataFrame(importance_dict)
feature_importance = feature_importance.loc[feature_importance['feature_importance']>__trashhold__,:]
feature_importance.sort_values('feature_importance', ascending=False, inplace=True)
return feature_importance
def KBestFeatureSelection(X, y):
__trashhold__ = 1.6
# Scale the data
scaler = StandardScaler()
X_Scaled = scaler.fit_transform(X)
X_Scaled = pd.DataFrame(X_Scaled, columns=X.columns, index=X.index)
# Init Regressor
KBest_Selector = SelectKBest(score_func=mutual_info_regression, k='all')
# Fit Regressor
KBest_Selector.fit(X_Scaled, y)
# Create DataFrame with feature importance
importance_dict ={'feature_name': X_Scaled.columns ,
'feature_importance': abs(KBest_Selector.scores_),
'method': 'SelectKBest',
'random_state': None
}
feature_importance = pd.DataFrame(importance_dict)
feature_importance = feature_importance.loc[feature_importance['feature_importance']>__trashhold__,:]
feature_importance.sort_values('feature_importance', ascending=False, inplace=True)
return feature_importance
def ChronosFeatureSelection(X, y, standart_scale_corr=True, random_state=0):
#Perform Individual Feature Selection
gb = GBoostingFeatureSelection(X, y, random_state)
ls = LassoFeatureSelection(X, y, random_state)
kb = KBestFeatureSelection(X, y)
essemble = pd.concat([gb,ls,kb]).reset_index(drop=True)
essemble_gp = essemble[['feature_name','method']].groupby('feature_name', as_index=False).agg(num_selector=('method', 'count'))
essemble = pd.merge(essemble, essemble_gp, how='left', on=['feature_name']).sort_values('num_selector', ascending=False)
if standart_scale_corr is True:
# Scale the data
scaler = StandardScaler()
X_Scaled = scaler.fit_transform(X)
X_Scaled =
|
pd.DataFrame(X_Scaled, columns=X.columns, index=X.index)
|
pandas.DataFrame
|
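ChronosFeatureSelection above counts, per feature, how many of the three selectors kept it (the num_selector column) and merges that count back into the ensemble frame. A toy sketch of that vote-counting step with invented selector outputs:
import pandas as pd
gb = pd.DataFrame({"feature_name": ["f1", "f2"], "method": "lightgbm.LGBMRegressor"})
ls = pd.DataFrame({"feature_name": ["f1"], "method": "LassoRegressor"})
kb = pd.DataFrame({"feature_name": ["f1", "f3"], "method": "SelectKBest"})
ensemble = pd.concat([gb, ls, kb]).reset_index(drop=True)
votes = (ensemble.groupby("feature_name", as_index=False)
                 .agg(num_selector=("method", "count")))
print(votes)   # f1 is kept by all three selectors, f2 and f3 by one each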
"""
This script contains a simple (intraday) trend-following strategy using a Bollinger Band.
Strategy -
1) BUY when the price crosses the Upper Band from below.
2) SELL when the price crosses the Lower Band from above.
3) Close the positions at Take Profit or Stop Loss, or
when counter positions need to be taken.
4) If a new signal arrives in the same direction as the previous one,
we increase the position size.
"""
# Imports
import logging
import talib as ta
import numpy as np
import pandas as pd
from time import sleep
from bars import EventDrivenBars
from connection import Client
# logging init
logging.basicConfig(
filename='error.log',
level=logging.WARNING,
format='%(asctime)s:%(levelname)s:%(message)s')
# setup the connection with the API
client = Client()
conn = client.connect()
api = client.api()
class TrendFollowing:
"""
A class for the trend-following strategy.
"""
def __init__(
self,
symbol: str,
bar_type: str,
TP: int = 2,
SL: int = 1,
qty: int = 1,
window_size: int = 22):
"""
:param symbol : (str) the asset symbol for the strategy.
:param bar_type : (str) the type of the alternative bars.
:param TP : (int) the take-profit multiple.
:param SL : (int) the stop-loss multiple.
:param qty : (int) the number quantities to buy and sell.
:param window_size : (int) the lookback window for the Bollinger Band.
"""
# Initialize model parameters like TP, SL, thresholds etc.
self.TP = TP # times the current volatility.
self.SL = SL # times the current volatility.
self.window = window_size # window size for bollinger bands
self.symbol = symbol # ticker symbol for the asset
self.bar_type = bar_type # bar_type for the strategy
# a flag to know if the strategy is in a bar collecting mode
self.collection_mode = True
self.active_trade = False # to know if any active trade is present
self.qty = qty # quantity to trade (buy or sell)
self.open_order = None # to know if any open orders exists
self.sl = None # stop-loss of current position
self.tp = None # take-profit of current position
self.prices = pd.Series()
# check if historical data exists
if self.read_data():
self.collection_mode = False
else:
print(f'on collection mode for {symbol}')
def read_data(self):
"""
A function to read the historical bar data.
"""
try:
df = pd.read_csv(
f'data/{self.bar_type}.csv',
index_col=[0],
parse_dates=[0],
usecols=[
'timestamp',
'symbol',
'close'])
# the minimum amount of data required is the BB window size + 1
if not df.empty:
prices = df[df['symbol'] == self.symbol]['close']
if len(prices) > self.window:
self.prices = prices[-self.window + 1:]
del df
return True
except FileNotFoundError as e:
pass
return False
def get_volatility(self, frequency: str = '1H'):
"""
A function to get the hourly volatility if enough data exists;
otherwise the volatility over the minimum window of data is used. The volatility
is used to set the TP and SL of a position.
"""
ret = self.prices.pct_change()[1:]
# get hourly volatility
vol = ret.groupby(pd.Grouper(freq=frequency)).std()[-1]
return vol
def liquidate_position(self):
# cancel any open (bracket) orders before closing the position
self.cancel_orders()
try:
# close the position
res = api.close_position(self.symbol)
# check if filled
status = api.get_order(res.id).status
# reset
self.active_trade = False
self.sl = None
self.tp = None
except Exception as e:
logging.exception(e)
def cancel_orders(self):
"""
A function to handle cancellation of an open order.
"""
try:
api.cancel_order(self.open_order.id)
self.open_order = None
except Exception as e:
if e.status_code == 404:
# order not found
logging.exception(e)
if e.status_code == 422:
# the order status is not cancelable.
logging.exception(e)
# break
def check_open_position(self):
"""
Get any open position for the symbol
if one exists.
"""
try:
pos = api.get_position(self.symbol)
self.active_trade = [pos.side, pos.qty]
except Exception as e:
if e.status_code == 404:
# position doesn't exist in the asset
self.active_trade = False
def RMS(self, price: float):
"""
If a position exists, then check whether the take-profit or
stop-loss is reached. It is a simple risk-management
function.
:param price :(float) last trade price.
"""
self.check_open_position()
if self.active_trade:
# check SL and TP
if price <= self.sl or price >= self.tp:
# close the position
self.liquidate_position()
def OMS(self, BUY: bool = False, SELL: bool = False):
"""
An order management system that handles the orders and positions for given
asset.
:param BUY :(bool) If True will buy given quantity of asset at market price.
If a short sell position is active, it will close the short position.
:param SELL :(bool) if True will sell given quantity of asset at market price.
If a long BUY position is active, it will close the long position.
"""
# check for open position
self.check_open_position()
# calculate the current volatility
vol = self.get_volatility()
if BUY:
# check if counter position exists
if self.active_trade and self.active_trade[0] == 'short':
# exit the previous short SELL position
self.liquidate_position()
# calculate TP and SL for BUY order
self.tp = self.prices[-1] + (self.prices[-1] * self.TP * vol)
self.sl = self.prices[-1] - (self.prices[-1] * self.SL * vol)
side = 'buy'
if SELL:
# check if counter position exists
if self.active_trade and self.active_trade[0] == 'long':
# exit the previous long BUY position
self.liquidate_position()
# calculate TP and SL for SELL order
self.tp = self.prices[-1] - (self.prices[-1] * self.TP * vol)
self.sl = self.prices[-1] + (self.prices[-1] * self.SL * vol)
side = 'sell'
# check for time till market closing.
clock = api.get_clock()
closing = clock.next_close - clock.timestamp
market_closing = round(closing.seconds / 60)
if market_closing > 30 and (BUY or SELL):
# only submit new trades if more than 30 minutes remain until market close.
if self.open_order is not None:
# cancel any open orders before sending a new order
self.cancel_orders()
# submit a simple order.
self.open_order = api.submit_order(
symbol=self.symbol,
qty=self.qty,
side=side,
type='market',
time_in_force='day')
def on_bar(self, bar: dict):
"""
This function will be called every time a new bar is formed. It
evaluates the entry logic using the Bollinger Bands.
:param bar : (dict) an alternative bar generated by the EventDrivenBars class.
"""
if self.collection_mode:
self.prices = self.prices.append(pd.Series(
[bar['close']], index=[pd.to_datetime(bar['timestamp'])]))
if len(self.prices) > self.window:
self.collection_mode = False
if not self.collection_mode:
# append the current bar to the prices series
self.prices = self.prices.append(pd.Series(
[bar['close']], index=[
|
pd.to_datetime(bar['timestamp'])
|
pandas.to_datetime
|
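The strategy docstring above defines entries on Bollinger Band crossovers, but the band calculation itself falls outside this excerpt. Below is a minimal pandas-only sketch assuming a simple moving average with +/- 2 rolling standard deviations over the same lookback window; it illustrates the crossover checks and is not necessarily the indicator configuration the class uses.
import numpy as np
import pandas as pd
rng = np.random.default_rng(1)
prices = pd.Series(100 + rng.standard_normal(200).cumsum())   # hypothetical close prices
window = 22
mid = prices.rolling(window).mean()
std = prices.rolling(window).std()
upper, lower = mid + 2 * std, mid - 2 * std
# Crossed the upper band from below -> BUY; crossed the lower band from above -> SELL.
buy = (prices.shift(1) <= upper.shift(1)) & (prices > upper)
sell = (prices.shift(1) >= lower.shift(1)) & (prices < lower)
print(int(buy.sum()), "buy signals,", int(sell.sum()), "sell signals")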
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("sort", [True, False])
def test_factorize(index_or_series_obj, sort):
obj = index_or_series_obj
result_codes, result_uniques = obj.factorize(sort=sort)
constructor = pd.Index
if isinstance(obj, pd.MultiIndex):
constructor = pd.MultiIndex.from_tuples
expected_uniques = constructor(obj.unique())
if sort:
expected_uniques = expected_uniques.sort_values()
# construct an integer ndarray so that
# `expected_uniques.take(expected_codes)` is equal to `obj`
expected_uniques_list = list(expected_uniques)
expected_codes = [expected_uniques_list.index(val) for val in obj]
expected_codes = np.asarray(expected_codes, dtype=np.intp)
tm.assert_numpy_array_equal(result_codes, expected_codes)
|
tm.assert_index_equal(result_uniques, expected_uniques)
|
pandas._testing.assert_index_equal
|
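The test above reconstructs the expected factorize output by hand; for reference, a tiny sketch of the call itself and the round-trip property being checked (uniques.take(codes) recovers the original values):
import pandas as pd
codes, uniques = pd.factorize(pd.Series(["b", "a", "b", "c"]), sort=True)
print(codes)                 # [1 0 1 2]
print(uniques)               # Index(['a', 'b', 'c'], dtype='object')
print(uniques.take(codes))   # Index(['b', 'a', 'b', 'c'], dtype='object')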
import pandas as pd
import numpy as np
import argparse
import time
def getArgs():
parser = argparse.ArgumentParser()
parser.add_argument('-output',
required=False,
default='processed_data/',
help='path of output folder.')
parser.add_argument('-inpath',
required=False,
default='inputdata/',
help='input file path and name.')
return parser.parse_args()
def get_prot2vec(prot):
prot2vec_path = 'protein_pos_protvec/'
prot2vec_file = prot2vec_path + prot + '.protvec'
with open(prot2vec_file, 'r') as prot_file:
protvec_data = prot_file.read()
protvec_string = protvec_data.split(',99\n')[1]
protvec_tmp = protvec_string.replace(',', ' ').split()
protvec_list = [float(i) for i in protvec_tmp]
return protvec_list
def get_mol2vec(drug_name):
drug_feature =
|
pd.read_csv('drugs_mol2vec/' + drug_name + '.csv')
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import pytest
from hypothesis import assume, given
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
categoricaldf_strategy,
df_strategy,
)
def test_case_when_1():
"""Test case_when function."""
df = pd.DataFrame(
{
"a": [0, 0, 1, 2, "hi"],
"b": [0, 3, 4, 5, "bye"],
"c": [6, 7, 8, 9, "wait"],
}
)
expected = pd.DataFrame(
{
"a": [0, 0, 1, 2, "hi"],
"b": [0, 3, 4, 5, "bye"],
"c": [6, 7, 8, 9, "wait"],
"value": ["x", 0, 8, 9, "hi"],
}
)
result = df.case_when(
((df.a == 0) & (df.b != 0)) | (df.c == "wait"),
df.a,
(df.b == 0) & (df.a == 0),
"x",
df.c,
column_name="value",
)
assert_frame_equal(result, expected)
def test_len_args(dataframe):
"""Raise ValueError if `args` length is less than 3."""
with pytest.raises(ValueError, match="three arguments are required"):
dataframe.case_when(dataframe.a < 10, "less_than_10", column_name="a")
def test_args_even(dataframe):
"""Raise ValueError if `args` length is even."""
with pytest.raises(ValueError, match="`default` argument is missing"):
dataframe.case_when(
dataframe.a < 10,
"less_than_10",
dataframe.a == 5,
"five",
column_name="a",
)
def test_column_name(dataframe):
"""Raise TypeError if `column_name` is not a string."""
with pytest.raises(TypeError):
dataframe.case_when(
dataframe.a < 10,
"less_than_10",
dataframe.a,
column_name=("a",),
)
@given(df=df_strategy())
def test_default_ndim(df):
"""Raise ValueError if `default` ndim > 1."""
with pytest.raises(ValueError):
df.case_when(df.a < 10, "less_than_10", df, column_name="a")
@given(df=df_strategy())
def test_default_length(df):
"""Raise ValueError if `default` length != len(df)."""
assume(len(df) > 10)
with pytest.raises(
ValueError,
match=(
"length of the `default` argument should be equal to the length of"
" the DataFrame"
),
):
df.case_when(
df.a < 10,
"less_than_10",
df.loc[:5, "a"],
column_name="a",
)
@given(df=df_strategy())
def test_error_multiple_conditions(df):
"""Raise ValueError for multiple conditions."""
with pytest.raises(ValueError):
df.case_when(df.a < 10, "baby", df.a + 5, "kid", df.a, column_name="a")
@given(df=df_strategy())
def test_case_when_condition_callable(df):
"""Test case_when for callable."""
result = df.case_when(
lambda df: df.a < 10, "baby", "bleh", column_name="bleh"
)
expected = np.where(df.a < 10, "baby", "bleh")
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=df_strategy())
def test_case_when_condition_eval(df):
"""Test case_when for callable."""
result = df.case_when("a < 10", "baby", "bleh", column_name="bleh")
expected = np.where(df.a < 10, "baby", "bleh")
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=df_strategy())
def test_case_when_replacement_callable(df):
"""Test case_when for callable."""
result = df.case_when(
"a > 10", lambda df: df.a + 10, lambda df: df.a * 2, column_name="bleh"
)
expected = np.where(df.a > 10, df.a + 10, df.a * 2)
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=categoricaldf_strategy())
def test_case_when_default_list(df):
"""
Test case_when for scenarios where `default` is list-like,
but not a Pandas or numpy object.
"""
default = range(len(df))
result = df.case_when(
"numbers > 1", lambda df: df.numbers + 10, default, column_name="bleh"
)
expected = np.where(df.numbers > 1, df.numbers + 10, default)
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=categoricaldf_strategy())
def test_case_when_default_index(df):
"""Test case_when for scenarios where `default` is an index."""
default = range(len(df))
result = df.case_when(
"numbers > 1",
lambda df: df.numbers + 10,
pd.Index(default),
column_name="bleh",
)
expected = np.where(df.numbers > 1, df.numbers + 10, default)
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=df_strategy())
def test_case_when_multiple_args(df):
"""Test case_when for multiple arguments."""
result = df.case_when(
df.a < 10,
"baby",
df.a.between(10, 20, "left"),
"kid",
lambda df: df.a.between(20, 30, "left"),
"young",
"30 <= a < 50",
"mature",
"grandpa",
column_name="elderly",
)
conditions = [
df["a"] < 10,
(df["a"] >= 10) & (df["a"] < 20),
(df["a"] >= 20) & (df["a"] < 30),
(df["a"] >= 30) & (df["a"] < 50),
]
choices = ["baby", "kid", "young", "mature"]
expected = np.select(conditions, choices, "grandpa")
expected = df.assign(elderly=expected)
|
assert_frame_equal(result, expected)
|
pandas.testing.assert_frame_equal
|
import os
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, train_test_split, StratifiedKFold
import PIL
from PIL import Image
import io
import cv2
from keras.datasets import mnist
import multiprocessing as mp
from multiprocessing import Pool, Manager, Process
from functools import partial
from . import logging_daily
from . import utils
from keras.utils import to_categorical
######################################################################
# Base Reader
######################################################################
class BaseReader(object):
"""Inherit from this class when implementing new readers."""
def __init__(self, log, path_info, network_info, verbose=True):
self.log = log
self.verbose = verbose
self.data_path = path_info['data_info']['data_path']
if network_info['model_info']['normalize_sym'] == 'True': self.normalize_sym = True
else: self.normalize_sym = False
if network_info['model_info']['n_label'] == 'None': self.n_label = None
else: self.n_label = int(network_info['model_info']['n_label'])
if network_info['model_info']['augment'] == 'True': self.augment = True
else: self.augment = False
self.x_list = None
self.img_shape = None
def read_dataset(self, data_path):
raise NotImplementedError()
def get_dataset(self):
raise NotImplementedError()
def get_cv_index(self, nfold=5):
raise NotImplementedError()
def get_augment(self, x):
for i in range(x.shape[0]):
if np.random.randint(2, size=1):
# Flip Horizontally
if np.random.randint(2, size=1):
x[i] = x[i,:,::-1,:] # (N, H, W, C)
# Channel Noise
if np.random.randint(2, size=1):
if np.random.randint(2, size=1):
# uniform noise
noise = np.random.uniform(0,0.05,(x.shape[1],x.shape[2]))
picked_ch = np.random.randint(3, size=1)[0]
x[i,:,:,picked_ch] += noise
x[i,:,:,picked_ch] = np.clip(x[i,:,:,picked_ch], a_min=0., a_max=1.)
elif np.random.randint(2, size=1):
# gray
x[i,:,:,:] = np.repeat(np.expand_dims(np.dot(x[i,:,:], [0.299, 0.587, 0.114]), axis=-1), 3, axis=-1)
return x
def show_class_information(self, y=None):
if y is None: y_table = self.y_table
else: y_table = pd.Series(y)
y_counts = y_table.value_counts()
self.log.info('-------------------------------------------------')
self.log.info('Images per Class')
self.log.info('\n%s', y_counts)
self.log.info('-------------------------------------------------')
self.log.info('Summary')
self.log.info('\n%s', y_counts.describe())
self.log.info('-------------------------------------------------')
# def write_embeddings_metadata(self, embedding_metadata_path, e_x, e_y):
# with open(embedding_metadata_path,'w') as f:
# f.write("Index\tLabel\tClass\n")
# for index,label in enumerate(e_y):
# f.write("%d\t%s\t%d\n" % (index,"fake",label)) # fake
# for index,label in enumerate(e_y):
# f.write("%d\t%s\t%d\n" % (len(e_y)+index,"true",10)) # true
def get_image_shape(self):
return self.img_shape
def get_cv_index(self, nfold=5, random_state = 12):
self.log.info('%d-fold Cross Validation Cut' % nfold)
kf = KFold(n_splits=nfold, shuffle=True, random_state=random_state)
return kf.split(range(self.y.shape[0]))
def get_training_validation_index(self, idx, validation_size=0.2):
return train_test_split(idx, test_size = validation_size)
def get_dataset(self):
return self.x, self.y
def get_label(self):
return self.y
def get_n_label(self):
return self.num_classes
def handle_imbalance(self, train_idx, minority_group_size=0.3, minority_ratio=0.3, seed=12):
self.log.info('----------------------------------------------------')
self.log.info('Handle imbalance')
self.log.info('Minority_group_size : %s' % minority_group_size)
self.log.info('Minority_ratio (per group) : %s' % minority_ratio)
self.log.info('----------------------------------------------------')
np.random.seed(seed)
minorities = np.random.choice(self.y_class, size=int(minority_group_size * self.y_class.shape[0]))
pick = []
if len(minorities) > 0:
for i, minority in enumerate(minorities):
minority_index = self.y_index.get_loc(minority)
delete_size = int(np.sum(minority_index) * (1-minority_ratio))
pick.append(np.random.choice(np.where(minority_index)[0], replace=False, size=delete_size))
self.log.info('minority class - %s : deleted %s of %s' % (minority, delete_size, np.sum(minority_index)))
pick = np.concatenate(pick)
train_idx = np.setdiff1d(train_idx, pick)
if self.verbose: self.show_class_information(self.y[train_idx])
return train_idx
def class_to_categorical(self, y):
return to_categorical(self.class_to_int(y), self.num_classes)
def categorical_to_series(self, y_coded):
return pd.Series(np.argmax(y_coded, axis=1)).map(self.y_int_to_class)
def class_to_int(self, y):
return np.array(pd.Series(y).map(self.y_class_to_int))
def int_to_class(self, y_int):
return pd.Series(y_int).map(self.y_int_to_class)
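# The helpers above define the reader contract shared by the subclasses below:
# BaseReader.__init__ pulls path_info['data_info']['data_path'] and the string
# flags network_info['model_info']['normalize_sym' / 'n_label' / 'augment'],
# and the label utilities form a round trip -- raw labels -> class_to_int ->
# class_to_categorical (one-hot), with categorical_to_series reversing it via
# argmax + y_int_to_class. A new subclass therefore typically fills in x/y,
# img_shape, y_table, y_index, y_class and the two class<->int mapping dicts
# in its read_dataset() (compare ToyReader below).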
#########################################################################################################
# Toy Sample Reader
#########################################################################################################
class ToyReader(BaseReader):
def __init__(self, log, path_info, network_info, verbose=True):
super(ToyReader,self).__init__(log, path_info, network_info, verbose)
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
def read_dataset(self, nlabel=None):
dir_path = self.data_path
self.x = np.load('%s/x.npy'%dir_path).astype(np.float32)
self.x = self.x.reshape(self.x.shape[0],int(np.sqrt(self.x.shape[1])),int(np.sqrt(self.x.shape[1])),1)
self.y = np.load('%s/y.npy'%dir_path)
self.img_shape = self.x.shape[1:]
if nlabel is not None:
y_table = pd.Series(self.y)
selected_class = y_table.unique()[:nlabel]
selected_class = y_table.isin(selected_class)
self.x_list = self.x[selected_class]
self.y = self.y[selected_class]
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
self.y_class_to_int = dict(zip(self.y_class, range(self.y_class.shape[0])))
self.y_int_to_class = dict(zip(range(self.y_class.shape[0]), self.y_class))
self.num_classes = len(self.y_class)
######################################
def get_batch(self, idxs):
img_batches = self.x[idxs]
y = self.class_to_int(self.y[idxs])
return img_batches, y
def class_to_categorical(self, y):
return to_categorical(y, self.num_classes)
def categorical_to_class(self, y_coded):
return np.argmax(y_coded, axis=1)
def except_class(self, train_idx, except_class):
self.log.info('----------------------------------------------------')
for unknown_class in except_class:
self.log.info('Except class %d' % int(unknown_class))
unknown_class = int(unknown_class)
train_idx = train_idx[self.y[train_idx]!=unknown_class]
if self.verbose: self.show_class_information(self.y[train_idx])
self.log.info('----------------------------------------------------')
return train_idx
#########################################################################################################
# MNIST
#########################################################################################################
class MNISTReader(BaseReader):
def __init__(self, log, path_info, network_info, mode='train', verbose=True):
super(MNISTReader,self).__init__(log, path_info, network_info, verbose)
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
def read_dataset(self, nlabel=None):
(x_train, y_train), (x_test, y_test) = mnist.load_data()
self.x = np.concatenate((x_train, x_test), axis=0)
self.y = np.concatenate((y_train, y_test), axis=0)
if nlabel is not None:
y_table = pd.Series(self.y)
selected_class = y_table.unique()[:nlabel]
selected_class = y_table.isin(selected_class)
self.x_list = self.x[selected_class]
self.y = self.y[selected_class]
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
# self.y_class_to_int = dict(zip(self.y_class, range(self.y_class.shape[0])))
# self.y_int_to_class = dict(zip(range(self.y_class.shape[0]), self.y_class))
self.num_classes = len(self.y_class)
######################################
# normalize
if self.normalize_sym:
# force it to be of shape (...,28,28,1) with range [-1,1]
self.x = ((self.x - 127.5) / 127.5).astype(np.float32)
else:
self.x = (self.x / 255.).astype(np.float32)
self.x = np.expand_dims(self.x, axis=-1)
self.img_shape = self.x.shape[1:]
def get_batch(self, idxs):
img_batches = self.x[idxs]
if self.augment:
img_batches = self.get_augment(img_batches)
# y = self.class_to_int(self.y[idxs])
y = self.y[idxs]
return img_batches, y
def class_to_categorical(self, y):
return to_categorical(y, self.num_classes)
def categorical_to_class(self, y_coded):
return np.argmax(y_coded, axis=1)
def except_class(self, train_idx, except_class):
self.log.info('----------------------------------------------------')
for unknown_class in except_class:
self.log.info('Except class %d' % int(unknown_class))
unknown_class = int(unknown_class)
train_idx = train_idx[self.y[train_idx]!=unknown_class]
if self.verbose: self.show_class_information(self.y[train_idx])
self.log.info('----------------------------------------------------')
return train_idx
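# A minimal usage sketch of the reader API (illustration only): the function
# name, logger name, './data' path and dict layout below are assumptions that
# simply mirror the keys read in BaseReader.__init__, not the project's real
# configuration files.
def _mnist_reader_usage_sketch():
    import logging
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('mnist_reader_sketch')
    path_info = {'data_info': {'data_path': './data'}}        # read by BaseReader; MNIST loads via keras
    network_info = {'model_info': {'normalize_sym': 'False',  # scale pixels to [0, 1]
                                   'n_label': 'None',         # keep all ten digits
                                   'augment': 'False'}}       # get_augment assumes 3 channels
    reader = MNISTReader(log, path_info, network_info)
    for train_idx, test_idx in reader.get_cv_index(nfold=5):
        x_batch, y_batch = reader.get_batch(train_idx[:32])   # first mini-batch of fold 1
        y_onehot = reader.class_to_categorical(y_batch)       # one-hot targets
        return x_batch, y_onehot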
#########################################################################################################
# Omniglot
#########################################################################################################
class OmniglotReader(BaseReader):
def __init__(self, log, path_info, network_info, mode='train', verbose=True):
super(OmniglotReader,self).__init__(log, path_info, network_info, verbose)
self.mode = mode
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
def read_dataset(self, nlabel=None):
self.log.info('-------------------------------------------------')
self.log.info('Construct Dataset')
self.log.info('-------------------------------------------------')
self.log.info('Loading Omniglot dataset information')
self.img_shape = (105,105,1)
if self.mode=='train': data_type = 'images_background'
elif self.mode=='train_small1': data_type = 'images_background_small1'
elif self.mode=='train_small2': data_type = 'images_background_small2'
else: data_type = 'images_evaluation'
self.x_list = np.load('%s/%s_x_list.npy' % (self.data_path, data_type))
self.y = np.load('%s/%s_y.npy' % (self.data_path, data_type))
if nlabel is not None:
y_table = pd.Series(self.y)
selected_class = y_table.unique()[:nlabel]
selected_class = y_table.isin(selected_class)
self.x_list = self.x_list[selected_class]
self.y = self.y[selected_class]
# else:
# y_table = pd.Series(self.y)
# y_counts = y_table.value_counts()
# selected_class = y_counts[y_counts >= 5].keys()
# selected_class = y_table.isin(selected_class)
# self.x_list = self.x_list[selected_class]
# self.y = self.y[selected_class]
# self.not_used_class = y_counts[y_counts < 5].keys()
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
self.y_class_to_int = dict(zip(self.y_class, range(self.y_class.shape[0])))
self.y_int_to_class = dict(zip(range(self.y_class.shape[0]), self.y_class))
self.num_classes = len(self.y_class)
######################################
self.y_alphabet = np.array([xpath.split('/')[-3] for xpath in self.x_list])
# TODO except class list...
# def except_class(self, train_idx, unknown_class='9'):
# train_idx = np.array(train_idx)
# return train_idx[self.y[train_idx]!=unknown_class]
def get_cv_index(self, nfold=5, random_state = 12):
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html
self.log.info('Stratified %d-fold Cross Validation Cut' % nfold)
kf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=random_state)
return kf.split(range(self.y.shape[0]), self.y)
def get_dataset(self):
return self.x_list, self.y
def get_y_alphabet_class(self):
return self.y_alphabet
def get_label_name(self):
return np.array(self.y_class)
def get_batch(self, idxs):
try:
batch_imgs = []
batch_idxs = []
for i in idxs:
try:
batch_imgs.append(self._read_omniglot_image(self.x_list[i]))
batch_idxs.append(i)
except Exception as e:
raise ValueError(e)
batch_imgs = np.array(batch_imgs)
batch_idxs = np.array(batch_idxs)
# if self.augment and np.random.choice([0,1], 1, replace=False, p=[0.8,0.2]):
if self.augment:
batch_imgs = self.get_augment(batch_imgs)
if self.normalize_sym:
batch_imgs = (batch_imgs - 0.5) * 2.
y = self.class_to_int(self.y[np.array(batch_idxs)])
return batch_imgs, y
except Exception as e:
raise ValueError(e)
def _read_omniglot_image(self, filename):
try:
im = Image.open(filename)
# target_shape = np.array([self.img_shape[1],self.img_shape[0]])
# im = im.resize(target_shape, PIL.Image.ANTIALIAS)
im = np.expand_dims((1.-np.array(im).astype(np.float32)), -1)
# dilation (thickness)
# kernel = np.ones((3,3),np.uint8)
# im = np.expand_dims(cv2.dilate(im,kernel,iterations = 1), -1)
return im
except Exception as e:
raise ValueError('Error with %s : %s' % (filename, e))
# sys.exit()
#########################################################################
# functions for augmentation
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html
#########################################################################
def _dilation(self, im, kernal_size=(2,2)):
# Dilation (thickness)
kernel = np.ones(kernal_size,np.uint8)
im = cv2.dilate(im,kernel,iterations = 1)
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def _rotation(self, im, max_angle = 10):
# Rotation
rows,cols,ch = im.shape
angle = np.random.choice(np.append(np.arange(-max_angle,max_angle,max_angle//4),max_angle), 1)[0]
M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)
im = cv2.warpAffine(im,M,(cols,rows))
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def _affine(self, im, max_tiltrate = 6):
# Affine transformation
rows,cols,ch = im.shape
tiltsize=np.random.choice(np.arange(max_tiltrate//4,max_tiltrate,max_tiltrate//4), 1)[0]
pts1 = np.float32([[tiltsize,tiltsize],[rows-tiltsize,tiltsize],[tiltsize,cols-tiltsize]])
pts2 = np.float32([[tiltsize,tiltsize],[rows,0],[0,cols]])
M = cv2.getAffineTransform(pts1,pts2)
im = cv2.warpAffine(im,M,(cols,rows))
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def _perspective(self, im, max_padsize=6):
# Perspective transformation
rows,cols,ch = im.shape
padsize=np.random.choice(np.arange(max_padsize//4,max_padsize,max_padsize//4), 1)[0]
pts1 = np.float32([[padsize,padsize],[rows-padsize,padsize],[padsize,cols-padsize],[rows-padsize,cols-padsize]])
pts2 = np.float32([[0,0],[rows,0],[0,cols],[rows,cols]])
M = cv2.getPerspectiveTransform(pts1,pts2)
im = cv2.warpPerspective(im,M,(rows,cols))
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def get_augment(self, x):
for i in range(x.shape[0]):
if np.random.randint(2, size=1):
# if np.random.randint(2, size=1): x[i] = self._dilation(x[i]) # Dilation (thickness)
if np.random.randint(2, size=1): x[i] = self._rotation(x[i]) # Rotation
if np.random.randint(2, size=1): x[i] = self._affine(x[i]) # Affine transformation
if np.random.randint(2, size=1): x[i] = self._perspective(x[i]) # Perspective transformation
return x
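# Summary of the Omniglot augmentation above: with probability 1/2 an image is
# selected, and the rotation, affine and perspective warps are then each applied
# independently with probability 1/2 (dilation is left commented out). Every
# helper re-thresholds at 0.5, so the augmented glyphs stay binary {0, 1}.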
#########################################################################################################
# CelebA
#########################################################################################################
class CelebAReader(BaseReader):
def __init__(self, log, path_info, network_info, verbose=True):
super(CelebAReader,self).__init__(log, path_info, network_info, verbose)
self.crop_style=network_info['model_info']['crop_style'].strip()
self.attr_label=network_info['model_info']['attr_label'].strip()
self.read_dataset(self.attr_label)
if verbose: self.show_class_information()
def read_dataset(self, attr_label='Male'):
self.log.info('-------------------------------------------------')
self.log.info('Construct Dataset')
self.log.info('-------------------------------------------------')
self.log.info('Loading CelebA dataset information')
self.img_shape = (64, 64, 3)
# num_samples = len(os.listdir(self.data_path)) #202599
# self.datapoint_ids = np.arange(1, num_samples + 1)
# np.random.shuffle(self.datapoint_ids)
# self.x_list = ['%.6d.jpg' % i for i in self.datapoint_ids]
self.x_list = np.load('%s/x_list.npy' % '/'.join(self.data_path.split('/')[:-1]))
self.attr = pd.read_csv('/'.join(self.data_path.split('/')[:-1])+'/list_attr_celeba.csv')
sorterIndex = dict(zip(self.x_list,range(len(self.x_list))))
self.attr['index'] = self.attr['image_id'].map(sorterIndex)
self.attr = self.attr.sort_values('index')
self.y = np.array(self.attr[attr_label])
self.y[self.y == -1] = 0
self.class_name = np.array(['no_%s' % attr_label,attr_label])
self.y_table = pd.Series(self.y)
self.y_counts = self.y_table.value_counts()
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
self.num_classes = self.y_class.shape[0]
def get_dataset(self):
return self.x_list, self.y
def get_label_name(self):
return self.class_name
def get_batch(self, idxs):
img_batches = np.array([self._read_celeba_image(self.x_list[i]) for i in idxs])
if self.augment:
img_batches = self.get_augment(img_batches)
if self.normalize_sym:
img_batches = (img_batches - 0.5) * 2.
return img_batches, self.y[np.array(idxs)]
def _read_celeba_image(self, filename):
# from WAE
width = 178
height = 218
new_width = 140
new_height = 140
im = Image.open(utils.o_gfile((self.data_path, filename), 'rb'))
if self.crop_style == 'closecrop':
# This method was used in DCGAN, pytorch-gan-collection, AVB, ...
left = (width - new_width) / 2.
top = (height - new_height) / 2.
right = (width + new_width) / 2.
bottom = (height + new_height)/2.
im = im.crop((left, top, right, bottom))
im = im.resize((64, 64), PIL.Image.ANTIALIAS)
elif self.crop_style == 'resizecrop':
# This method was used in ALI, AGE, ...
im = im.resize((64, 64+14), PIL.Image.ANTIALIAS)
im = im.crop((0, 7, 64, 64 + 7))
else:
raise Exception('Unknown crop style specified')
return np.array(im).reshape(64, 64, 3) / 255.
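# Summary of the two CelebA crop styles above: 'closecrop' takes a central
# 140x140 crop of the 178x218 source and resizes it to 64x64 (the DCGAN/AVB
# convention), while 'resizecrop' resizes to 64x78 and keeps the middle 64x64
# band (the ALI/AGE convention). Pixels are returned in [0, 1] and rescaled to
# [-1, 1] in get_batch when normalize_sym is set.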
#########################################################################################################
# VGG 2 Face
#########################################################################################################
class VGGFace2Reader(BaseReader):
def __init__(self, log, path_info, network_info, mode='train', verbose=True):
super(VGGFace2Reader,self).__init__(log, path_info, network_info, verbose)
self.crop_style=network_info['model_info']['crop_style'].strip()
self.img_shape = np.array([int(ishape.strip()) for ishape in network_info['model_info']['img_shape'].split(',')])
self.mode = mode
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
try: self.feature_b = 'true' == network_info['model_info']['feature_b'].strip().lower()
except: self.feature_b = False
if self.feature_b:
if self.mode == 'train':
self.all_features_for_b = np.load('%s/all_features_normalized.npy' % path_info['data_info']['data_path'])
else:
self.all_features_for_b = np.load('%s/all_features_of_unknown_normalized.npy' % path_info['data_info']['data_path'])
self.log.info('Load all features for b: %s' % np.array(len(self.all_features_for_b)))
try: self.fixed_b_path = network_info['training_info']['fixed_b_path'].strip()
except: self.fixed_b_path = None
if self.fixed_b_path is not None:
self.all_b = np.load(self.fixed_b_path)
self.log.info('Load all b: %s' % np.array(self.all_b.shape))
def read_dataset(self, nlabel=None):
self.log.info('-------------------------------------------------')
self.log.info('Construct Dataset')
self.log.info('-------------------------------------------------')
self.log.info('Loading VGG Face 2 dataset information')
self.log.info('Set image shape : %s' % self.img_shape)
# names = os.listdir(self.data_path+'/npy_128')
# if not npersion==None:
# names = names[:npersion]
# file_dict = {}
# file_dict.update([(name, os.listdir(self.data_path+'/images/%s' % name)) for name in names])
# self.x_list = np.concatenate([['%s/%s'%(name, path) for path in paths] for name, paths in file_dict.items()])
# self.y = np.concatenate([[name]*len(paths) for name, paths in file_dict.items()])
if self.mode == 'train': list_path = "%s/%s" % (self.data_path, 'train_list.txt')
else: list_path = "%s/%s" % (self.data_path, 'test_list.txt')
with open(list_path, 'r') as f:
self.x_list = f.read()
self.x_list = np.array(self.x_list.split('\n')[:-1])
x_table =
|
pd.Series(self.x_list)
|
pandas.Series
|