repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---|
yaojenkuo/BuildingMachineLearningSystemsWithPython | ch03/rel_post_20news.py | 24 | 3903 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sklearn.datasets
import scipy as sp
new_post = \
"""Disk drive problems. Hi, I have a problem with my hard disk.
After 1 year it is working only sporadically now.
I tried to format it, but now it doesn't boot any more.
Any ideas? Thanks.
"""
print("""\
Dear reader of the 1st edition of 'Building Machine Learning Systems with Python'!
For the 2nd edition we introduced a couple of changes that will produce
results that differ from those in the 1st edition.
E.g. we now fully rely on scikit's fetch_20newsgroups() instead of requiring
you to download the data manually from MLCOMP.
If you have any questions, please ask at http://www.twotoreal.com
""")
all_data = sklearn.datasets.fetch_20newsgroups(subset="all")
print("Number of total posts: %i" % len(all_data.filenames))
# Number of total posts: 18846
groups = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
categories=groups)
print("Number of training posts in tech groups:", len(train_data.filenames))
# Number of training posts in tech groups: 3529
labels = train_data.target
num_clusters = 50 # sp.unique(labels).shape[0]
import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.feature_extraction.text import TfidfVectorizer
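# Subclass TfidfVectorizer so that every token produced by the standard
# analyzer is passed through the English Snowball stemmer first; inflected
# forms such as "images" and "imaging" then count towards the same feature.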
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
stop_words='english', decode_error='ignore'
)
vectorized = vectorizer.fit_transform(train_data.data)
num_samples, num_features = vectorized.shape
print("#samples: %d, #features: %d" % (num_samples, num_features))
# #samples: 3529, #features: 4712
from sklearn.cluster import KMeans
km = KMeans(n_clusters=num_clusters, n_init=1, verbose=1, random_state=3)
clustered = km.fit(vectorized)
print("km.labels_=%s" % km.labels_)
# km.labels_=[ 6 34 22 ..., 2 21 26]
print("km.labels_.shape=%s" % km.labels_.shape)
# km.labels_.shape=3529
from sklearn import metrics
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
# Homogeneity: 0.400
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
# Completeness: 0.206
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
# V-measure: 0.272
print("Adjusted Rand Index: %0.3f" %
metrics.adjusted_rand_score(labels, km.labels_))
# Adjusted Rand Index: 0.064
print("Adjusted Mutual Information: %0.3f" %
metrics.adjusted_mutual_info_score(labels, km.labels_))
# Adjusted Mutual Information: 0.197
print(("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(vectorized, labels, sample_size=1000)))
# Silhouette Coefficient: 0.006
new_post_vec = vectorizer.transform([new_post])
new_post_label = km.predict(new_post_vec)[0]
similar_indices = (km.labels_ == new_post_label).nonzero()[0]
similar = []
for i in similar_indices:
dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray())
similar.append((dist, train_data.data[i]))
similar = sorted(similar)
print("Count similar: %i" % len(similar))
show_at_1 = similar[0]
show_at_2 = similar[int(len(similar) / 10)]
show_at_3 = similar[int(len(similar) / 2)]
print("=== #1 ===")
print(show_at_1)
print()
print("=== #2 ===")
print(show_at_2)
print()
print("=== #3 ===")
print(show_at_3)
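# Optional follow-up, a minimal sketch rather than part of the book's script:
# list the most characteristic terms of the cluster the new post landed in.
# It assumes TfidfVectorizer.get_feature_names() (renamed
# get_feature_names_out() in newer scikit-learn releases).
feature_names = vectorizer.get_feature_names()
top_term_ids = km.cluster_centers_[new_post_label].argsort()[::-1][:10]
print("Top terms in the matched cluster: %s"
      % [feature_names[i] for i in top_term_ids])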
| mit |
hitszxp/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 40 | 12814 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros((100, 1)))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
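    # _dynamic_max_trials returns ceil(log(1 - probability) /
    # log(1 - w ** min_samples)) with w = n_inliers / n_samples, i.e. the
    # number of random draws needed to hit at least one all-inlier subset
    # with the requested probability.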
    # e = 0%, min_samples = 2
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    # stop_probability edge cases (min_samples = 10): 0 -> no trials, 1 -> inf
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
if __name__ == "__main__":
np.testing.run_module_suite()
| bsd-3-clause |
ChanChiChoi/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
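    # (shrunk_covariance(S, shrinkage) is (1 - shrinkage) * S
    #  + shrinkage * trace(S) / n_features * identity)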
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
rupak0577/ginga | ginga/web/pgw/Plot.py | 3 | 4306 | #
# Plot.py -- Plotting widget canvas wrapper.
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from io import BytesIO
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from ginga.web.pgw import Widgets
class PlotWidget(Widgets.Canvas):
"""
This class implements the server-side backend of the surface for a
web-based plot viewer. It uses a web socket to connect to an HTML5
canvas with javascript callbacks in a web browser on the client.
The viewer is created separately on the backend and connects to this
    surface via the set_plot() method.
"""
def __init__(self, plot, width=500, height=500):
super(PlotWidget, self).__init__(width=width, height=height)
self.widget = FigureCanvas(plot.get_figure())
self.logger = plot.logger
self._configured = False
self.refresh_delay = 0.010
self.set_plot(plot)
def set_plot(self, plot):
self.logger.debug("set_plot called")
self.plot = plot
self._dispatch_event_table = {
"activate": self.ignore_event,
"setbounds": self.map_event_cb,
"mousedown": self.ignore_event,
"mouseup": self.ignore_event,
"mousemove": self.ignore_event,
"mouseout": self.ignore_event,
"mouseover": self.ignore_event,
"mousewheel": self.ignore_event,
"wheel": self.ignore_event,
"click": self.ignore_event,
"dblclick": self.ignore_event,
"keydown": self.ignore_event,
"keyup": self.ignore_event,
"keypress": self.ignore_event,
"resize": self.resize_event,
"focus": self.ignore_event,
"focusout": self.ignore_event,
"blur": self.ignore_event,
"drop": self.ignore_event,
"paste": self.ignore_event,
# Hammer.js events
"pinch": self.ignore_event,
"pinchstart": self.ignore_event,
"pinchend": self.ignore_event,
"rotate": self.ignore_event,
"rotatestart": self.ignore_event,
"rotateend": self.ignore_event,
"tap": self.ignore_event,
"pan": self.ignore_event,
"panstart": self.ignore_event,
"panend": self.ignore_event,
"swipe": self.ignore_event,
}
self.plot.add_callback('draw-canvas', self.draw_cb)
self.add_timer('refresh', self.refresh_cb)
def get_plot(self):
return self.plot
def ignore_event(self, event):
pass
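    # Refresh loop: ask the client-side canvas to repaint itself, then re-arm
    # the timer so this fires again after refresh_delay seconds.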
def refresh_cb(self):
app = self.get_app()
app.do_operation('refresh_canvas', id=self.id)
self.reset_timer('refresh', self.refresh_delay)
def get_rgb_buffer(self, plot):
buf = BytesIO()
fig = plot.get_figure()
fig.canvas.print_figure(buf, format='png')
wd, ht = self.width, self.height
return (wd, ht, buf.getvalue())
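    # Render the matplotlib figure to PNG bytes (get_rgb_buffer) and push the
    # result to the client canvas as an image, then re-arm the refresh timer.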
def draw_cb(self, plot):
self.logger.debug("getting RGB buffer")
wd, ht, buf = self.get_rgb_buffer(plot)
#self.logger.debug("clear_rect")
#self.clear_rect(0, 0, wd, ht)
self.logger.debug("drawing %dx%d image" % (wd, ht))
self.draw_image(buf, 0, 0, wd, ht)
self.reset_timer('refresh', self.refresh_delay)
def configure_window(self, wd, ht):
self.logger.debug("canvas resized to %dx%d" % (wd, ht))
fig = self.plot.get_figure()
fig.set_size_inches(float(wd) / fig.dpi, float(ht) / fig.dpi)
def map_event_cb(self, event):
wd, ht = event.width, event.height
self.configure_window(wd, ht)
self.plot.draw()
def resize_event(self, event):
wd, ht = event.x, event.y
self.configure_window(wd, ht)
self.plot.draw()
def _cb_redirect(self, event):
method = self._dispatch_event_table[event.type]
try:
method(event)
except Exception as e:
self.logger.error("error redirecting '%s' event: %s" % (
event.type, str(e)))
# TODO: dump traceback to debug log
#END
| bsd-3-clause |
AlexanderFabisch/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 46 | 2798 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
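    # (min_df=3 ignores tokens appearing in fewer than 3 documents; max_df=0.95
    # ignores tokens appearing in more than 95% of the documents)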
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
    # TASK: print the cross-validated scores for each parameter set
    # explored by the grid search
    # (cv_results_ replaces the older grid_scores_ attribute of GridSearchCV)
    print(grid_search.cv_results_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
paladin74/neural-network-animation | matplotlib/tests/test_dviread.py | 15 | 1788 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
original_find_tex_file = dr.find_tex_file
def setup():
dr.find_tex_file = lambda x: x
def teardown():
dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
filename = os.path.join(
os.path.dirname(__file__),
'baseline_images', 'dviread', 'test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = 'TeXfont%d' % n
entry = fontmap[key]
assert_equal(entry.texname, key)
assert_equal(entry.psname, 'PSfont%d' % n)
if n not in [3, 5]:
assert_equal(entry.encoding, 'font%d.enc' % n)
elif n == 3:
assert_equal(entry.encoding, 'enc3.foo')
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert_equal(entry.filename, 'font%d.pfa' % n)
else:
assert_equal(entry.filename, 'font%d.pfb' % n)
if n == 4:
assert_equal(entry.effects, {'slant': -0.1, 'extend': 2.2})
else:
assert_equal(entry.effects, {})
# Some special cases
entry = fontmap['TeXfont6']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont7']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, 'font7.enc')
entry = fontmap['TeXfont8']
assert_equal(entry.filename, 'font8.pfb')
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont9']
assert_equal(entry.filename, '/absolute/font9.pfb')
| mit |
aabadie/scikit-learn | sklearn/utils/tests/test_testing.py | 24 | 7902 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager work
# as expected
def _warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
def _multiple_warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
warnings.warn("deprecation warning")
# Check the function directly
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
category=UserWarning))
assert_warns(UserWarning,
ignore_warnings(_multiple_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning,
ignore_warnings(_multiple_warning_function,
category=UserWarning))
assert_no_warnings(ignore_warnings(_warning_function,
category=(DeprecationWarning,
UserWarning)))
# Check the decorator
@ignore_warnings
def decorator_no_warning():
_warning_function()
_multiple_warning_function()
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
_multiple_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
_multiple_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
_multiple_warning_function()
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
assert_warns(DeprecationWarning, decorator_no_user_warning)
assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
# Check the context manager
def context_manager_no_warning():
with ignore_warnings():
_warning_function()
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
_multiple_warning_function()
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
_warning_function()
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
_warning_function()
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
assert_warns(DeprecationWarning, context_manager_no_user_warning)
assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired by numpy 1.7, with an alteration that checks that
# the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
phobson/statsmodels | statsmodels/sandbox/tsa/movstat.py | 34 | 14871 | '''using scipy signal and numpy correlate to calculate some time series
statistics
original developer notes
see also scikits.timeseries (movstat is partially inspired by it)
added 2009-08-29
timeseries moving stats are in c, autocorrelation similar to here
(I thought I saw moving stats somewhere in python, maybe not)
TODO
moving statistics
- filters don't handle boundary conditions nicely (correctly ?)
e.g. minimum order filter uses 0 for out of bounds value
-> append and prepend with last resp. first value
- enhance for nd arrays, with axis = 0
Note: Equivalence for 1D signals
>>> np.all(signal.correlate(x,[1,1,1],'valid')==np.correlate(x,[1,1,1]))
True
>>> np.all(ndimage.filters.correlate(x,[1,1,1], origin = -1)[:-3+1]==np.correlate(x,[1,1,1]))
True
# multidimensional, but, it looks like it uses common filter across time series, no VAR
ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)
ndimage.filters.correlate(x,[1,1,1],origin = 1))
ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)[0]==\
ndimage.filters.correlate(x,[1,1,1],origin = 1))
True
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)[0]==ndimage.filters.correlate(x,[1,1,1],origin = 1))
update
2009-09-06: cosmetic changes, rearrangements
'''
from __future__ import print_function
import numpy as np
from scipy import signal
from numpy.testing import assert_array_equal, assert_array_almost_equal
import statsmodels.api as sm
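# expandarr pads the series with k copies of its first and last value on each
# end, so the moving-window filters below see repeated boundary values instead
# of implicit zeros.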
def expandarr(x,k):
#make it work for 2D or nD with axis
kadd = k
if np.ndim(x) == 2:
kadd = (kadd, np.shape(x)[1])
return np.r_[np.ones(kadd)*x[0],x,np.ones(kadd)*x[-1]]
def movorder(x, order = 'med', windsize=3, lag='lagged'):
'''moving order statistics
Parameters
----------
x : array
time series data
order : float or 'med', 'min', 'max'
which order statistic to calculate
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
filtered array
'''
#if windsize is even should it raise ValueError
if lag == 'lagged':
lead = windsize//2
elif lag == 'centered':
lead = 0
elif lag == 'leading':
lead = -windsize//2 +1
else:
raise ValueError
    if np.isfinite(order):  # a numeric order statistic was passed directly
ord = order # note: ord is a builtin function
elif order == 'med':
        ord = (windsize - 1)//2
elif order == 'min':
ord = 0
elif order == 'max':
ord = windsize - 1
else:
raise ValueError
#return signal.order_filter(x,np.ones(windsize),ord)[:-lead]
xext = expandarr(x, windsize)
#np.r_[np.ones(windsize)*x[0],x,np.ones(windsize)*x[-1]]
return signal.order_filter(xext,np.ones(windsize),ord)[windsize-lead:-(windsize+lead)]
def check_movorder():
'''graphical test for movorder'''
import matplotlib.pylab as plt
x = np.arange(1,10)
xo = movorder(x, order='max')
assert_array_equal(xo, x)
x = np.arange(10,1,-1)
xo = movorder(x, order='min')
assert_array_equal(xo, x)
assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])
tt = np.linspace(0,2*np.pi,15)
x = np.sin(tt) + 1
xo = movorder(x, order='max')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max lagged')
xo = movorder(x, order='max', lag='centered')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max centered')
xo = movorder(x, order='max', lag='leading')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max leading')
# identity filter
##>>> signal.order_filter(x,np.ones(1),0)
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.])
# median filter
##signal.medfilt(np.sin(x), kernel_size=3)
##>>> plt.figure()
##<matplotlib.figure.Figure object at 0x069BBB50>
##>>> x=np.linspace(0,3,100);plt.plot(x,np.sin(x),x,signal.medfilt(np.sin(x), kernel_size=3))
# remove old version
##def movmeanvar(x, windowsize=3, valid='same'):
## '''
## this should also work along axis or at least for columns
## '''
## n = x.shape[0]
## x = expandarr(x, windowsize - 1)
## takeslice = slice(windowsize-1, n + windowsize-1)
## avgkern = (np.ones(windowsize)/float(windowsize))
## m = np.correlate(x, avgkern, 'same')#[takeslice]
## print(m.shape)
## print(x.shape)
## xm = x - m
## v = np.correlate(x*x, avgkern, 'same') - m**2
## v1 = np.correlate(xm*xm, avgkern, valid) #not correct for var of window
###>>> np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')-np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')**2
## return m[takeslice], v[takeslice], v1
def movmean(x, windowsize=3, lag='lagged'):
'''moving window mean
Parameters
----------
x : array
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving mean, with same shape as x
Notes
-----
for leading and lagging the data array x is extended by the closest value of the array
'''
return movmoment(x, 1, windowsize=windowsize, lag=lag)
def movvar(x, windowsize=3, lag='lagged'):
'''moving window variance
Parameters
----------
x : array
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving variance, with same shape as x
'''
m1 = movmoment(x, 1, windowsize=windowsize, lag=lag)
m2 = movmoment(x, 2, windowsize=windowsize, lag=lag)
return m2 - m1*m1
def movmoment(x, k, windowsize=3, lag='lagged'):
'''non-central moment
Parameters
----------
x : array
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
k-th moving non-central moment, with same shape as x
Notes
-----
If data x is 2d, then moving moment is calculated for each
column.
'''
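    # Implementation: correlate x**k (boundary-padded by expandarr) with a
    # length-windowsize averaging kernel, then slice the result so the window
    # is lagged, centered or leading relative to each position.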
windsize = windowsize
#if windsize is even should it raise ValueError
if lag == 'lagged':
#lead = -0 + windsize #windsize//2
lead = -0# + (windsize-1) + windsize//2
sl = slice((windsize-1) or None, -2*(windsize-1) or None)
elif lag == 'centered':
lead = -windsize//2 #0#-1 #+ #(windsize-1)
sl = slice((windsize-1)+windsize//2 or None, -(windsize-1)-windsize//2 or None)
elif lag == 'leading':
#lead = -windsize +1#+1 #+ (windsize-1)#//2 +1
lead = -windsize +2 #-windsize//2 +1
sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
else:
raise ValueError
avgkern = (np.ones(windowsize)/float(windowsize))
xext = expandarr(x, windsize-1)
#Note: expandarr increases the array size by 2*(windsize-1)
#sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
print(sl)
if xext.ndim == 1:
return np.correlate(xext**k, avgkern, 'full')[sl]
#return np.correlate(xext**k, avgkern, 'same')[windsize-lead:-(windsize+lead)]
else:
print(xext.shape)
print(avgkern[:,None].shape)
# try first with 2d along columns, possibly ndim with axis
return signal.correlate(xext**k, avgkern[:,None], 'full')[sl,:]
#x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,[1],'full')
#x=0.5**np.arange(3);np.correlate(x,x,'same')
##>>> x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
##
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> xo
##xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> x=np.ones(10);xo=x-x.mean();a=np.correlate(xo,xo,'full')
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> d
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 9.,
## 8., 7., 6., 5., 4., 3., 2., 1.])
##def ccovf():
## pass
## #x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
__all__ = ['movorder', 'movmean', 'movvar', 'movmoment']
if __name__ == '__main__':
print('\ncheckin moving mean and variance')
nobs = 10
x = np.arange(nobs)
ws = 3
ave = np.array([ 0., 1/3., 1., 2., 3., 4., 5., 6., 7., 8.,
26/3., 9])
va = np.array([[ 0. , 0. ],
[ 0.22222222, 0.88888889],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.22222222, 0.88888889],
[ 0. , 0. ]])
ave2d = np.c_[ave, 2*ave]
print(movmean(x, windowsize=ws, lag='lagged'))
print(movvar(x, windowsize=ws, lag='lagged'))
print([np.var(x[i-ws:i]) for i in range(ws, nobs)])
m1 = movmoment(x, 1, windowsize=3, lag='lagged')
m2 = movmoment(x, 2, windowsize=3, lag='lagged')
print(m1)
print(m2)
print(m2 - m1*m1)
# this implicitly also tests moment
assert_array_almost_equal(va[ws-1:,0],
movvar(x, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,0],
movvar(x, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,0],
movvar(x, windowsize=ws, lag='lagged'))
print('\nchecking moving moment for 2d (columns only)')
x2d = np.c_[x, 2*x]
print(movmoment(x2d, 1, windowsize=3, lag='centered'))
print(movmean(x2d, windowsize=ws, lag='lagged'))
print(movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(va[ws-1:,:],
movvar(x2d, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,:],
movvar(x2d, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,:],
movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(ave2d[ws-1:],
movmoment(x2d, 1, windowsize=3, lag='leading'))
assert_array_almost_equal(ave2d[ws//2:-ws//2+1],
movmoment(x2d, 1, windowsize=3, lag='centered'))
assert_array_almost_equal(ave2d[:-ws+1],
movmean(x2d, windowsize=ws, lag='lagged'))
from scipy import ndimage
print(ndimage.filters.correlate1d(x2d, np.array([1,1,1])/3., axis=0))
#regression test check
xg = np.array([ 0. , 0.1, 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6,
4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5,
13.5, 14.5, 15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5,
22.5, 23.5, 24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5,
31.5, 32.5, 33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5,
40.5, 41.5, 42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5,
49.5, 50.5, 51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5,
58.5, 59.5, 60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5,
67.5, 68.5, 69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5,
76.5, 77.5, 78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5,
85.5, 86.5, 87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5,
94.5])
assert_array_almost_equal(xg, movmean(np.arange(100), 10,'lagged'))
xd = np.array([ 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6, 4.5, 5.5,
6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5,
15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5, 22.5, 23.5,
24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5, 31.5, 32.5,
33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5, 40.5, 41.5,
42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5, 49.5, 50.5,
51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5, 58.5, 59.5,
60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5, 67.5, 68.5,
69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5, 76.5, 77.5,
78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5, 85.5, 86.5,
87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5, 94.5, 95.4,
96.2, 96.9, 97.5, 98. , 98.4, 98.7, 98.9, 99. ])
assert_array_almost_equal(xd, movmean(np.arange(100), 10,'leading'))
xc = np.array([ 1.36363636, 1.90909091, 2.54545455, 3.27272727,
4.09090909, 5. , 6. , 7. ,
8. , 9. , 10. , 11. ,
12. , 13. , 14. , 15. ,
16. , 17. , 18. , 19. ,
20. , 21. , 22. , 23. ,
24. , 25. , 26. , 27. ,
28. , 29. , 30. , 31. ,
32. , 33. , 34. , 35. ,
36. , 37. , 38. , 39. ,
40. , 41. , 42. , 43. ,
44. , 45. , 46. , 47. ,
48. , 49. , 50. , 51. ,
52. , 53. , 54. , 55. ,
56. , 57. , 58. , 59. ,
60. , 61. , 62. , 63. ,
64. , 65. , 66. , 67. ,
68. , 69. , 70. , 71. ,
72. , 73. , 74. , 75. ,
76. , 77. , 78. , 79. ,
80. , 81. , 82. , 83. ,
84. , 85. , 86. , 87. ,
88. , 89. , 90. , 91. ,
92. , 93. , 94. , 94.90909091,
95.72727273, 96.45454545, 97.09090909, 97.63636364])
assert_array_almost_equal(xc, movmean(np.arange(100), 11,'centered'))
| bsd-3-clause |
intermezzo-fr/hillary-clinton-emails | scripts/outputCsvs.py | 5 | 3577 | import numpy as np
import pandas as pd
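# Strip punctuation/quote characters and lowercase an address; when a display
# name precedes "<...>", keep only that prefix so different spellings of the
# same sender or recipient compare equal.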
def normalize_address(raw_address):
for c in ["'", ",", "°", "•", "`", '"', "‘", "-"]:
raw_address = raw_address.replace(c, "")
raw_address = raw_address.lower()
if "<" in raw_address:
prefix = raw_address[:raw_address.index("<")].strip()
if prefix:
return prefix
return raw_address.strip()
emails = pd.read_csv("input/emailsNoId.csv")
emails["MetadataTo"].replace(np.nan, "", inplace=True)
emails["ExtractedTo"].replace(np.nan, "", inplace=True)
emails["MetadataFrom"].replace(np.nan, "", inplace=True)
emails["ExtractedFrom"].replace(np.nan, "", inplace=True)
emails.sort(columns=["DocNumber"], inplace=True)
emails.insert(0, "Id", list(range(1, len(emails)+1)))
emails.insert(5, "SenderPersonId", np.nan)
alias_person = pd.read_csv("versionedInput/alias_person.csv")
alias_person["AliasName"] = [normalize_address(alias) for alias in alias_person["AliasName"]]
persons = pd.DataFrame(columns=["Id", "Name"])
aliases = pd.DataFrame(columns=["Id", "Alias", "PersonId"])
email_receivers = pd.DataFrame(columns=["Id", "EmailId", "PersonId"]).astype(int)
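# Register an alias -> person mapping: reuse the existing Person row if the
# name is already known, otherwise append a new one, then store the
# lower-cased alias pointing at that person's id.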
def add_alias(aliases, persons, alias_name, person_name):
if len(np.where(aliases["Alias"]==alias_name)[0])>0:
return
locs = np.where(persons["Name"]==person_name)[0]
if len(locs)>0:
person_id = persons["Id"][locs[0]]
else:
person_id = len(persons)+1
persons.loc[person_id-1] = [person_id, person_name]
alias_id = len(aliases)+1
aliases.loc[alias_id-1] = [alias_id, alias_name.lower(), person_id]
for (i, alias_person) in alias_person.iterrows():
add_alias(aliases, persons, alias_person["AliasName"], alias_person["PersonName"])
log = open("working/outputCsvsLog.txt", "w")
for (i, email) in emails.iterrows():
from_person_id = None
from_address = normalize_address(email["MetadataFrom"].split(";")[0])
if from_address != "":
locs = np.where(aliases["Alias"]==from_address)[0]
if len(locs)==0:
add_alias(aliases, persons, from_address, from_address)
log.write("Added From Person: %s\n" % from_address)
loc = np.where(aliases["Alias"]==from_address)[0][0]
from_person_id = aliases["PersonId"][loc]
from_person_name = persons["Name"][from_person_id-1]
emails.loc[i, "SenderPersonId"] = from_person_id
if email["ExtractedFrom"] != "":
add_alias(aliases, persons, normalize_address(email["ExtractedFrom"]), from_person_name)
to_addresses = email["MetadataTo"].split(";") + email["ExtractedTo"].split(";")
to_addresses = sorted(list(set([normalize_address(x) for x in to_addresses])))
if "" in to_addresses:
to_addresses.remove("")
for to_address in to_addresses:
locs = np.where(aliases["Alias"]==to_address)[0]
if len(locs)==0:
add_alias(aliases, persons, to_address, to_address)
log.write("Added To Person: %s\n" % to_address)
loc = np.where(aliases["Alias"]==to_address)[0][0]
# don't add a receiver if they were also the sender
if from_person_id != aliases["PersonId"][loc]:
email_receivers.loc[len(email_receivers)] = [len(email_receivers)+1, email["Id"], aliases["PersonId"][loc]]
persons.to_csv("output/Persons.csv", index=False)
aliases.to_csv("output/Aliases.csv", index=False)
emails.to_csv("output/Emails.csv", index=False, float_format="%0.0f")
email_receivers.to_csv("output/EmailReceivers.csv", index=False, float_format="%0.0f")
log.close() | mit |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/tests/__init__.py | 17 | 2578 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import difflib
import os
from matplotlib import rcParams, rcdefaults, use
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
os.path.dirname(__file__), 'baseline_images')):
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install matplotlib from source to get the '
'test data.')
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
rcParams['font.family'] = 'Bitstream Vera Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
# Clear the font caches. Otherwise, the hinting mode can travel
# from one test to another.
backend_agg.RendererAgg._fontd.clear()
backend_pdf.RendererPdf.truetype_font_cache.clear()
backend_svg.RendererSVG.fontd.clear()
def assert_str_equal(reference_str, test_str,
format_str=('String {str1} and {str2} do not '
'match:\n{differences}')):
"""
Assert the two strings are equal. If not, fail and print their
diffs using difflib.
"""
if reference_str != test_str:
diff = difflib.unified_diff(reference_str.splitlines(1),
test_str.splitlines(1),
'Reference', 'Test result',
'', '', 0)
raise ValueError(format_str.format(str1=reference_str,
str2=test_str,
differences=''.join(diff)))
| mit |
andreugrimalt/Theano-Tutorials | 5_convolutional_net.py | 1 | 3899 | import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import numpy as np
from load import mnist
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import cPickle
srng = RandomStreams()
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def rectify(X):
return T.maximum(X, 0.)
def softmax(X):
e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x'))
return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')
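# Inverted dropout: zero each activation with probability p at training time
# and rescale the survivors by 1/(1 - p), so no rescaling is needed at test
# time (the test-time model below is built with p = 0).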
def dropout(X, p=0.):
if p > 0:
retain_prob = 1 - p
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
X /= retain_prob
return X
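# RMSprop: keep an exponential moving average of squared gradients per
# parameter (acc) and divide each gradient by sqrt(acc + epsilon), so
# parameters with consistently large gradients take smaller effective steps.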
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = rho * acc + (1 - rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - lr * g))
return updates
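# Architecture: three conv -> rectify -> 2x2 max-pool -> dropout blocks with
# 32, 64 and 128 feature maps (the last one flattened), a 625-unit rectified
# hidden layer with dropout, and a softmax output over the 10 digit classes.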
def model(X, w, w2, w3, w4, p_drop_conv, p_drop_hidden):
l1a = rectify(conv2d(X, w, border_mode='full'))
l1 = max_pool_2d(l1a, (2, 2))
l1 = dropout(l1, p_drop_conv)
l2a = rectify(conv2d(l1, w2))
l2 = max_pool_2d(l2a, (2, 2))
l2 = dropout(l2, p_drop_conv)
l3a = rectify(conv2d(l2, w3))
l3b = max_pool_2d(l3a, (2, 2))
l3 = T.flatten(l3b, outdim=2)
l3 = dropout(l3, p_drop_conv)
l4 = rectify(T.dot(l3, w4))
l4 = dropout(l4, p_drop_hidden)
pyx = softmax(T.dot(l4, w_o))
return l1, l2, l3, l4, pyx
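# Rough shape bookkeeping for the 28x28 MNIST input (assuming the old Theano
# max_pool_2d default ignore_border=False): conv 'full' 3x3 -> 30x30, pool ->
# 15x15; conv 3x3 -> 13x13, pool -> 7x7; conv 3x3 -> 5x5, pool -> 3x3, which is
# why w4 below has 128 * 3 * 3 inputs. Note that w_o is looked up as a
# module-level global (defined below), not passed in as an argument.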
trX, teX, trY, teY = mnist(onehot=True)
trX = trX.reshape(-1, 1, 28, 28)
teX = teX.reshape(-1, 1, 28, 28)
X = T.ftensor4()
Y = T.fmatrix()
w = init_weights((32, 1, 3, 3))
w2 = init_weights((64, 32, 3, 3))
w3 = init_weights((128, 64, 3, 3))
w4 = init_weights((128 * 3 * 3, 625))
w_o = init_weights((625, 10))
noise_l1, noise_l2, noise_l3, noise_l4, noise_py_x = model(X, w, w2, w3, w4, 0.2, 0.5)
l1, l2, l3, l4, py_x = model(X, w, w2, w3, w4, 0., 0.)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))
params = [w, w2, w3, w4, w_o]
updates = RMSprop(cost, params, lr=0.001)
train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)
for i in range(50):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
cost = train(trX[start:end], trY[start:end])
print np.mean(np.argmax(teY, axis=1) == predict(teX))
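# 50 passes over the training set in minibatches of 128; test-set accuracy is
# printed after each epoch (any final partial batch is skipped by the zip above).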
f = file('objects.save', 'wb')
for obj in [l1, l2, l3, py_x]:
cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
def fillim(c):
    # Return the c-th first-layer 3x3 filter, scaled so it shows up in imshow.
    im = w[c, 0].eval() * 50
    return im
def plotWights():
    # Display the 32 first-layer filters in a 2x16 grid.
fig = plt.figure(1, (5., 5.))
grid = ImageGrid(fig, 111, # similar to subplot(111)
                nrows_ncols = (2, 16), # creates 2x16 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
for c in range(32):
grid[c].imshow(fillim(c),cmap=plt.cm.gray)
plt.show()
#todo: refactor
def plotConvImage():
input=floatX(trX[0:784])
out=conv2d(input, w, border_mode='full')
out=out[0,0,0:28,0:28].eval()
fig = plt.figure(1, (5., 5.))
grid = ImageGrid(fig, 111, # similar to subplot(111)
                nrows_ncols = (2, 16), # creates 2x16 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
grid[0].imshow(out,cmap=plt.cm.gray)
plt.show()
| mit |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/pylab_examples/contourf_log.py | 9 | 1350 | '''
Demonstrate use of a log color scale in contourf
'''
from matplotlib import pyplot as P
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
N = 100
x = np.linspace(-3.0, 3.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)
# A low hump with a spike coming out of the top right.
# Needs to have z/colour axis on a log scale so we see both hump and spike.
# linear scale only shows the spike.
z = (bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0)
+ 0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0))
# Put in some negative values (lower left corner) to cause trouble with logs:
z[:5, :5] = -1
# The following is not strictly essential, but it will eliminate
# a warning. Comment it out to see the warning.
z = ma.masked_where(z <= 0, z)
# Automatic selection of levels works; setting the
# log locator tells contourf to use a log scale:
cs = P.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)
# Alternatively, you can manually set the levels
# and the norm:
#lev_exp = np.arange(np.floor(np.log10(z.min())-1),
# np.ceil(np.log10(z.max())+1))
#levs = np.power(10, lev_exp)
#cs = P.contourf(X, Y, z, levs, norm=colors.LogNorm())
#The 'extend' kwarg does not work yet with a log scale.
cbar = P.colorbar()
P.show()
| apache-2.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/tests/test_patheffects.py | 10 | 5445 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_equal
@image_comparison(baseline_images=['patheffect1'], remove_text=True)
def test_patheffect1():
ax1 = plt.subplot(111)
ax1.imshow([[1, 2], [2, 3]])
txt = ax1.annotate("test", (1., 1.), (0., 0),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3", lw=2),
size=20, ha="center",
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
txt.arrow_patch.set_path_effects([path_effects.Stroke(linewidth=5,
foreground="w"),
path_effects.Normal()])
ax1.grid(True, linestyle="-")
pe = [path_effects.withStroke(linewidth=3, foreground="w")]
for l in ax1.get_xgridlines() + ax1.get_ygridlines():
l.set_path_effects(pe)
@image_comparison(baseline_images=['patheffect2'], remove_text=True)
def test_patheffect2():
ax2 = plt.subplot(111)
arr = np.arange(25).reshape((5, 5))
ax2.imshow(arr)
cntr = ax2.contour(arr, colors="k")
plt.setp(cntr.collections,
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
clbls = ax2.clabel(cntr, fmt="%2.0f", use_clabeltext=True)
plt.setp(clbls,
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
@image_comparison(baseline_images=['patheffect3'])
def test_patheffect3():
p1, = plt.plot([1, 3, 5, 4, 3], 'o-b', lw=4)
p1.set_path_effects([path_effects.SimpleLineShadow(),
path_effects.Normal()])
plt.title(r'testing$^{123}$',
path_effects=[path_effects.withStroke(linewidth=1, foreground="r")])
leg = plt.legend([p1], [r'Line 1$^2$'], fancybox=True, loc=2)
leg.legendPatch.set_path_effects([path_effects.withSimplePatchShadow()])
text = plt.text(2, 3, 'Drop test', color='white',
bbox={'boxstyle': 'circle,pad=0.1', 'color': 'red'})
pe = [path_effects.Stroke(linewidth=3.75, foreground='k'),
path_effects.withSimplePatchShadow((6, -3), shadow_rgbFace='blue')]
text.set_path_effects(pe)
text.get_bbox_patch().set_path_effects(pe)
pe = [path_effects.PathPatchEffect(offset=(4, -4), hatch='xxxx',
facecolor='gray'),
path_effects.PathPatchEffect(edgecolor='white', facecolor='black',
lw=1.1)]
t = plt.gcf().text(0.02, 0.1, 'Hatch shadow', fontsize=75, weight=1000,
va='center')
t.set_path_effects(pe)
@cleanup
def test_PathEffect_get_proxy():
pe = path_effects.AbstractPathEffect()
fig = plt.gcf()
renderer = fig.canvas.get_renderer()
with mock.patch('matplotlib.cbook.deprecated') as dep:
proxy_renderer = pe.get_proxy_renderer(renderer)
assert_equal(proxy_renderer._renderer, renderer)
assert_equal(proxy_renderer._path_effects, [pe])
dep.assert_called()
@cleanup
def test_PathEffect_points_to_pixels():
fig = plt.figure(dpi=150)
p1, = plt.plot(range(10))
p1.set_path_effects([path_effects.SimpleLineShadow(),
path_effects.Normal()])
renderer = fig.canvas.get_renderer()
pe_renderer = path_effects.SimpleLineShadow().get_proxy_renderer(renderer)
assert isinstance(pe_renderer, path_effects.PathEffectRenderer), (
        'Expected a PathEffectRenderer instance, got '
'a {} instance.'.format(type(pe_renderer)))
# Confirm that using a path effects renderer maintains point sizes
# appropriately. Otherwise rendered font would be the wrong size.
assert_equal(renderer.points_to_pixels(15),
pe_renderer.points_to_pixels(15))
def test_SimplePatchShadow_offset_xy():
with mock.patch('matplotlib.cbook.deprecated') as dep:
pe = path_effects.SimplePatchShadow(offset_xy=(4, 5))
assert_equal(pe._offset, (4, 5))
dep.assert_called()
@image_comparison(baseline_images=['collection'])
def test_collection():
x, y = np.meshgrid(np.linspace(0, 10, 150), np.linspace(-5, 5, 100))
data = np.sin(x) + np.cos(y)
cs = plt.contour(data)
pe = [path_effects.PathPatchEffect(edgecolor='black', facecolor='none',
linewidth=12),
path_effects.Stroke(linewidth=5)]
for collection in cs.collections:
collection.set_path_effects(pe)
for text in plt.clabel(cs, colors='white'):
text.set_path_effects([path_effects.withStroke(foreground='k',
linewidth=3)])
text.set_bbox({'boxstyle': 'sawtooth', 'facecolor': 'none',
'edgecolor': 'blue'})
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/feature_extraction/text.py | 1 | 49725 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..externals import six
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _check_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, and they are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', charset=None, encoding='utf-8',
decode_error='strict', charset_error=None,
strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
if charset is not None:
warnings.warn("The charset parameter is deprecated as of version "
"0.14 and will be removed in 0.16. Use encoding "
"instead.",
DeprecationWarning)
self.encoding = charset
if charset_error is not None:
warnings.warn("The charset_error parameter is deprecated as of "
"version 0.14 and will be removed in 0.16. Use "
"decode_error instead.",
DeprecationWarning)
self.decode_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
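# Illustrative sketch (not part of the library source): hashing a few short toy
# documents into a fixed 2 ** 8-dimensional space. The texts and the n_features
# value are arbitrary examples chosen for this sketch.
#
#   >>> hv = HashingVectorizer(n_features=2 ** 8, norm=None, non_negative=True)
#   >>> counts = hv.transform(["the cat", "the dog", "the cat and the dog"])
#   >>> counts.shape
#   (3, 256)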
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, and they are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
`vocabulary_` : dict
A mapping of terms to feature indices.
`stop_words_` : set
Terms that were ignored because
they occurred in either too many
(`max_df`) or in too few (`min_df`) documents.
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8', charset=None,
decode_error='strict', charset_error=None,
strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
if charset is not None:
warnings.warn("The charset parameter is deprecated as of version "
"0.14 and will be removed in 0.16. Use encoding "
"instead.",
DeprecationWarning)
self.encoding = charset
if charset_error is not None:
warnings.warn("The charset_error parameter is deprecated as of "
"version 0.14 and will be removed in 0.16. Use "
"decode_error instead.",
DeprecationWarning)
self.decode_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
            raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
# some Python/Scipy versions won't accept an array.array:
if j_indices:
j_indices = np.frombuffer(j_indices, dtype=np.intc)
else:
j_indices = np.array([], dtype=np.int32)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._check_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to fewer documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._check_vocabulary()
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
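# Illustrative sketch (not part of the library source): a minimal end-to-end use
# of CountVectorizer on three toy documents.
#
#   >>> cv = CountVectorizer(min_df=1)
#   >>> X = cv.fit_transform(["apple banana", "banana cherry", "apple apple"])
#   >>> sorted(cv.vocabulary_.keys())
#   ['apple', 'banana', 'cherry']
#   >>> X.shape
#   (3, 3)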
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log1p instead of log makes sure terms with zero idf don't get
# suppressed entirely
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
if not hasattr(self, "_idf_diag"):
raise ValueError("idf vector not fitted")
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
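# Worked example of the idf weighting computed in fit() above (numbers are
# illustrative): with n_samples = 4 documents and a term present in df = 2 of
# them, smooth_idf=True first adds 1 to both counts, giving
#   idf = log(5 / 3) + 1 ~= 1.51
# whereas a term present in every document gets idf = log(5 / 5) + 1 = 1, so it
# is never zeroed out entirely.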
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, and they are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, False by default.
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
``idf_`` : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', encoding='utf-8', charset=None,
decode_error='strict', charset_error=None,
strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, charset=charset, charset_error=charset_error,
encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
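# Illustrative sketch (not part of the library source): TfidfVectorizer chaining
# the counting and tf-idf steps above on toy documents.
#
#   >>> tv = TfidfVectorizer(min_df=1)
#   >>> X = tv.fit_transform(["apple banana", "banana cherry", "apple apple"])
#   >>> X.shape
#   (3, 3)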
| apache-2.0 |
raghavrv/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 12 | 20126 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_lsh_forest_deprecation():
assert_warns_message(DeprecationWarning,
"LSHForest has poor performance and has been "
"deprecated in 0.19. It will be removed "
"in version 0.21.", LSHForest)
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
print('accuracies:', accuracies)
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
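# The "accuracy" above is the fraction of the exact cosine-distance top-n_points
# neighbours recovered by the approximate query, averaged over n_iter random
# queries; it is expected to grow monotonically with n_candidates.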
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when the `radius` is set
    # to the mean distance from the query point to the other points in
    # the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0, n_candidates=n_points, random_state=42).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms a 45 degree angle with the query vector, so its
    # cosine distance is 1 - cos(pi/4):
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a cosine
    # distance of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but points in the
    # opposite direction, therefore its cosine 'distance' is very close to the
    # maximum possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
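# Illustrative sketch (editorial; not part of the scikit-learn test suite):
# the cosine-distance reasoning used in the boundary test above can be checked
# numerically in isolation. It only assumes numpy and scikit-learn, which this
# module already depends on. It is never collected or called by the tests.
def _cosine_distance_sanity_check():
    import numpy as np
    from sklearn.metrics.pairwise import pairwise_distances
    X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
    query = [[1., 0.]]
    dists = pairwise_distances(query, X, metric='cosine').ravel()
    # Approximately [0.0, 1 - cos(pi/4) ~= 0.293, 1.0, 2.0]
    assert np.allclose(dists, [0., 1 - np.cos(np.pi / 4), 1., 2.], atol=1e-3)
    return dists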
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
    # After insertion, _fit_X holds the original plus the inserted samples
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # original_indices_[0] grows by the number of inserted samples
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # each tree (here trees_[1]) grows by the number of inserted samples
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
    # Note: a fixed random state for sp.rand is not supported in older scipy
    # versions; the test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = ignore_warnings(LSHForest, category=DeprecationWarning)(
radius=1, random_state=0).fit(X1)
forest_dense = ignore_warnings(LSHForest, category=DeprecationWarning)(
radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
parrt/lolviz | prince_dtree.py | 1 | 12296 | import IPython, graphviz, re
from io import StringIO
from IPython.display import Image
import numpy as np
import pandas as pd
import math
from sklearn import tree
from sklearn.datasets import load_boston, load_iris
from collections import defaultdict
import string
import re
YELLOW = "#fefecd" # "#fbfbd0" # "#FBFEB0"
BLUE = "#D9E6F5"
GREEN = "#cfe2d4"
color_blind_friendly_colors = {
'redorange': '#f46d43',
'orange': '#fdae61', 'yellow': '#fee090', 'sky': '#e0f3f8',
'babyblue': '#abd9e9', 'lightblue': '#74add1', 'blue': '#4575b4'
}
color_blind_friendly_colors = [
None, # 0 classes
None, # 1 class
[YELLOW,BLUE], # 2 classes
[YELLOW,BLUE,GREEN], # 3 classes
[YELLOW,BLUE,GREEN,'#a1dab4'], # 4
[YELLOW,BLUE,GREEN,'#a1dab4','#41b6c4'], # 5
[YELLOW,'#c7e9b4','#7fcdbb','#41b6c4','#2c7fb8','#253494'], # 6
[YELLOW,'#c7e9b4','#7fcdbb','#41b6c4','#1d91c0','#225ea8','#0c2c84'], # 7
[YELLOW,'#edf8b1','#c7e9b4','#7fcdbb','#41b6c4','#1d91c0','#225ea8','#0c2c84'], # 8
[YELLOW,'#ece7f2','#d0d1e6','#a6bddb','#74a9cf','#3690c0','#0570b0','#045a8d','#023858'], # 9
[YELLOW,'#e0f3f8','#313695','#fee090','#4575b4','#fdae61','#abd9e9','#74add1','#d73027','#f46d43'] # 10
]
for x in color_blind_friendly_colors[2:]:
print(x)
max_class_colors = len(color_blind_friendly_colors)-1
def tree_traverse(n_nodes, children_left, children_right):
"""
    Adapted from
    http://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html
    to walk the tree.
    Traverses the tree structure to compute various properties such
    as the depth of each node and whether or not it is a leaf.
Input -
n_nodes: number of nodes in the tree
children_left: array of length n_nodes. left children node indexes
children_right: array of length n_nodes. right children node indexes
:return:
is_leaf: array of length n_nodes with boolean whether node i is leaf or not,
node_depth: depth of each node from root to node. root is depth 0
"""
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaf = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop() # (0,-1)
node_depth[node_id] = parent_depth + 1
# If we have a non-leaf node
if children_left[node_id] != children_right[node_id]:
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaf[node_id] = True
return is_leaf, node_depth
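# Illustrative sketch (editorial; not part of lolviz): a minimal call to
# tree_traverse on a 3-node stump, where node 0 splits into leaves 1 and 2
# and -1 marks "no child", following scikit-learn's tree layout. The helper
# name is hypothetical and nothing else in this module calls it.
def _tree_traverse_example():
    children_left = np.array([1, -1, -1])
    children_right = np.array([2, -1, -1])
    is_leaf, node_depth = tree_traverse(3, children_left, children_right)
    # Expected: is_leaf == [False, True, True], node_depth == [0, 1, 1]
    return is_leaf, node_depth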
# def dectree_max_depth(tree):
# n_nodes = tree.node_count
# children_left = tree.children_left
# children_right = tree.children_right
#
# def walk(node_id):
# if (children_left[node_id] != children_right[node_id]):
# left_max = 1 + walk(children_left[node_id])
# right_max = 1 + walk(children_right[node_id])
# # if node_id<100: print(f"node {node_id}: {left_max}, {right_max}")
# return max(left_max, right_max)
# else: # leaf
# return 1
#
# root_node_id = 0
# return walk(root_node_id)
def dtreeviz(tree, X, y, precision=1, classnames=None, orientation="LR"):
def get_feature(i):
name = X.columns[feature[i]]
node_name = ''.join(c for c in name if c not in string.punctuation)+str(i)
node_name = re.sub("["+string.punctuation+string.whitespace+"]", '_', node_name)
return name, node_name
def round(v,ndigits=precision):
return format(v, '.' + str(ndigits) + 'f')
def dec_node_box(name, node_name, split):
html = """<table BORDER="0" CELLPADDING="0" CELLBORDER="0" CELLSPACING="0">
<tr>
<td colspan="3" align="center" cellspacing="0" cellpadding="0" bgcolor="#fefecd" border="1" sides="b"><font face="Helvetica" color="#444443" point-size="12">{name}</font></td>
</tr>
<tr>
<td colspan="3" cellpadding="1" border="0" bgcolor="#fefecd"></td>
</tr>
<tr>
<td cellspacing="0" cellpadding="0" bgcolor="#fefecd" border="1" sides="r" align="right"><font face="Helvetica" color="#444443" point-size="11">split</font></td>
<td cellspacing="0" cellpadding="0" border="0"></td>
<td cellspacing="0" cellpadding="0" bgcolor="#fefecd" align="left"><font face="Helvetica" color="#444443" point-size="11">{split}</font></td>
</tr>
</table>""".format(name=name, split=split)
return '{node_name} [shape=box label=<{label}>]\n'.format(label=html, node_name=node_name)
def dec_node(name, node_name, split):
html = """<font face="Helvetica" color="#444443" point-size="12">{name}<br/>@{split}</font>""".format(name=name, split=split)
return '{node_name} [shape=none label=<{label}>]\n'.format(label=html, node_name=node_name)
def prop_size(n):
# map to 0.03 to .35
margin_range = (0.03, 0.35)
if sample_count_range>0:
zero_to_one = (n - min_samples) / sample_count_range
return zero_to_one * (margin_range[1] - margin_range[0]) + margin_range[0]
else:
return margin_range[0]
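    # Worked example (editorial, illustrative only): with min_samples=2 and
    # max_samples=10 (sample_count_range=8), the smallest leaf maps to the
    # lower bound 0.03 and the largest maps to
    # (10-2)/8 * (0.35-0.03) + 0.03 = 0.35; leaves in between scale linearly.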
# parsing the tree structure
n_nodes = tree.node_count # total nodes in the tree
children_left = tree.children_left # left children node index
children_right = tree.children_right # right children node index
feature = tree.feature # feature index at splits (-2 means leaf)
threshold = tree.threshold # split threshold values at given feature
is_leaf, node_depth = tree_traverse(n_nodes, children_left, children_right)
ranksep = ".22"
if orientation=="TD":
ranksep = ".35"
st = '\ndigraph G {splines=line;\n \
nodesep=0.1;\n \
ranksep=%s;\n \
rankdir=%s;\n \
node [margin="0.03" penwidth="0.5" width=.1, height=.1];\n \
edge [arrowsize=.4 penwidth="0.5"]\n' % (ranksep,orientation)
# Define decision nodes (non leaf nodes) as feature names
for i in range(n_nodes):
if not is_leaf[i]: # non leaf nodes
name, node_name = get_feature(i)
# st += dec_node_box(name, node_name, split=round(threshold[i]))
st += dec_node(name, node_name, split=round(threshold[i]))
# non leaf edges with > and <=
for i in range(n_nodes):
if not is_leaf[i]:
name, node_name = get_feature(i)
left, left_node_name = get_feature(children_left[i])
if is_leaf[children_left[i]]:
left = left_node_name ='leaf%d' % children_left[i]
right_name, right_node_name = get_feature(children_right[i])
if is_leaf[children_right[i]]:
right = right_node_name ='leaf%d' % children_right[i]
split = round(threshold[i])
left_html = '<font face="Helvetica" color="#444443" point-size="11"><</font>'
right_html = '<font face="Helvetica" color="#444443" point-size="11">≥</font>'
if orientation=="TD":
ldistance = ".9"
rdistance = ".9"
langle = "-28"
rangle = "28"
else:
ldistance = "1.3" # not used in LR mode; just label not taillable.
rdistance = "1.3"
langle = "-90"
rangle = "90"
blankedge = 'label=<<font face="Helvetica" color="#444443" point-size="1"> </font>>'
st += '{name} -> {left} [{blankedge} labelangle="{angle}" labeldistance="{ldistance}" {tail}label=<{label}>]\n'\
.format(label="",#left_html,
angle=langle,
ldistance=ldistance,
name=node_name,
blankedge = "",#blankedge,
tail="tail",#""tail" if orientation=="TD" else "",
left=left_node_name)
st += '{name} -> {right} [{blankedge} labelangle="{angle}" labeldistance="{rdistance}" {tail}label=<{label}>]\n' \
.format(label="",#right_html,
angle=rangle,
rdistance=rdistance,
name=node_name,
blankedge="",#blankedge,
tail="tail",# "tail" if orientation == "TD" else "",
right=right_node_name)
# find range of leaf sample count
leaf_sample_counts = [tree.n_node_samples[i] for i in range(n_nodes) if is_leaf[i]]
min_samples = min(leaf_sample_counts)
max_samples = max(leaf_sample_counts)
sample_count_range = max_samples - min_samples
print(leaf_sample_counts)
print("range is ", sample_count_range)
# is_classifier = hasattr(tree, 'n_classes')
    is_classifier = tree.n_classes[0] > 1
n_classes = tree.n_classes[0]
color_values = color_blind_friendly_colors[n_classes]
# color_values = [c+"EF" for c in color_values] # add alpha
# Define leaf nodes (after edges so >= edges shown properly)
for i in range(n_nodes):
if is_leaf[i]:
node_samples = tree.n_node_samples[i]
impurity = tree.impurity
if is_classifier:
counts = np.array(tree.value[i][0])
predicted_class = np.argmax(counts)
predicted = predicted_class
if classnames:
predicted = classnames[predicted_class]
ratios = counts / node_samples # convert counts to ratios totalling 1.0
ratios = [round(r,3) for r in ratios]
color_spec = ["{c};{r}".format(c=color_values[i],r=r) for i,r in enumerate(ratios)]
color_spec = ':'.join(color_spec)
if n_classes > max_class_colors:
color_spec = YELLOW
html = """<font face="Helvetica" color="black" point-size="12">{predicted}<br/> </font>""".format(predicted=predicted)
margin = prop_size(node_samples)
st += 'leaf{i} [height=0 width="0.4" margin="{margin}" style={style} fillcolor="{colors}" shape=circle label=<{label}>]\n' \
.format(i=i, label=html, name=node_name, colors=color_spec, margin=margin,
style='wedged' if n_classes<=max_class_colors else 'filled')
else:
value = tree.value[i][0]
html = """<font face="Helvetica" color="#444443" point-size="11">"""+round(value[0])+"""</font>"""
margin = prop_size(node_samples)
st += 'leaf{i} [height=0 width="0.4" margin="{margin}" style=filled fillcolor="{color}" shape=circle label=<{label}>]\n'\
.format(i=i, label=html, name=node_name, color=YELLOW, margin=margin)
# end of string
st = st+'}'
return st
def boston():
regr = tree.DecisionTreeRegressor(max_depth=4, random_state=666)
boston = load_boston()
print(boston.data.shape, boston.target.shape)
data = pd.DataFrame(boston.data)
data.columns =boston.feature_names
regr = regr.fit(data, boston.target)
# st = dectreeviz(regr.tree_, data, boston.target)
st = dtreeviz(regr.tree_, data, boston.target, orientation="TD")
with open("/tmp/t3.dot", "w") as f:
f.write(st)
return st
def iris():
clf = tree.DecisionTreeClassifier(max_depth=4, random_state=666)
iris = load_iris()
print(iris.data.shape, iris.target.shape)
data = pd.DataFrame(iris.data)
data.columns = iris.feature_names
clf = clf.fit(data, iris.target)
# st = dectreeviz(clf.tree_, data, boston.target)
st = dtreeviz(clf.tree_, data, iris.target, orientation="TD"
, classnames=["setosa", "versicolor", "virginica"]
)
with open("/tmp/t3.dot", "w") as f:
f.write(st)
print(clf.tree_.value)
return st
# st = iris()
st = boston()
print(st)
graphviz.Source(st).view()
| bsd-3-clause |
priseborough/InertialNav | code/plot_states.py | 6 | 2287 | #!/bin/python
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import numpy as np
import math
# State vector:
# 0-3: quaternions (q0, q1, q2, q3)
# 4-6: Velocity - m/sec (North, East, Down)
# 7-9: Position - m (North, East, Down)
# 10-12: Delta Angle bias - rad (X,Y,Z)
# 13: Accel offset
# 14-15: Wind Vector - m/sec (North,East)
# 16-18: Earth Magnetic Field Vector - milligauss (North, East, Down)
# 19-21: Body Magnetic Field Vector - milligauss (X,Y,Z)
# 22: Terrain
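# Editorial sketch (illustrative only; not part of the original script): the
# named columns parsed below map onto the state-vector blocks listed above as
# follows. The dict name STATE_BLOCKS is editorial; 'Aoff' and 'dist' are only
# present in some log formats, which is why the parsing below has fallbacks.
STATE_BLOCKS = {
    'quaternion': ['q1', 'q2', 'q3', 'q4'],
    'velocity_NED': ['Vn', 'Ve', 'Vd'],
    'position_NED': ['Pn', 'Pe', 'Pd'],
    'delta_angle_bias': ['Bx', 'By', 'Bz'],
    'accel_offset': ['Aoff'],
    'wind_NE': ['Wn', 'We'],
    'earth_mag_NED': ['Mn', 'Me', 'Md'],
    'body_mag': ['Mbn', 'Mbe', 'Mbd'],
    'terrain': ['dist'],
}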
try:
data = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd', 'dist'])
except ValueError:
try:
data = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])
except ValueError:
data = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])
fig = plt.figure()
ax1 = fig.add_subplot(611)
ax1.set_title("Offsets")
ax1.set_ylabel('X gyro offset')
ax1.set_ylim([-0.0025,0.0025])
ax1.plot(data['time'], data['Bx'], color='r', label='Bx')
ax2 = fig.add_subplot(612)
ax2.set_ylabel('Y gyro offset')
ax2.set_ylim([-0.0025,0.0025])
ax2.plot(data['time'], data['By'], color='g', label='By')
ax3 = fig.add_subplot(613)
ax3.set_ylabel('Z gyro offset')
ax3.set_ylim([-0.0025,0.0025])
ax3.plot(data['time'], data['Bz'], color='b', label='Bz')
ax4 = fig.add_subplot(614)
ax4.set_ylabel('Mag offset N')
ax4.set_ylim([-0.4,0.4])
ax4.plot(data['time'], data['Mbn'], color='b', label='Mbn')
ax5 = fig.add_subplot(615)
ax5.set_ylabel('Mag offset E')
ax5.set_ylim([-0.4,0.4])
ax5.plot(data['time'], data['Mbe'], color='b', label='Mbe')
ax6 = fig.add_subplot(616)
ax6.set_xlabel('time (s)')
ax6.set_ylabel('Mag offset D')
ax6.set_ylim([-0.4,0.4])
ax6.plot(data['time'], data['Mbd'], color='b', label='Mbd')
plt.show() | bsd-3-clause |
ThomasMiconi/nupic.research | projects/l2_pooling/multi_column_convergence.py | 2 | 22360 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file plots the convergence of L4-L2 as you increase the number of columns,
or adjust the confusion between objects.
"""
import random
import os
from math import ceil
import pprint
import numpy
import cPickle
from multiprocessing import Pool
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
"""
for i,v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats)-i + 1
# Never differs - converged in one iteration
return 1
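# Editorial example (illustrative; not part of the original code): for a toy
# trace,
#
#   locateConvergencePoint([5, 12, 35, 38, 36], minOverlap=30, maxOverlap=40)
#
# the backwards walk first leaves the [30, 40] band at the value 12 and the
# function returns 3, the 1-based step at which the trace entered the band and
# stayed there.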
def averageConvergencePoint(inferenceStats, prefix, minOverlap, maxOverlap,
settlingTime):
"""
inferenceStats contains activity traces while the system visits each object.
Given the i'th object, inferenceStats[i] contains activity statistics for
each column for each region for the entire sequence of sensations.
For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time across all objects.
  Given inference statistics for a bunch of runs, locate all traces with the
  given prefix. For each trace locate the iteration where it finally settles
  within [minOverlap, maxOverlap]. Return the average settling iteration
  across all runs.
"""
convergenceSum = 0.0
# For each object
for stats in inferenceStats:
# For each L2 column locate convergence time
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
columnConvergence = locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
# Ensure this column has converged by the last iteration
# assert(columnConvergence <= len(stats[key]))
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint)/settlingTime)
return convergenceSum/len(inferenceStats)
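# Editorial example (illustrative; not part of the original code): the
# convergence point for an object is the max of its columns' raw convergence
# steps, converted to touches via ceil(point / settlingTime). With
# settlingTime=3, a raw convergence step of 7 therefore counts as
# ceil(7 / 3.0) = 3 touches, because each touch is presented for settlingTime
# consecutive time steps. The returned value is the mean over all objects.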
def objectConfusion(objects):
"""
For debugging, print overlap between each pair of objects.
"""
sumCommonLocations = 0
sumCommonFeatures = 0
sumCommonPairs = 0
numObjects = 0
commonPairHistogram = numpy.zeros(len(objects[0]), dtype=numpy.int32)
for o1,s1 in objects.iteritems():
for o2,s2 in objects.iteritems():
if o1 != o2:
# Count number of common locations id's and common feature id's
commonLocations = 0
commonFeatures = 0
for pair1 in s1:
for pair2 in s2:
if pair1[0] == pair2[0]: commonLocations += 1
if pair1[1] == pair2[1]: commonFeatures += 1
# print "Confusion",o1,o2,", common pairs=",len(set(s1)&set(s2)),
# print ", common locations=",commonLocations,"common features=",commonFeatures
assert(len(set(s1)&set(s2)) != len(s1) ), "Two objects are identical!"
sumCommonPairs += len(set(s1)&set(s2))
sumCommonLocations += commonLocations
sumCommonFeatures += commonFeatures
commonPairHistogram[len(set(s1)&set(s2))] += 1
numObjects += 1
print "Average common pairs=", sumCommonPairs / float(numObjects),
print ", locations=",sumCommonLocations / float(numObjects),
print ", features=",sumCommonFeatures / float(numObjects)
print "Common pair histogram=",commonPairHistogram
def runExperiment(args):
"""
Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param noiseLevel (float) Noise level to add to the locations and features
during inference. Default: None
@param profile (bool) If True, the network will be profiled after
learning and inference. Default: False
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param pointRange (int) Creates objects each with points ranging from
[numPoints,...,numPoints+pointRange-1]
A total of numObjects * pointRange objects will be
created.
Default: 1
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
The method returns the args dict updated with two additional keys:
convergencePoint (int) The average number of iterations it took
to converge across all objects
objects (pairs) The list of objects we trained on
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
profile = args.get("profile", False)
noiseLevel = args.get("noiseLevel", None) # TODO: implement this?
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
pointRange = args.get("pointRange", 1)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=150,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
seed=trialNum
)
for p in range(pointRange):
objects.createRandomObjects(numObjects, numPoints=numPoints+p,
numLocations=numLocations,
numFeatures=numFeatures)
objectConfusion(objects.getObjects())
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# Setup experiment and train the network
name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
numCorticalColumns=numColumns,
inputSize=150,
externalInputSize=2400,
numInputBits=20,
seed=trialNum
)
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile(reset=True)
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
for objectId in objects:
obj = objects[objectId]
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if numColumns > 1:
      # Create a sequence of random sensations for this object for all columns.
      # At any point in time, ensure each column touches a unique loc,feature pair
# on the object. It is ok for a given column to sense a loc,feature pair
# more than once. The total number of sensations is equal to the number of
# points on the object.
for sensationNumber in range(len(obj)):
# Randomly shuffle points for each sensation
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for c in range(numColumns):
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[c].append(objectCopy[c])
else:
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for pair in objectCopy:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
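    # Editorial note (illustrative; not from the original authors): with a
    # 2-point object [(l1, f1), (l2, f2)], a single column and settlingTime=2,
    # objectSensations[0] ends up as [(l1, f1), (l1, f1), (l2, f2), (l2, f2)]
    # (the order of the two pairs is random); each sensation is repeated
    # settlingTime times so the network can settle before the next touch.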
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"includeRandomLocation": includeRandomLocation,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId)
if profile:
exp.printProfile(reset=True)
if plotInferenceStats:
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
experimentID=objectId,
onePlot=False,
)
convergencePoint = averageConvergencePoint(
exp.getInferenceStats(),"L2 Representation", 30, 40, settlingTime)
print
print "# objects {} # features {} # locations {} # columns {} trial # {}".format(
numObjects, numFeatures, numLocations, numColumns, trialNum)
print "Average convergence point=",convergencePoint
# Return our convergence point as well as all the parameters and objects
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
# Can't pickle experiment so can't return it for batch multiprocessing runs.
# However this is very useful for debugging when running in a single thread.
if plotInferenceStats:
args.update({"experiment": exp})
return args
def runExperimentPool(numObjects,
numLocations,
numFeatures,
numColumns,
numWorkers=7,
nTrials=1,
pointRange=1,
numPoints=10,
includeRandomLocation=False,
resultsName="convergence_results.pkl"):
"""
Allows you to run a number of experiments using multiple processes.
For each parameter except numWorkers, pass in a list containing valid values
for that parameter. The cross product of everything is run, and each
combination is run nTrials times.
Returns a list of dict containing detailed results from each experiment.
Also pickles and saves the results in resultsName for later analysis.
Example:
results = runExperimentPool(
numObjects=[10],
numLocations=[5],
numFeatures=[5],
numColumns=[2,3,4,5,6],
numWorkers=8,
nTrials=5)
"""
# Create function arguments for every possibility
args = []
for t in range(nTrials):
for c in numColumns:
for o in numObjects:
for l in numLocations:
for f in numFeatures:
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"numColumns": c,
"trialNum": t,
"pointRange": pointRange,
"numPoints": numPoints,
"plotInferenceStats": False,
"includeRandomLocation": includeRandomLocation,
"settlingTime": 3,
}
)
print "{} experiments to run, {} workers".format(len(args), numWorkers)
# Run the pool
if numWorkers > 1:
pool = Pool(processes=numWorkers)
result = pool.map(runExperiment, args)
else:
result = []
for arg in args:
result.append(runExperiment(arg))
# print "Full results:"
# pprint.pprint(result, width=150)
# Pickle results for later use
with open(resultsName,"wb") as f:
cPickle.dump(result,f)
return result
def plotConvergenceByColumn(results, columnRange, featureRange, numTrials):
"""
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# Convergence[f,c] = how long it took it to converge with f unique features
# and c columns.
convergence = numpy.zeros((max(featureRange), max(columnRange) + 1))
for r in results:
convergence[r["numFeatures"] - 1,
r["numColumns"]] += r["convergencePoint"]
convergence /= numTrials
# For each column, print convergence as fct of number of unique features
for c in range(1, max(columnRange) + 1):
print c, convergence[:, c]
# Print everything anyway for debugging
print "Average convergence array=", convergence
########################################################################
#
  # Create the plot: x-axis = number of columns.
plt.figure()
plotPath = os.path.join("plots", "convergence_by_column.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
print columnRange
print convergence[f-1,columnRange]
legendList.append('Unique features={}'.format(f))
plt.plot(columnRange, convergence[f-1,columnRange],
color=colorList[i])
# format
plt.legend(legendList, loc="upper right")
plt.xlabel("Number of columns")
plt.xticks(columnRange)
plt.yticks(range(0,int(convergence.max())+1))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (multiple columns)")
# save
plt.savefig(plotPath)
plt.close()
def plotConvergenceByObject(results, objectRange, featureRange, numTrials):
"""
Plots the convergence graph: iterations vs number of objects.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# Convergence[f,o] = how long it took it to converge with f unique features
# and o objects.
convergence = numpy.zeros((max(featureRange), max(objectRange) + 1))
for r in results:
if r["numFeatures"] in featureRange:
convergence[r["numFeatures"] - 1, r["numObjects"]] += r["convergencePoint"]
convergence /= numTrials
########################################################################
#
  # Create the plot: x-axis = number of objects.
plt.figure()
plotPath = os.path.join("plots", "convergence_by_object_random_location.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
print "features={} objectRange={} convergence={}".format(
f,objectRange, convergence[f-1,objectRange])
legendList.append('Unique features={}'.format(f))
plt.plot(objectRange, convergence[f-1,objectRange],
color=colorList[i])
# format
plt.legend(legendList, loc="lower right", prop={'size':10})
plt.xlabel("Number of objects in training set")
plt.xticks(range(0,max(objectRange)+1,10))
plt.yticks(range(0,int(convergence.max())+2))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (single column)")
# save
plt.savefig(plotPath)
plt.close()
def plotConvergenceByObjectMultiColumn(results, objectRange, columnRange,
                                       numTrials):
"""
Plots the convergence graph: iterations vs number of objects.
Each curve shows the convergence for a given number of columns.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
  # Convergence[c,o] = how long it took to converge with c columns
  # and o objects.
convergence = numpy.zeros((max(columnRange), max(objectRange) + 1))
for r in results:
if r["numColumns"] in columnRange:
convergence[r["numColumns"] - 1, r["numObjects"]] += r["convergencePoint"]
convergence /= numTrials
# print "Average convergence array=", convergence
########################################################################
#
  # Create the plot: x-axis = number of objects.
plt.figure()
plotPath = os.path.join("plots", "convergence_by_object_multicolumn.jpg")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(columnRange)):
c = columnRange[i]
print "columns={} objectRange={} convergence={}".format(
c, objectRange, convergence[c-1,objectRange])
if c == 1:
legendList.append('1 column')
else:
legendList.append('{} columns'.format(c))
plt.plot(objectRange, convergence[c-1,objectRange],
color=colorList[i])
# format
plt.legend(legendList, loc="upper left", prop={'size':10})
plt.xlabel("Number of objects in training set")
plt.xticks(range(0,max(objectRange)+1,10))
plt.yticks(range(0,int(convergence.max())+2))
plt.ylabel("Average number of touches")
plt.title("Object recognition with multiple columns (unique features = 5)")
# save
plt.savefig(plotPath)
plt.close()
if __name__ == "__main__":
# This is how you run a specific experiment in single process mode. Useful
# for debugging, profiling, etc.
if True:
results = runExperiment(
{
"numObjects": 30,
"numPoints": 10,
"numLocations": 10,
"numFeatures": 10,
"numColumns": 1,
"trialNum": 4,
"pointRange": 1,
"plotInferenceStats": True, # Outputs detailed graphs
"settlingTime": 3,
"includeRandomLocation": False
}
)
# Here we want to see how the number of columns affects convergence.
# This experiment is run using a process pool
if False:
columnRange = [1, 2, 3, 4, 5, 6, 7, 8]
featureRange = [5, 10, 20, 30]
objectRange = [100]
numTrials = 10
# Comment this out if you are re-running analysis on already saved results
# Very useful for debugging the plots
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
nTrials=numTrials,
numWorkers=7,
resultsName="column_convergence_results.pkl")
with open("column_convergence_results.pkl","rb") as f:
results = cPickle.load(f)
plotConvergenceByColumn(results, columnRange, featureRange,
numTrials=numTrials)
# Here we want to see how the number of objects affects convergence for a
# single column.
# This experiment is run using a process pool
if False:
# We run 10 trials for each column number and then analyze results
numTrials = 10
columnRange = [1]
featureRange = [5,10,20,30]
objectRange = [2,10,20,30,40,50,60,80,100]
# Comment this out if you are re-running analysis on already saved results.
# Very useful for debugging the plots
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
nTrials=numTrials,
numWorkers=7,
resultsName="object_convergence_results.pkl")
# Analyze results
with open("object_convergence_results.pkl","rb") as f:
results = cPickle.load(f)
    plotConvergenceByObject(results, objectRange, featureRange, numTrials)
# Here we want to see how the number of objects affects convergence for
# multiple columns.
if False:
# We run 10 trials for each column number and then analyze results
numTrials = 10
columnRange = [1,2,4,6]
featureRange = [5]
objectRange = [2,5,10,20,30,40,50,60,80,100]
# Comment this out if you are re-running analysis on already saved results.
# Very useful for debugging the plots
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
numWorkers=7,
nTrials=numTrials,
resultsName="object_convergence_multi_column_results.pkl")
# Analyze results
with open("object_convergence_multi_column_results.pkl","rb") as f:
results = cPickle.load(f)
    plotConvergenceByObjectMultiColumn(results, objectRange, columnRange,
                                       numTrials)
| agpl-3.0 |
airware/jsbsim | tests/TestScriptOutput.py | 2 | 3376 | # TestScriptInputOutput.py
#
# Check that <output> tags specified in a script are properly handled
#
# Copyright (c) 2015 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import sys, unittest
import xml.etree.ElementTree as et
import pandas as pd
import numpy as np
from JSBSim_utils import CreateFDM, SandBox, ExecuteUntil
class TestScriptOutput(unittest.TestCase):
def setUp(self):
self.sandbox = SandBox()
self.script_path = self.sandbox.path_to_jsbsim_file('scripts',
'c1722.xml')
def tearDown(self):
self.sandbox.erase()
def test_no_output(self):
fdm = CreateFDM(self.sandbox)
fdm.load_script(self.script_path)
fdm.run_ic()
ExecuteUntil(fdm, 10.)
self.assertFalse(self.sandbox.exists('output.csv'),
msg="Results have unexpectedly been written to 'output.csv'")
def test_output_from_file(self):
tree = et.parse(self.sandbox.elude(self.script_path))
output_tag = et.SubElement(tree.getroot(), 'output')
output_tag.attrib['file'] = self.sandbox.elude(self.sandbox.path_to_jsbsim_file('tests', 'output.xml'))
tree.write(self.sandbox('c1722_0.xml'))
fdm = CreateFDM(self.sandbox)
fdm.load_script('c1722_0.xml')
fdm.run_ic()
ExecuteUntil(fdm, 10.)
self.assertTrue(self.sandbox.exists('output.csv'),
msg="The file 'output.csv' has not been created")
def test_output(self):
tree = et.parse(self.sandbox.elude(self.script_path))
output_tag = et.SubElement(tree.getroot(), 'output')
output_tag.attrib['name'] = 'test.csv'
output_tag.attrib['type'] = 'CSV'
output_tag.attrib['rate'] = '10'
property_tag = et.SubElement(output_tag, 'property')
property_tag.text = 'position/vrp-radius-ft'
tree.write(self.sandbox('c1722_0.xml'))
fdm = CreateFDM(self.sandbox)
fdm.load_script('c1722_0.xml')
fdm.run_ic()
ExecuteUntil(fdm, 10.)
self.assertTrue(self.sandbox.exists(output_tag.attrib['name']),
                        msg="The file 'test.csv' has not been created")
orig = pd.read_csv(self.sandbox('JSBout172B.csv'))
test = pd.read_csv(self.sandbox('test.csv'))
self.assertEqual(np.max(orig['Time']-test['Time']), 0.0)
pname = '/fdm/jsbsim/' + property_tag.text
self.assertEqual(np.max(orig[pname]-test[pname]), 0.0)
suite = unittest.TestLoader().loadTestsFromTestCase(TestScriptOutput)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if test_result.failures or test_result.errors:
sys.exit(-1) # 'make test' will report the test failed.
| lgpl-2.1 |
cbertinato/pandas | pandas/tests/frame/test_combine_concat.py | 1 | 34741 | from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameConcatCommon:
def test_concat_multiple_frames_dtypes(self):
# GH 2759
A = DataFrame(data=np.ones((10, 2)), columns=[
'foo', 'bar'], dtype=np.float64)
B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
results = pd.concat((A, B), axis=1).get_dtype_counts()
expected = Series(dict(float64=2, float32=2))
assert_series_equal(results, expected)
@pytest.mark.parametrize('data', [
pd.date_range('2000', periods=4),
pd.date_range('2000', periods=4, tz="US/Central"),
pd.period_range('2000', periods=4),
pd.timedelta_range(0, periods=4),
])
def test_combine_datetlike_udf(self, data):
# https://github.com/pandas-dev/pandas/issues/23079
df = pd.DataFrame({"A": data})
other = df.copy()
df.iloc[1, 0] = None
def combiner(a, b):
return b
result = df.combine(other, combiner)
tm.assert_frame_equal(result, other)
def test_concat_multiple_tzs(self):
# GH 12467
# combining datetime tz-aware and naive DataFrames
ts1 = Timestamp('2015-01-01', tz=None)
ts2 = Timestamp('2015-01-01', tz='UTC')
ts3 = Timestamp('2015-01-01', tz='EST')
df1 = DataFrame(dict(time=[ts1]))
df2 = DataFrame(dict(time=[ts2]))
df3 = DataFrame(dict(time=[ts3]))
results = pd.concat([df1, df2]).reset_index(drop=True)
expected = DataFrame(dict(time=[ts1, ts2]), dtype=object)
assert_frame_equal(results, expected)
results = pd.concat([df1, df3]).reset_index(drop=True)
expected = DataFrame(dict(time=[ts1, ts3]), dtype=object)
assert_frame_equal(results, expected)
results = pd.concat([df2, df3]).reset_index(drop=True)
expected = DataFrame(dict(time=[ts2, ts3]))
assert_frame_equal(results, expected)
@pytest.mark.parametrize(
't1',
[
'2015-01-01',
pytest.param(pd.NaT, marks=pytest.mark.xfail(
reason='GH23037 incorrect dtype when concatenating'))])
def test_concat_tz_NaT(self, t1):
# GH 22796
# Concating tz-aware multicolumn DataFrames
ts1 = Timestamp(t1, tz='UTC')
ts2 = Timestamp('2015-01-01', tz='UTC')
ts3 = Timestamp('2015-01-01', tz='UTC')
df1 = DataFrame([[ts1, ts2]])
df2 = DataFrame([[ts3]])
result = pd.concat([df1, df2])
expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
assert_frame_equal(result, expected)
def test_concat_tz_not_aligned(self):
# GH 22796
ts = pd.to_datetime([1, 2]).tz_localize("UTC")
a = pd.DataFrame({"A": ts})
b = pd.DataFrame({"A": ts, "B": ts})
result = pd.concat([a, b], sort=True, ignore_index=True)
expected = pd.DataFrame({"A": list(ts) + list(ts),
"B": [pd.NaT, pd.NaT] + list(ts)})
assert_frame_equal(result, expected)
def test_concat_tuple_keys(self):
# GH 14438
df1 = pd.DataFrame(np.ones((2, 2)), columns=list('AB'))
df2 = pd.DataFrame(np.ones((3, 2)) * 2, columns=list('AB'))
results = pd.concat((df1, df2), keys=[('bee', 'bah'), ('bee', 'boo')])
expected = pd.DataFrame(
{'A': {('bee', 'bah', 0): 1.0,
('bee', 'bah', 1): 1.0,
('bee', 'boo', 0): 2.0,
('bee', 'boo', 1): 2.0,
('bee', 'boo', 2): 2.0},
'B': {('bee', 'bah', 0): 1.0,
('bee', 'bah', 1): 1.0,
('bee', 'boo', 0): 2.0,
('bee', 'boo', 1): 2.0,
('bee', 'boo', 2): 2.0}})
assert_frame_equal(results, expected)
def test_append_series_dict(self):
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
series = df.loc[4]
msg = 'Indexes have overlapping values'
with pytest.raises(ValueError, match=msg):
df.append(series, verify_integrity=True)
series.name = None
msg = 'Can only append a Series if ignore_index=True'
with pytest.raises(TypeError, match=msg):
df.append(series, verify_integrity=True)
result = df.append(series[::-1], ignore_index=True)
expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T,
ignore_index=True)
assert_frame_equal(result, expected)
# dict
result = df.append(series.to_dict(), ignore_index=True)
assert_frame_equal(result, expected)
result = df.append(series[::-1][:3], ignore_index=True)
expected = df.append(DataFrame({0: series[::-1][:3]}).T,
ignore_index=True, sort=True)
assert_frame_equal(result, expected.loc[:, result.columns])
# can append when name set
row = df.loc[4]
row.name = 5
result = df.append(row)
expected = df.append(df[-1:], ignore_index=True)
assert_frame_equal(result, expected)
def test_append_list_of_series_dicts(self):
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
dicts = [x.to_dict() for idx, x in df.iterrows()]
result = df.append(dicts, ignore_index=True)
expected = df.append(df, ignore_index=True)
assert_frame_equal(result, expected)
# different columns
dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},
{'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]
result = df.append(dicts, ignore_index=True, sort=True)
expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
assert_frame_equal(result, expected)
def test_append_missing_cols(self):
# GH22252
# exercise the conditional branch in append method where the data
# to be appended is a list and does not contain all columns that are in
# the target DataFrame
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
dicts = [{'foo': 9}, {'bar': 10}]
with tm.assert_produces_warning(None):
result = df.append(dicts, ignore_index=True, sort=True)
expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
assert_frame_equal(result, expected)
def test_append_empty_dataframe(self):
# Empty df append empty df
df1 = DataFrame()
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
def test_append_dtypes(self):
# GH 5754
# row appends of different dtypes (so need to do by-item)
# can sometimes infer the correct type
df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(5))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1))
df2 = DataFrame({'bar': 'foo'}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame({'bar': [Timestamp('20130101'), 'foo']})
assert_frame_equal(result, expected)
df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1))
df2 = DataFrame({'bar': np.nan}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame(
{'bar': Series([Timestamp('20130101'), np.nan], dtype='M8[ns]')})
assert_frame_equal(result, expected)
df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1))
df2 = DataFrame({'bar': np.nan}, index=range(1, 2), dtype=object)
result = df1.append(df2)
expected = DataFrame(
{'bar': Series([Timestamp('20130101'), np.nan], dtype='M8[ns]')})
assert_frame_equal(result, expected)
df1 = DataFrame({'bar': np.nan}, index=range(1))
df2 = DataFrame({'bar': Timestamp('20130101')}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame(
{'bar': Series([np.nan, Timestamp('20130101')], dtype='M8[ns]')})
assert_frame_equal(result, expected)
df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1))
df2 = DataFrame({'bar': 1}, index=range(1, 2), dtype=object)
result = df1.append(df2)
expected = DataFrame({'bar': Series([Timestamp('20130101'), 1])})
assert_frame_equal(result, expected)
def test_update(self):
df = DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3],
[1.5, np.nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame([[1.5, np.nan, 3],
[3.6, 2, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 7.]])
assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame([[1., 2., False, True], [4., 5., True, False]],
columns=['A', 'B', 'bool1', 'bool2'])
other = DataFrame([[45, 45]], index=[0], columns=['A', 'B'])
df.update(other)
expected = DataFrame([[45., 45., False, True], [4., 5., True, False]],
columns=['A', 'B', 'bool1', 'bool2'])
assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3],
[1.5, np.nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame([[1.5, np.nan, 3],
[1.5, 2, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 3.]])
assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3],
[1.5, np.nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame([[1.5, np.nan, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 7.]])
assert_frame_equal(df, expected)
@pytest.mark.parametrize('bad_kwarg, exception, msg', [
# errors must be 'ignore' or 'raise'
({'errors': 'something'}, ValueError, 'The parameter errors must.*'),
({'join': 'inner'}, NotImplementedError, 'Only left join is supported')
])
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
df = DataFrame([[1.5, 1, 3.]])
with pytest.raises(exception, match=msg):
df.update(df, **bad_kwarg)
def test_update_raise_on_overlap(self):
df = DataFrame([[1.5, 1, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3],
[1.5, np.nan, 3]])
other = DataFrame([[2., np.nan],
[np.nan, 7]], index=[1, 3], columns=[1, 2])
with pytest.raises(ValueError, match="Data overlaps"):
df.update(other, errors='raise')
@pytest.mark.parametrize('raise_conflict', [True, False])
def test_update_deprecation(self, raise_conflict):
df = DataFrame([[1.5, 1, 3.]])
other = DataFrame()
with tm.assert_produces_warning(FutureWarning):
df.update(other, raise_conflict=raise_conflict)
def test_update_from_non_df(self):
d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}
df = DataFrame(d)
d['a'] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}
df = DataFrame(d)
d['a'] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
def test_update_datetime_tz(self):
# GH 25807
result = DataFrame([pd.Timestamp('2019', tz='UTC')])
result.update(result)
expected = DataFrame([pd.Timestamp('2019', tz='UTC')])
assert_frame_equal(result, expected)
def test_join_str_datetime(self):
str_dates = ['20120209', '20120222']
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
A = DataFrame(str_dates, index=range(2), columns=['aa'])
C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
tst = A.join(C, on='aa')
assert len(tst.columns) == 3
def test_join_multiindex_leftright(self):
# GH 10741
df1 = (pd.DataFrame([['a', 'x', 0.471780], ['a', 'y', 0.774908],
['a', 'z', 0.563634], ['b', 'x', -0.353756],
['b', 'y', 0.368062], ['b', 'z', -1.721840],
['c', 'x', 1], ['c', 'y', 2], ['c', 'z', 3]],
columns=['first', 'second', 'value1'])
.set_index(['first', 'second']))
df2 = (pd.DataFrame([['a', 10], ['b', 20]],
columns=['first', 'value2'])
.set_index(['first']))
exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
[-0.353756, 20], [0.368062, 20],
[-1.721840, 20],
[1.000000, np.nan], [2.000000, np.nan],
[3.000000, np.nan]],
index=df1.index, columns=['value1', 'value2'])
# these must be the same results (but columns are flipped)
assert_frame_equal(df1.join(df2, how='left'), exp)
assert_frame_equal(df2.join(df1, how='right'),
exp[['value2', 'value1']])
exp_idx = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y', 'z']],
names=['first', 'second'])
exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
[-0.353756, 20], [0.368062, 20], [-1.721840, 20]],
index=exp_idx, columns=['value1', 'value2'])
assert_frame_equal(df1.join(df2, how='right'), exp)
assert_frame_equal(df2.join(df1, how='left'),
exp[['value2', 'value1']])
def test_concat_named_keys(self):
# GH 14252
df = pd.DataFrame({'foo': [1, 2], 'bar': [0.1, 0.2]})
index = Index(['a', 'b'], name='baz')
concatted_named_from_keys = pd.concat([df, df], keys=index)
expected_named = pd.DataFrame(
{'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]},
index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]),
names=['baz', None]))
assert_frame_equal(concatted_named_from_keys, expected_named)
index_no_name = Index(['a', 'b'], name=None)
concatted_named_from_names = pd.concat(
[df, df], keys=index_no_name, names=['baz'])
assert_frame_equal(concatted_named_from_names, expected_named)
concatted_unnamed = pd.concat([df, df], keys=index_no_name)
expected_unnamed = pd.DataFrame(
{'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]},
index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]),
names=[None, None]))
assert_frame_equal(concatted_unnamed, expected_unnamed)
def test_concat_axis_parameter(self):
# GH 14369
df1 = pd.DataFrame({'A': [0.1, 0.2]}, index=range(2))
df2 = pd.DataFrame({'A': [0.3, 0.4]}, index=range(2))
# Index/row/0 DataFrame
expected_index = pd.DataFrame(
{'A': [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
concatted_index = pd.concat([df1, df2], axis='index')
assert_frame_equal(concatted_index, expected_index)
concatted_row = pd.concat([df1, df2], axis='rows')
assert_frame_equal(concatted_row, expected_index)
concatted_0 = pd.concat([df1, df2], axis=0)
assert_frame_equal(concatted_0, expected_index)
# Columns/1 DataFrame
expected_columns = pd.DataFrame(
[[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=['A', 'A'])
concatted_columns = pd.concat([df1, df2], axis='columns')
assert_frame_equal(concatted_columns, expected_columns)
concatted_1 = pd.concat([df1, df2], axis=1)
assert_frame_equal(concatted_1, expected_columns)
series1 = pd.Series([0.1, 0.2])
series2 = pd.Series([0.3, 0.4])
# Index/row/0 Series
expected_index_series = pd.Series(
[0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
concatted_index_series = pd.concat([series1, series2], axis='index')
assert_series_equal(concatted_index_series, expected_index_series)
concatted_row_series = pd.concat([series1, series2], axis='rows')
assert_series_equal(concatted_row_series, expected_index_series)
concatted_0_series = pd.concat([series1, series2], axis=0)
assert_series_equal(concatted_0_series, expected_index_series)
# Columns/1 Series
expected_columns_series = pd.DataFrame(
[[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1])
concatted_columns_series = pd.concat(
[series1, series2], axis='columns')
assert_frame_equal(concatted_columns_series, expected_columns_series)
concatted_1_series = pd.concat([series1, series2], axis=1)
assert_frame_equal(concatted_1_series, expected_columns_series)
# Testing ValueError
with pytest.raises(ValueError, match='No axis named'):
pd.concat([series1, series2], axis='something')
def test_concat_numerical_names(self):
# #15262 # #12223
df = pd.DataFrame({'col': range(9)},
dtype='int32',
index=(pd.MultiIndex
.from_product([['A0', 'A1', 'A2'],
['B0', 'B1', 'B2']],
names=[1, 2])))
result = pd.concat((df.iloc[:2, :], df.iloc[-2:, :]))
expected = pd.DataFrame({'col': [0, 1, 7, 8]},
dtype='int32',
index=pd.MultiIndex.from_tuples([('A0', 'B0'),
('A0', 'B1'),
('A2', 'B1'),
('A2', 'B2')],
names=[1, 2]))
tm.assert_frame_equal(result, expected)
def test_concat_astype_dup_col(self):
# gh 23049
df = pd.DataFrame([{'a': 'b'}])
df = pd.concat([df, df], axis=1)
result = df.astype('category')
expected = pd.DataFrame(np.array(["b", "b"]).reshape(1, 2),
columns=["a", "a"]).astype("category")
tm.assert_frame_equal(result, expected)
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(['a', 'b'], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({'A': a, 'B': b})
a = Series(['a', 'b'], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({'A': a, 'B': b})
exp = pd.DataFrame({'A': list('abab'), 'B': [0., 1., 0., 1.]},
index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
assert_series_equal(combined['A'], reordered_frame['A'])
# same index
fcopy = float_frame.copy()
fcopy['A'] = 1
del fcopy['C']
fcopy2 = float_frame.copy()
fcopy2['B'] = 0
del fcopy2['D']
combined = fcopy.combine_first(fcopy2)
assert (combined['A'] == 1).all()
assert_series_equal(combined['B'], fcopy['B'])
assert_series_equal(combined['C'], fcopy2['C'])
assert_series_equal(combined['D'], fcopy['D'])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head['A'] = 1
combined = head.combine_first(tail)
assert (combined['A'][:10] == 1).all()
# reverse overlap
tail['A'][:10] = 0
combined = tail.combine_first(head)
assert (combined['A'][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
assert_series_equal(combined['A'].reindex(f.index), f['A'])
assert_series_equal(combined['A'].reindex(g.index), g['A'])
# corner cases
comb = float_frame.combine_first(DataFrame())
assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=['b'])
result = df.combine_first(df2)
assert 'b' in result
def test_combine_first_mixed_bug(self):
idx = Index(['a', 'b', 'c', 'e'])
ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
ser2 = Series(['a', 'b', 'c', 'e'], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1,
"col2": ser2,
"col3": ser3})
idx = Index(['a', 'b', 'c', 'f'])
ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
ser2 = Series(['a', 'b', 'c', 'f'], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1,
"col2": ser2,
"col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
# gh 3016 (same as in update)
df = DataFrame([[1., 2., False, True], [4., 5., True, False]],
columns=['A', 'B', 'bool1', 'bool2'])
other = DataFrame([[45, 45]], index=[0], columns=['A', 'B'])
result = df.combine_first(other)
assert_frame_equal(result, df)
df.loc[0, 'A'] = np.nan
result = df.combine_first(other)
df.loc[0, 'A'] = 45
assert_frame_equal(result, df)
# doc example
df1 = DataFrame({'A': [1., np.nan, 3., 5., np.nan],
'B': [np.nan, 2., 3., np.nan, 6.]})
df2 = DataFrame({'A': [5., 2., 4., np.nan, 3., 7.],
'B': [np.nan, np.nan, 3., 4., 6., 8.]})
result = df1.combine_first(df2)
expected = DataFrame(
{'A': [1, 2, 3, 5, 3, 7.], 'B': [np.nan, 2, 3, 4, 6, 8]})
assert_frame_equal(result, expected)
# GH3552, return object dtype with bools
df1 = DataFrame(
[[np.nan, 3., True], [-4.6, np.nan, True], [np.nan, 7., False]])
df2 = DataFrame(
[[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])
result = df1.combine_first(df2)[2]
expected = Series([True, True, False], name=2)
assert_series_equal(result, expected)
# GH 3593, converting datetime64[ns] incorrectly
df0 = DataFrame({"a": [datetime(2000, 1, 1),
datetime(2000, 1, 2),
datetime(2000, 1, 3)]})
df1 = DataFrame({"a": [None, None, None]})
df2 = df1.combine_first(df0)
assert_frame_equal(df2, df0)
df2 = df0.combine_first(df1)
assert_frame_equal(df2, df0)
df0 = DataFrame({"a": [datetime(2000, 1, 1),
datetime(2000, 1, 2),
datetime(2000, 1, 3)]})
df1 = DataFrame({"a": [datetime(2000, 1, 2), None, None]})
df2 = df1.combine_first(df0)
result = df0.copy()
result.iloc[0, :] = df1.iloc[0, :]
assert_frame_equal(df2, result)
df2 = df0.combine_first(df1)
assert_frame_equal(df2, df0)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = pd.DataFrame([[pd.Timestamp('2011-01-01'), 2]],
columns=['a', 'b'])
dfb = pd.DataFrame([[4], [5]], columns=['b'])
assert dfa['a'].dtype == 'datetime64[ns]'
assert dfa['b'].dtype == 'int64'
res = dfa.combine_first(dfb)
exp = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'), pd.NaT],
'b': [2., 5.]}, columns=['a', 'b'])
tm.assert_frame_equal(res, exp)
assert res['a'].dtype == 'datetime64[ns]'
# ToDo: this must be int64
assert res['b'].dtype == 'float64'
res = dfa.iloc[:0].combine_first(dfb)
exp = pd.DataFrame({'a': [np.nan, np.nan],
'b': [4, 5]}, columns=['a', 'b'])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res['a'].dtype == 'float64'
# ToDo: this must be int64
assert res['b'].dtype == 'int64'
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime('20100101 01:01').tz_localize('UTC')
df1 = pd.DataFrame(columns=['UTCdatetime', 'abc'],
data=data1,
index=pd.date_range('20140627', periods=1))
data2 = pd.to_datetime('20121212 12:12').tz_localize('UTC')
df2 = pd.DataFrame(columns=['UTCdatetime', 'xyz'],
data=data2,
index=pd.date_range('20140628', periods=1))
res = df2[['UTCdatetime']].combine_first(df1)
exp = pd.DataFrame({'UTCdatetime': [pd.Timestamp('2010-01-01 01:01',
tz='UTC'),
pd.Timestamp('2012-12-12 12:12',
tz='UTC')],
'abc': [pd.Timestamp('2010-01-01 01:01:00',
tz='UTC'), pd.NaT]},
columns=['UTCdatetime', 'abc'],
index=pd.date_range('20140627', periods=2,
freq='D'))
tm.assert_frame_equal(res, exp)
assert res['UTCdatetime'].dtype == 'datetime64[ns, UTC]'
assert res['abc'].dtype == 'datetime64[ns, UTC]'
# see gh-10567
dts1 = pd.date_range('2015-01-01', '2015-01-05', tz='UTC')
df1 = pd.DataFrame({'DATE': dts1})
dts2 = pd.date_range('2015-01-03', '2015-01-05', tz='UTC')
df2 = pd.DataFrame({'DATE': dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res['DATE'].dtype == 'datetime64[ns, UTC]'
dts1 = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
df1 = pd.DataFrame({'DATE': dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(['2012-01-01', '2012-01-02',
'2012-01-03'], tz='US/Eastern')
df2 = pd.DataFrame({'DATE': dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(['2011-01-01', '2012-01-01', 'NaT',
'2012-01-02', '2011-01-03', '2011-01-04'],
tz='US/Eastern')
exp = pd.DataFrame({'DATE': exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range('2015-01-01', '2015-01-05', tz='US/Eastern')
df1 = pd.DataFrame({'DATE': dts1})
dts2 = pd.date_range('2015-01-03', '2015-01-05')
df2 = pd.DataFrame({'DATE': dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res['DATE'].dtype == 'datetime64[ns, US/Eastern]'
dts1 = pd.date_range('2015-01-01', '2015-01-02', tz='US/Eastern')
df1 = pd.DataFrame({'DATE': dts1})
dts2 = pd.date_range('2015-01-01', '2015-01-03')
df2 = pd.DataFrame({'DATE': dts2})
res = df1.combine_first(df2)
exp_dts = [pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-03')]
exp = pd.DataFrame({'DATE': exp_dts})
tm.assert_frame_equal(res, exp)
assert res['DATE'].dtype == 'object'
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(['1 day', 'NaT', '3 day', '4day'])
df1 = pd.DataFrame({'TD': data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(['10 day', '11 day', '12 day'])
df2 = pd.DataFrame({'TD': data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(['1 day', '10 day', 'NaT',
'11 day', '3 day', '4 day'])
exp = pd.DataFrame({'TD': exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res['TD'].dtype == 'timedelta64[ns]'
def test_combine_first_period(self):
data1 = pd.PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M')
df1 = pd.DataFrame({'P': data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(['2012-01-01', '2012-02',
'2012-03'], freq='M')
df2 = pd.DataFrame({'P': data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(['2011-01', '2012-01', 'NaT',
'2012-02', '2011-03', '2011-04'],
freq='M')
exp = pd.DataFrame({'P': exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res['P'].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(['2012-01-01', '2012-01-02',
'2012-01-03'], freq='D')
df2 = pd.DataFrame({'P': dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = [pd.Period('2011-01', freq='M'),
pd.Period('2012-01-01', freq='D'),
pd.NaT,
pd.Period('2012-01-02', freq='D'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')]
exp = pd.DataFrame({'P': exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res['P'].dtype == 'object'
def test_combine_first_int(self):
        # GH14687 - integer series that do not align exactly
df1 = pd.DataFrame({'a': [0, 1, 3, 5]}, dtype='int64')
df2 = pd.DataFrame({'a': [1, 4]}, dtype='int64')
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res['a'].dtype == 'int64'
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
# see gh-20699
df1 = pd.DataFrame({'isNum': [val]})
df2 = pd.DataFrame({'isBool': [True]})
res = df1.combine_first(df2)
exp = pd.DataFrame({'isBool': [True], 'isNum': [val]})
tm.assert_frame_equal(res, exp)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test': range(10)})
# it works!
pd.concat([df1, df2_obj])
class TestDataFrameUpdate:
def test_update_nan(self):
# #15593 #15617
# test 1
df1 = DataFrame({'A': [1.0, 2, 3], 'B': date_range('2000', periods=3)})
df2 = DataFrame({'A': [None, 2, 3]})
expected = df1.copy()
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
# test 2
df1 = DataFrame({'A': [1.0, None, 3],
'B': date_range('2000', periods=3)})
df2 = DataFrame({'A': [None, 2, 3]})
expected = DataFrame({'A': [1.0, 2, 3],
'B': date_range('2000', periods=3)})
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
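def _update_overwrite_sketch():
    # Added illustrative sketch (not an original pandas test): it mirrors the
    # behaviour exercised by test_update_nan above -- with overwrite=False,
    # DataFrame.update only fills positions that are NaN in the caller, and
    # NaN values in `other` never overwrite existing data.
    import numpy as np
    import pandas as pd
    left = pd.DataFrame({'A': [1.0, np.nan, 3.0]})
    other = pd.DataFrame({'A': [10.0, 2.0, np.nan]})
    left.update(other, overwrite=False)
    # left['A'] is now [1.0, 2.0, 3.0]: only the NaN hole was filled.
    return left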
| bsd-3-clause |
jungla/ICOM-fluidity-toolbox | 2D/RST/plot_T_spec_res.py | 1 | 8498 | import os, sys
import myfun
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import interpolate
import lagrangian_stats
import scipy.fftpack
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
label = 'm_50_6f'
label_50 = 'm_50_6f'
label_25 = 'm_25_1'
label_10 = 'm_10_1'
basename = 'mli'
dayi = 36
dayf = 49
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
path = './Temperature_CG/'
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
# dimensions archives
# ML exp
Xlist_50 = np.linspace(0,2000,41)
Ylist_50 = np.linspace(0,2000,41)
Xlist_25 = np.linspace(0,2000,81)
Ylist_25 = np.linspace(0,2000,81)
Xlist_10 = np.linspace(0,2000,161)
Ylist_10 = np.linspace(0,2000,161)
Zlist = np.linspace(0,-50,51)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = np.cumsum(dl)
xn_50 = len(Xlist_50)
yn_50 = len(Ylist_50)
xn_25 = len(Xlist_25)
yn_25 = len(Ylist_25)
xn_10 = len(Xlist_10)
yn_10 = len(Ylist_10)
zn = len(Zlist)
dx_50 = np.diff(Xlist_50)
dx_25 = np.diff(Xlist_25)
dx_10 = np.diff(Xlist_10)
for time in range(dayi,dayf,days):
print 'time:', time
tlabel = str(time)
while len(tlabel) < 3: tlabel = '0'+tlabel
#Temperature_CG_m_50_6e_9.csv
file0_50 = path+'Temperature_CG_'+label_50+'_'+str(time)+'.csv'
file0_25 = path+'Temperature_CG_'+label_25+'_'+str(time)+'.csv'
file0_10 = path+'Temperature_CG_'+label_10+'_'+str(time)+'.csv'
file1 = 'Temperature_CG_'+label+'_'+str(time)
file1_50 = 'Temperature_CG_'+label_50+'_'+str(time)
file1_25 = 'Temperature_CG_'+label_25+'_'+str(time)
file1_10 = 'Temperature_CG_'+label_10+'_'+str(time)
#
# xn_50 = 101
# yn_50 = 101
# xn_25 = 101
# yn_25 = 101
T_50 = lagrangian_stats.read_Scalar(file0_50,zn,xn_50,yn_50)
T_25 = lagrangian_stats.read_Scalar(file0_25,zn,xn_25,yn_25)
T_10 = lagrangian_stats.read_Scalar(file0_10,zn,xn_10,yn_10)
# xn_50 = 41
# yn_50 = 41
# xn_25 = 81
# yn_25 = 81
# T_50 = T_50[:,0:xn_50,0:yn_50]
# T_25 = T_25[:,0:xn_25,0:yn_25]
# Xlist_50 = np.linspace(0,2000,xn_50)
# Ylist_50 = np.linspace(0,2000,yn_50)
# Xlist_25 = np.linspace(0,2000,xn_25)
# Ylist_25 = np.linspace(0,2000,yn_25)
FT_50 = np.zeros((xn_50/1,yn_50))
FT_25 = np.zeros((xn_25/1,yn_25))
FT_10 = np.zeros((xn_10/1,yn_10))
#
for k in range(1):
for j in range(len(Ylist_50)):
tempfft = scipy.fftpack.fft(T_50[k,j,:],xn_50)
FT_50[:,j] = abs(tempfft)**2
w_50 = scipy.fftpack.fftfreq(xn_50, dx_50[1])
# w_50 = scipy.fftpack.fftshift(w_50)
FTp_50 = np.mean(FT_50,1)/xn_50
for j in range(len(Ylist_25)):
tempfft = scipy.fftpack.fft(T_25[k,j,:],xn_25)
FT_25[:,j] = abs(tempfft)**2
w_25 = scipy.fftpack.fftfreq(xn_25, dx_25[1])
# w_25 = scipy.fftpack.fftshift(w_25)
FTp_25 = np.mean(FT_25,1)/xn_25
for j in range(len(Ylist_10)):
tempfft = scipy.fftpack.fft(T_10[k,j,:],xn_10)
FT_10[:,j] = abs(tempfft)**2
w_10 = scipy.fftpack.fftfreq(xn_10, dx_10[1])
# w_10 = scipy.fftpack.fftshift(w_10)
FTp_10 = np.mean(FT_10,1)/xn_10
fig = plt.figure(figsize=(10,8))
p50, = plt.loglog(w_50[w_50>0], FTp_50[w_50>0],'b',linewidth=2)
p25, = plt.loglog(w_25[w_25>0], FTp_25[w_25>0],'r',linewidth=2)
p10, = plt.loglog(w_10[w_10>0], FTp_10[w_10>0],'k',linewidth=2)
plt.legend([p50,p25,p10],['$B50_m$','$B25_m$','$B10_m$'],fontsize=24,loc=3)
# pU, = plt.plot(w_50, FTp_50,'b',linewidth=2)
# pU, = plt.plot(w_25, FTp_25,'r',linewidth=2)
# plt.ylim(0,1)
# plt.plot([0.5*10**-3, 4*10**-3],[4*10**-3, 0.5*10**-3],'k',linewidth=1.5)
# plt.plot([0.5*10**-3, 4*10**-3],[3.*4*10**-3, 0.5*10**-3],'k',linewidth=1.5)
plt.plot([4*10**-3, 4*10**-2],[4*10**-1, 4*10**-(1+5/3.)],'k',linewidth=1.5)
plt.plot([4*10**-3, 4*10**-2],[4*10**-1, 4*10**-(1+3.)],'k',linewidth=1.5)
# plt.plot([4*10**-3, 4*10**-2],[4*10**-1, 4*10**-(1+1.)],'k',linewidth=1.5)
plt.text(5*10**-2, 4*10**-(1+5/3.), '-5/3',fontsize=24)
plt.text(5*10**-2, 4*10**-(1+3.), '-3',fontsize=24)
# plt.text(5*10**-2, 4*10**-(1+1.), '-1',fontsize=24)
# plt.text(0.3*10**-3, 3.*4*10**-3, '-3')
# plt.text(0.3*10**-3, 5./3.*4*10**-3, '-5/3')
plt.xscale('log')
# pU, = plt.loglog(w_10[w_10>0], FTp_10[w_10>0],'k.',linewidth=2)
plt.xlabel(r'k $[m^{-1}]$',fontsize=26)
plt.ylabel('Temperature PSD',fontsize=24)
# plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(1/np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)),fontsize=16)
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
plt.yticks(fontsize=24)
# plt.xticks(fontsize=24)
plt.xticks([0.1,0.01,0.001,1/500.],[10**-1,10**-2,10**-3,1/500.],fontsize=24)
plt.xlim([1/2000.,1/10.])
plt.savefig('./plot/'+label+'/'+file1+'_'+str(Zlist[k])+'_spec.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1+'_'+str(Zlist[k])+'_spec.eps'
plt.close()
#
# PDF
vals50,bins50 = np.histogram(T_50[k,:,:],50,(18.6,20.1),normed=True)
vals25,bins25 = np.histogram(T_25[k,:,:],50,(18.6,20.1),normed=True)
vals10,bins10 = np.histogram(T_10[k,:,:],50,(18.6,20.1),normed=True)
bins = np.linspace(18.6,19.8,50)
fig = plt.figure(figsize=(8,8))
ph50, = plt.plot(bins,vals50,'k--')
ph25, = plt.plot(bins,vals25,'k.-')
ph10, = plt.plot(bins,vals10,'k',linewidth=2)
plt.ylabel(r'PDF',fontsize=22)
plt.xlabel('Temperature $[^\circ C]$',fontsize=22)
# plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(1/np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)),fontsize=16)
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
plt.yticks(fontsize=20)
plt.xticks(np.linspace(18.6,20.1,7),np.linspace(18.6,20.1,7),fontsize=20)
plt.tight_layout()
plt.legend([ph50,ph25,ph10],['$B50_m$','$B25_m$','$B10_m$'],loc=2,fontsize=20)
plt.savefig('./plot/'+label+'/'+file1+'_'+str(Zlist[k])+'_hist.eps')
print './plot/'+label+'/'+file1+'_'+str(Zlist[k])+'_hist.eps'
plt.close()
Tm = 18.6 #min(np.min(T_10[k,:,:]),np.min(T_25[k,:,:]),np.min(T_50[k,:,:]))
TM = 19.8 #max(np.max(T_10[k,:,:]),np.max(T_25[k,:,:]),np.max(T_50[k,:,:]))
# print Tm,TM
plt.contourf(Xlist_50/1000,Ylist_50/1000,T_50[k,:,:],np.linspace(Tm,TM,30),extend='both')
cb = plt.colorbar(ticks=np.linspace(Tm,TM,5))
cb.ax.tick_params(labelsize=22)
plt.xlabel('X [km]',fontsize=24)
plt.ylabel('Y [km]',fontsize=24)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.axes().set_aspect('equal')
plt.xlim(0,2)
plt.ylim(0,2)
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/'+file1_50+'_'+str(Zlist[k])+'.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1_50+'_'+str(Zlist[k])+'.eps'
plt.close()
###
plt.contourf(Xlist_25/1000,Ylist_25/1000,T_25[k,:,:],np.linspace(Tm,TM,30),extend='both')
cb = plt.colorbar(ticks=np.linspace(Tm,TM,5))
cb.ax.tick_params(labelsize=22)
plt.xlabel('X [km]',fontsize=24)
plt.ylabel('Y [km]',fontsize=24)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.axes().set_aspect('equal')
plt.xlim(0,2)
plt.ylim(0,2)
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/'+file1_25+'_'+str(Zlist[k])+'.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1_25+'_'+str(Zlist[k])+'.eps'
plt.close()
##
plt.contourf(Xlist_10/1000,Ylist_10/1000,T_10[k,:,:],np.linspace(Tm,TM,30),extend='both')
cb = plt.colorbar(ticks=np.linspace(Tm,TM,5))
cb.ax.tick_params(labelsize=22)
plt.xlabel('X [km]',fontsize=24)
plt.ylabel('Y [km]',fontsize=24)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.axes().set_aspect('equal')
plt.xlim(0,2)
plt.ylim(0,2)
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/'+file1_10+'_'+str(Zlist[k])+'.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1_10+'_'+str(Zlist[k])+'.eps'
plt.close()
###
##
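def _psd_sketch_example():
    # Added illustrative sketch (not part of the original analysis): a minimal
    # version of the along-x power-spectrum estimate used above -- FFT each
    # row of a 2-D slice, take |FFT|^2, average over the rows, normalise by
    # the number of x points and keep the positive wavenumbers from fftfreq.
    # The 41x41 shape and 50 m spacing are assumptions mirroring the B50 case;
    # np and scipy.fftpack are the modules imported at the top of this script.
    ny, nx, dx = 41, 41, 50.0
    field = np.random.rand(ny, nx)  # stand-in for T_50[k, :, :]
    spec = np.zeros(nx)
    for j in range(ny):
        spec += np.abs(scipy.fftpack.fft(field[j, :], nx)) ** 2
    spec /= (ny * nx)  # row average plus the 1/xn normalisation used above
    k_wav = scipy.fftpack.fftfreq(nx, dx)
    return k_wav[k_wav > 0], spec[k_wav > 0]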
| gpl-2.0 |
trankmichael/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
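# Added illustrative note (not part of the original implementation): with a
# 4-bit hash for brevity, a sorted tree [0b0010, 0b0101, 0b0110, 0b1100] and a
# query 0b0111, masking to the 2 most significant bits maps the query to the
# interval [0b0100, 0b0111], so np.searchsorted yields the slice (1, 3) -- the
# two stored hashes sharing the prefix '01'. The loop above binary-searches
# over the prefix length to find the longest such non-empty match.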
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
    The LSH forest data structure has been implemented using sorted
    arrays, binary search and 32-bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
        Lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
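def _lshforest_usage_sketch():
    # Added usage sketch (illustrative only, toy data made up here): besides
    # the kneighbors() example in the class docstring, the index also supports
    # radius queries and incremental insertion via partial_fit().
    rng = np.random.RandomState(0)
    X_index = rng.rand(20, 3)
    X_more = rng.rand(5, 3)
    X_query = rng.rand(2, 3)
    lshf = LSHForest(n_estimators=5, random_state=0).fit(X_index)
    lshf.partial_fit(X_more)  # appends the new rows to the fitted trees
    dist, ind = lshf.radius_neighbors(X_query, radius=0.3)
    # dist and ind are object arrays: one variable-length array per query row.
    return dist, ind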
| bsd-3-clause |
dankolbman/BCIM | src/post.py | 1 | 7125 | import glob
import os
import sys
import re
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import python.DataIO as DataIO
import python.graphics as graphics
import python.clusters as clusters
import python.counts as counts
# Format settings
from matplotlib import rc
font = {'size' : 32}
rc('font', **font)
rc('lines', **{'linewidth' : '4' } )
rc('axes', **{'labelsize' : '28', 'titlesize' : 32 } )
rc('axes', color_cycle=['#E82C2C', '#245BFF', 'c', 'm'])
rc('xtick', **{'labelsize' : '22' } )
rc('ytick', **{'labelsize' : '22', 'major.size' : '10', 'minor.size' : '10' } )
def averageMSD(path, out_path=None):
"""
Computes the average MSD of an experiment given an experiment's directory path
Parameters
----------
path
the path to an experiment's output directory
out_path : string, optional
the path to save the average msd output to
Default is 'avg_msd.dat' in the experiment's directory
"""
# Set out file to the experiment's directory if not specified
if( out_path == None ):
out_path = os.path.join(path, 'avg_msd.dat')
# Read in msd data from each file
msds = []
  # Walk the experiment's directory tree to find the msd data files
for root, dirs, files in os.walk(path):
for f in files:
if f == "msd.dat":
msd_file = os.path.join(root, f)
msds.append( np.loadtxt( msd_file ) )
# Average the msds
N = len(msds)
avg_msd = msds[0]/N
if len(msds) > 1:
for msd in msds[1:]:
avg_msd += msd/N
np.savetxt( out_path, avg_msd, header='# [ time msd ... ]')
return avg_msd
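# Added note (illustrative; the trial directory names are assumptions based on
# main() below): averageMSD() expects a layout such as
#   experiment_dir/
#       param_dim.dat
#       trial1/msd.dat
#       trial2/msd.dat
# where every msd.dat shares the same time grid, so the arrays can be averaged
# element-wise; the result is written to avg_msd.dat inside experiment_dir.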
def param_str1(params):
"""
Creates a text box description of a system parameter dictionary
Parameters
----------
params : Dict
The parameter dictionary (usually dimensionless parameters)
Returns
-------
A string of the parameters formatted for a textbox summary
"""
pstr = ''
pstr += 'Particles: {0}\n'.format(params['npart'])
pstr += 'Packing Frac: {0}\n'.format(params['phi'])
pstr += 'Repulsion: {0}\n'.format(params['rep'])
pstr += 'Adhesion: {0}\n'.format(params['adh'])
pstr += 'Propulsion: {0}\n'.format(params['prop'])
return pstr
def param_str2(params):
pstr = ''
pstr += 'Contact: {0}\n'.format(params['contact'])
pstr += 'Time unit: {0}\n'.format(params['utime'])
pstr += 'pretrad: {0}\n'.format(params['pretrad'])
pstr += 'prerotd: {0}\n'.format(params['prerotd'])
return pstr
# Do all the post processing
def main(args):
"""
Does all post processing for an experiment
Computes the average MSD from msd files in experiment directory
Then plots the average MSD on log-log
Reads the parameter file and puts a textbox under the MSD with the experiment
parameters.
Parameters
----------
path
a path of an experiment directory
"""
path = args[1]
# Check for that the experiment exists
if not os.path.exists(path):
raise IOError('The specified experiment path does not exist')
elif not os.path.exists(os.path.join(path, 'param_dim.dat')):
raise IOError('There is no dimensionless parameter file in the specified \
directory')
# Compute average msd
avg_msd = averageMSD(path)
  # 5 x 2 grid
gs = gridspec.GridSpec(5,2)
# Read parameters
params = dict()
for f in os.listdir(path):
if f == 'param_dim.dat':
params = DataIO.read_params(os.path.join(path, f))
break
if False:
fig = plt.figure(dpi=72, figsize=( 12,3))
gs = gridspec.GridSpec(1,4)
ax = plt.subplot(gs[0], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 99)
graphics.plot_config(parts, params)
ax = plt.subplot(gs[1], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 80)
graphics.plot_config(parts, params)
ax = plt.subplot(gs[2], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 70)
graphics.plot_config(parts, params)
ax = plt.subplot(gs[3], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 1)
graphics.plot_config(parts, params)
#plt.suptitle('$\phi=0.40$')
#plt.tight_layout()
plt.savefig('configs.png')
plt.show()
exit()
gs = gridspec.GridSpec(5,2)
fig = plt.figure(dpi=72, figsize=( 8,6))
ax = plt.subplot(gs[0:4, :])
# MSD plot
graphics.plot_msd(avg_msd)
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
# Parameters
ax = plt.subplot(gs[-1,0:1])
plt.axis('off')
# Plot parameter in textbox below MSD plot
fig.text(0.1, 0.0, param_str1(params), fontsize=18)
fig.text(0.4, 0.0, param_str2(params), fontsize=18)
# Save
plt.savefig(os.path.join(path, 'overview.png'))
plt.show()
# Final conf plot
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'))
ax = plt.subplot(gs[:], projection='3d')
plt.title('Final System Configuration')
graphics.plot_config(parts, params)
plt.savefig(os.path.join(path, 'configuration.png'))
plt.show()
# Cluster sizes
size_hist = clusters.size_hist(parts, params, eps=1.1)
graphics.plot_cluster_hist( size_hist, params )
plt.tight_layout()
plt.savefig(os.path.join(path, 'clusters.png'))
plt.show()
# Cell counts
t, count = counts.counts( os.path.join(path, 'trial1/parts.dat'), params )
graphics.plot_counts(t, count, params)
plt.show()
# Species cluster sizes
if False:
sp_hist = clusters.specie_size(parts, params, 1.1)
f = plt.figure( figsize=( 12,6 ) )
f.text(0.5, 0.04, 'Cluster Size (Cells)', ha='center', va='center')
ax = f.add_subplot( 1, 2, 1)
graphics.plot_cluster_hist( sp_hist[0], params, color='#E82C2C' )
ax.set_title('Healthy')
ax.set_xlabel('')
ax = f.add_subplot( 1, 2, 2)
graphics.plot_cluster_hist( sp_hist[1], params, color='#245BFF' )
ax.set_title('Cancerous')
ax.set_xlabel('')
ax.set_ylabel('')
plt.suptitle('Contact Distance, $\epsilon=0.1\sigma$')
plt.tight_layout()
plt.savefig(os.path.join(path, 'specie_clusters.png'))
plt.show()
vel_hist = clusters.vel_hist( parts, params, eps=1.1 )
graphics.plot_cluster_hist( vel_hist, params )
plt.title('Cluster Speed')
plt.ylabel('Mean Speed')
plt.tight_layout()
plt.savefig(os.path.join(path, 'cluster_speeds.png'))
plt.show()
#t, avg_size = clusters.cluster_time( os.path.join(path, 'trial1/parts.dat'), params )
#print(os.path.join( path, 'cluster_sizes.txt'))
#np.savetxt( os.path.join( path, 'cluster_sizes.txt'), np.column_stack( (t, avg_size) ))
#plt.plot(t, avg_size)
#plt.show()
if __name__ == "__main__":
if(len(sys.argv) < 2):
print("Usage: python post.py experiment_dir/")
else:
main(sys.argv)
| mit |
sugartom/tensorflow-alien | tensorflow/examples/learn/text_classification.py | 39 | 5106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = encoders.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
  # word_list ends up as a list of tensors of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
  # Given the encoding of the RNN, take the encoding of the last step (i.e.
  # the final hidden state of the network) and pass it as features for
  # logistic regression over the output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
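# Added illustrative note (not part of the original example): the
# VocabularyProcessor used in main() turns each document into a fixed-length
# vector of MAX_DOCUMENT_LENGTH word ids, truncating long documents and
# padding short ones with the reserved id 0, e.g.
#   vp = learn.preprocessing.VocabularyProcessor(max_document_length=5)
#   ids = np.array(list(vp.fit_transform(['a small example document'])))
#   # ids.shape == (1, 5); the last slot holds the 0 padding id.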
| apache-2.0 |
aflaxman/scikit-learn | sklearn/metrics/regression.py | 47 | 19967 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Karan Desai <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of outputs "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average',
'variance_weighted')
if isinstance(multioutput, string_types):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}".format(
allowed_multioutput_str,
multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_log_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared logarithmic error regression loss
Read more in the :ref:`User Guide <mean_squared_log_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average'] \
or array-like of shape = (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.039...
>>> y_true = [[0.5, 1], [1, 2], [7, 6]]
>>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.044...
>>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.004..., 0.083...])
>>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.060...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
    if not (y_true >= 0).all() or not (y_pred >= 0).all():
raise ValueError("Mean Squared Logarithmic Error cannot be used when "
"targets contain negative values.")
return mean_squared_error(np.log(y_true + 1), np.log(y_pred + 1),
sample_weight, multioutput)
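# Illustrative aside (not from the original source): since log(y + 1) equals
# np.log1p(y), mean_squared_log_error is, up to floating-point rounding,
# mean_squared_error evaluated on log1p-transformed targets, e.g.
#   mean_squared_log_error([3, 5], [2.5, 5])
#   == mean_squared_error(np.log1p([3, 5]), np.log1p([2.5, 5]))
# which only makes sense for non-negative targets, hence the check above.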
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
        A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average() results in the uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred, sample_weight=None,
multioutput="uniform_average"):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
    would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted')
... # doctest: +ELLIPSIS
0.938...
>>> y_true = [1,2,3]
>>> y_pred = [1,2,3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1,2,3]
>>> y_pred = [2,2,2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1,2,3]
>>> y_pred = [3,2,1]
>>> r2_score(y_true, y_pred)
-3.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights results in the uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
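# Illustrative aside (not from the original source): with an array of weights
# passed as `multioutput`, the value returned above is just the weighted
# average of the per-output scores, i.e. (the weights here are hypothetical)
#   raw = r2_score(y_true, y_pred, multioutput='raw_values')
#   np.average(raw, weights=[0.3, 0.7])   # same as multioutput=[0.3, 0.7]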
| bsd-3-clause |
capturePointer/vigra | vigranumpy/examples/grid_graph_shortestpath.py | 8 | 3978 | import vigra
import vigra.graphs as vigraph
import pylab
import numpy
np=numpy
import sys
import matplotlib
import pylab as plt
import math
from matplotlib.widgets import Slider, Button, RadioButtons
def makeWeights(gamma):
global hessian,gradmag,gridGraph
print "hessian",hessian.min(),hessian.max()
print "raw ",raw.min(),raw.max()
wImg= numpy.exp((gradmag**0.5)*gamma*-1.0)#**0.5
wImg = numpy.array(wImg).astype(numpy.float32)
w=vigra.graphs.implicitMeanEdgeMap(gridGraph,wImg)
return w
def makeVisuImage(path,img):
coords = (path[:,0],path[:,1])
visuimg =img.copy()
iR=visuimg[:,:,0]
iG=visuimg[:,:,1]
iB=visuimg[:,:,2]
iR[coords]=255
iG[coords]=0
iB[coords]=0
visuimg-=visuimg.min()
visuimg/=visuimg.max()
return visuimg
f = '100075.jpg'
f = '69015.jpg'
#f = "/media/tbeier/GSP1RMCPRFR/iso.03530.png"
img = vigra.impex.readImage(f)
print img.shape
if(img.shape[2]==1):
img = numpy.concatenate([img]*3,axis=2)
imgLab = img
imgLab = vigra.taggedView(imgLab,'xyc')
else:
imgLab = vigra.colors.transform_RGB2Lab(img)
sigma = 1.0
imgLab-=imgLab.min()
imgLab/=imgLab.max()
imgLab*=255
img-=img.min()
img/=img.max()
img*=255
print imgLab.shape
print "interpolate image"
imgLabSmall = imgLab
# make a few edge weights
gradmag = numpy.squeeze(vigra.filters.gaussianGradientMagnitude(imgLabSmall,sigma))
hessian = numpy.squeeze(vigra.filters.hessianOfGaussianEigenvalues(imgLabSmall[:,:,0],sigma))[:,:,0]
hessian-=hessian.min()
raw = 256-imgLabSmall[:,:,0].copy()
gridGraph = vigraph.gridGraph(imgLab.shape[:2],False)
weights = makeWeights(3.0)
pathFinder = vigraph.ShortestPathPathDijkstra(gridGraph)
visuimg =img.copy()
ax = plt.gca()
fig = plt.gcf()
visuimg-=visuimg.min()
visuimg/=visuimg.max()
implot = ax.imshow(numpy.swapaxes(visuimg,0,1),cmap='gray')
clickList=[]
frozen = False
axslider = plt.axes([0.0, 0.00, 0.4, 0.075])
axfreeze = plt.axes([0.6, 0.00, 0.1, 0.075])
axunfreeze = plt.axes([0.8, 0.00, 0.1, 0.075])
bfreeze = Button(axfreeze, 'freeze')
bunfreeze = Button(axunfreeze, 'unfreeze and clear')
sgamma = Slider(axslider, 'gamma', 0.01, 5.0, valinit=1.0)
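# Interaction overview (descriptive comment): clicking two points picks the
# source and target nodes of the grid graph; the shortest path between them is
# found with Dijkstra on edge weights derived from exp(-gamma * gradmag**0.5)
# (see makeWeights) and drawn in red on the image. Moving the 'gamma' slider
# rebuilds the weights and re-runs the search; 'freeze' locks the current
# clicks and 'unfreeze and clear' resets them.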
def onclick(event):
global clickList
global weights
global img
if event.xdata != None and event.ydata != None:
xRaw,yRaw = event.xdata,event.ydata
if not frozen and xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
x,y = long(math.floor(event.xdata)),long(math.floor(event.ydata))
clickList.append((x,y))
if len(clickList)==2:
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
weights = makeWeights(sgamma.val)
#path = pathFinder.run(weights, source,target).path(pathType='coordinates')
path = pathFinder.run(weights, source).path(pathType='coordinates',target=target)
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
def freeze(event):
global frozen
frozen=True
def unfreeze(event):
global frozen,clickList
frozen=False
clickList = []
def onslide(event):
global img,gradmag,weights,clickList,sgamma
weights = makeWeights(sgamma.val)
print "onslide",clickList
if len(clickList)>=2:
print "we have path"
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
path = pathFinder.run(weights, source,target).path(pathType='coordinates')
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
bfreeze.on_clicked(freeze)
bunfreeze.on_clicked(unfreeze)
sgamma.on_changed(onslide)
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
| mit |
trungnt13/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
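# Note: np.newaxis keeps X two-dimensional with shape (n_samples, 1), the
# layout scikit-learn estimators expect even for a single feature.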
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
SuperJohn/scikit-class | grid_search.py | 6 | 1243 | import pandas as pd
import numpy as np
df = pd.read_csv('tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']
fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
p = Pipeline(steps=[('counts', CountVectorizer()),
('feature_selection', SelectKBest(chi2)),
('multinomialnb', MultinomialNB())])
from sklearn.grid_search import GridSearchCV
parameters = {
'counts__max_df': (0.5, 0.75, 1.0),
'counts__min_df': (1, 2, 3),
'counts__ngram_range': ((1,1), (1,2)),
# 'feature_selection__k': (1000, 10000, 100000)
}
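# Note on the keys above: GridSearchCV addresses a Pipeline step's parameter as
# '<step name>__<parameter>', so 'counts__max_df' tunes max_df of the
# CountVectorizer registered as 'counts', and 'feature_selection__k' would tune
# the SelectKBest step.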
grid_search = GridSearchCV(p, parameters, n_jobs=1, verbose=1, cv=10)
grid_search.fit(fixed_text, fixed_target)
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| gpl-2.0 |
radiasoft/radtrack | experimental/hermite/testHermite02.py | 1 | 6919 | #
# Test executable #2 to exercise the Gauss-Hermite class
# Here, we fit a Gauss-Hermite expansion to an arbitrary profile.
# The SciPy least squares method is used.
#
# Copyright (c) 2013 RadiaBeam Technologies. All rights reserved
#
# python imports
import math
# SciPy imports
import numpy as np
import matplotlib.pyplot as plt
# RadiaBeam imports
from radtrack.fields import RbGaussHermiteMN
# SciPy imports
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
# ---------------------------------------------------------
# Make sure the residual() method has access to necessary
# 'global' data:
global mMax, nMax, numFuncCalls, hermiteSeries
# Specify the central laser wavelength
lambda0 = 10.e-06
# Need a place holder for the waist size
w0 = 10.*lambda0
# Define the maximum order(s) of the Hermite expansion
mMax = 0 # horizontal
nMax = 0 # vertical
# Create an instance of the Hermite expansion class
hermiteSeries = RbGaussHermiteMN.RbGaussHermiteMN(lambda0,w0,w0,0.)
# Specify the desired grid size
numX = 50
numY = 50
nCells = numX * numY
# load up the x,y locations of the mesh
xMin = -4.*w0
xMax = 4.*w0
yMin = xMin
yMax = xMax
xArr = np.zeros(numX)
for iLoop in range(numX):
xArr[iLoop] = xMin + iLoop * (xMax-xMin) / (numX-1)
yArr = np.zeros(numY)
for jLoop in range(numY):
yArr[jLoop] = yMin + jLoop * (yMax-yMin) / (numY-1)
xGrid = np.zeros((numX, numY))
yGrid = np.zeros((numX, numY))
for iLoop in range(numX):
for jLoop in range(numY):
xGrid[iLoop,jLoop] = xMin + iLoop * (xMax-xMin) / (numX-1)
yGrid[iLoop,jLoop] = yMin + jLoop * (yMax-yMin) / (numY-1)
# Create transverse field profile (#1 simple Gaussian)
ExGrid = np.zeros((numX, numY))
exMax = 1.0e+09 # this gets scaled out before plotting/fitting
phi1 = math.pi/17.5
xs1 = 1.07 * w0
ys1 = -0.98 * w0
waistx = 0.9 * w0
waisty = 1.8 * w0
maxValue = 0.
for iLoop in range(numX):
for jLoop in range(numY):
xArg = (xArr[iLoop]-xs1)*math.cos(phi1) + (yArr[jLoop]-ys1)*math.sin(phi1)
yArg = -(xArr[iLoop]-xs1)*math.sin(phi1) + (yArr[jLoop]-ys1)*math.cos(phi1)
ExGrid[iLoop, jLoop] = exMax*math.exp(-(xArg/waistx)**2)*math.exp(-(yArg/waisty)**2)
maxValue = max(ExGrid[iLoop, jLoop], maxValue)
# Divide out the maximum value
ExGrid /= maxValue
# Calculate residuals for the least squares analysis
# params - array of fitting parameters
numFuncCalls = 0
def residuals(params, e, x, y):
global mMax, nMax, numFuncCalls, hermiteSeries
hermiteSeries.setWaistX(params[0])
hermiteSeries.setWaistY(params[1])
hermiteSeries.setWRotAngle(params[2])
hermiteSeries.setXShift(params[3])
hermiteSeries.setYShift(params[4])
hermiteSeries.setMCoef(params[5:mMax+6])
hermiteSeries.setNCoef(params[mMax+6:mMax+nMax+7])
# let the user know what's going on if many function calls are required
if numFuncCalls == 0:
print ' '
print 'Number of calls to method residual():'
numFuncCalls += 1
if 10*int(numFuncCalls/10.) == numFuncCalls:
print ' ', numFuncCalls
return e-hermiteSeries.evaluateEx(x,y,0.,0.)
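# Descriptive note: residuals() returns the vector of pointwise differences
# between the sampled field and the current Gauss-Hermite expansion; leastsq
# below minimizes its sum of squares over the parameter vector laid out as
# [waist_x, waist_y, rotation, x_shift, y_shift, m-coefficients, n-coefficients].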
# plot the transverse field profile
ncLevels = 12
vLevels = [0.001, 0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 1.05]
plt.figure(1)
cs1 = plt.contourf(xGrid, yGrid, ExGrid, vLevels)
plt.colorbar(cs1)
plt.axis([xMin, xMax, yMin, yMax])
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title('x-section #1: Gaussian w/ slight asymmetry & rotation')
# choose initial guesses for all fitting parameters
# also, specify the scale of variations for each
paramGuess = np.zeros(mMax+nMax+7)
paramGuess[0] = 1.2*w0 # horizontal waist
paramGuess[1] = 0.9*w0 # vertical waist
paramGuess[2] = 0.0 # rotation angle
paramGuess[3] = 0.0 # horizontal shift
paramGuess[4] = 0.0 # vertical shift
paramGuess[5] = 1.0 # 0th horiz. coeff
for iLoop in range(6,mMax+6):
paramGuess[iLoop] = 0.0 # other horiz. coeff's
paramGuess[mMax+6] = 1.0 # 0th vertical coeff
for iLoop in range(mMax+7,mMax+nMax+7):
paramGuess[iLoop] = 0.0 # other vertical coeff's
# invoke the least squares algorithm
result = leastsq(residuals, paramGuess, \
args=(np.reshape(ExGrid,nCells), \
np.reshape(xGrid,nCells), \
np.reshape(yGrid,nCells)), \
full_output=True, ftol=1e-6, \
maxfev=200)
parFit = result[0]
nEvals = result[2]['nfev']
resVals = result[2]['fvec']
message = result[3]
iError = result[4]
print ' '
print ' iError = ', iError
print ' message = ', message
print ' nEvals = ', nEvals
print ' resVals = ', resVals
# load the results into named variables (for clarity)
wxFit = parFit[0]
wyFit = parFit[1]
tmpPhi = parFit[2]
phiFit = tmpPhi - 2.*math.pi*int(0.5*tmpPhi/math.pi)
if phiFit > 2.*math.pi: phiFit -= 2.*math.pi
if phiFit < 0.: phiFit += 2.*math.pi
xsFit = parFit[3]
ysFit = parFit[4]
mCFit = np.zeros(mMax+1)
mCFit[0:mMax+1] = parFit[5:mMax+6]
nCFit = np.zeros(nMax+1)
nCFit[0:nMax+1] = parFit[mMax+6:mMax+nMax+7]
# check the results
print ' '
print 'The least squares minimimization has completed:'
print ' wx = ', waistx, '; ', wxFit
print ' wy = ', waisty, '; ', wyFit
print ' phi = ', phi1, '; ', phiFit
print ' xS = ', xs1, '; ', xsFit
print ' yS = ', ys1, '; ', ysFit
print ' C0x * C0y = 1.0; ', mCFit[0]*nCFit[0]
# print ' C1x = 0.0 ; ', mCFit[1]
# print ' C2x = 0.0 ; ', mCFit[2]
# print ' C3x = 0.0 ; ', mCFit[3]
# print ' C4x = 0.0 ; ', mCFit[4]
# print ' C1y = 0.0 ; ', nCFit[1]
# print ' C2y = 0.0 ; ', nCFit[2]
# print ' C3y = 0.0 ; ', nCFit[3]
# print ' C4y = 0.0 ; ', nCFit[4]
# load up the fitted electric field at all grid points
hermiteSeries.setWaistX(wxFit)
hermiteSeries.setWaistY(wyFit)
hermiteSeries.setWRotAngle(phiFit)
hermiteSeries.setXShift(xsFit)
hermiteSeries.setYShift(ysFit)
hermiteSeries.setMCoef(mCFit)
hermiteSeries.setNCoef(nCFit)
ExFit = np.reshape(hermiteSeries.evaluateEx(
np.reshape(xGrid,nCells), \
np.reshape(yGrid,nCells), 0., 0.), \
(numX, numY))
# plot the fitted transverse field profile
plt.figure(2)
cs2 = plt.contourf(xGrid, yGrid, ExFit, vLevels)
plt.colorbar(cs2)
plt.axis([xMin, xMax, yMin, yMax])
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title('x-section #1: Result of the least squares fit')
# plot the transverse profile of the difference
plt.figure(3)
cs3 = plt.contourf(xGrid, yGrid, ExFit-ExGrid, ncLevels)
plt.colorbar(cs3)
plt.axis([xMin, xMax, yMin, yMax])
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title('x-section #1: Absolute differences in Ex')
plt.show()
| apache-2.0 |
ritviksahajpal/Py6S | Py6S/SixSHelpers/all_angles.py | 1 | 13499 | # This file is part of Py6S.
#
# Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file.
#
# Py6S is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Py6S is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Py6S. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from matplotlib.pyplot import *
import itertools
from multiprocessing.dummy import Pool
import copy
class Angles:
@classmethod
def run360(cls, s, solar_or_view, na=36, nz=10, output_name=None, n=None):
"""Runs Py6S for lots of angles to produce a polar contour plot.
The calls to 6S for each angle will be run in parallel, making this function far faster than simply
running a for loop over all of the angles.
Arguments:
* ``s`` -- A :class:`.SixS` instance configured with all of the parameters you want to run the simulation with
* ``solar_or_view`` -- Set to ``'solar'`` if you want to iterate over the solar zenith/azimuth angles or ``'view'`` if you want to iterate over the view zenith/azimuth angles
* ``output_name`` -- (Optional) The name of the output from the 6S simulation to plot. This should be a string containing exactly what you would put after ``s.outputs`` to print the output. For example `pixel_reflectance`.
* ``na`` -- (Optional) The number of azimuth angles to iterate over to generate the data for the plot (defaults to 36, giving data every 10 degrees)
* ``nz`` -- (Optional) The number of zenith angles to iterate over to generate the data for the plot (defaults to 10, giving data every 10 degrees)
* ``n`` -- (Optional) The number of threads to run in parallel. This defaults to the number of CPU cores in your system, and is unlikely to need changing.
For example::
s = SixS()
s.ground_reflectance = GroundReflectance.HomogeneousWalthall(0.48, 0.50, 2.95, 0.6)
s.geometry.solar_z = 30
s.geometry.solar_a = 0
data = SixSHelpers.Angles.run360(s, 'view', output_name='pixel_reflectance')
"""
results = []
azimuths = np.linspace(0, 360, na)
zeniths = np.linspace(0, 89, nz)
def f(args):
azimuth, zenith = args
s.outputs = None
a = copy.deepcopy(s)
if solar_or_view == 'view':
a.geometry.view_a = azimuth
a.geometry.view_z = zenith
elif solar_or_view == 'solar':
a.geometry.solar_a = azimuth
a.geometry.solar_z = zenith
else:
raise ParameterException("all_angles", "You must choose to vary either the solar or view angle.")
a.run()
if output_name is None:
return a.outputs
else:
return getattr(a.outputs, output_name)
# Run the map
if n is None:
pool = Pool()
else:
pool = Pool(n)
print "Running for many angles - this may take a long time"
results = pool.map(f, itertools.product(azimuths, zeniths))
results = np.array(results)
return (results, azimuths, zeniths, s.geometry.solar_a, s.geometry.solar_z)
@classmethod
def plot360(cls, data, output_name=None, show_sun=True, colorbarlabel=None):
"""Plot the data returned from :meth:`run360` as a polar contour plot, selecting an output if required.
Arguments:
* ``data`` -- The return value from :meth:`run360`
* ``output_name`` -- (Optional) The output name to extract (eg. "pixel_reflectance") if the given data is provided as instances of the Outputs class
* ``show_sun`` -- (Optional) Whether to show the location of the sun on the resulting polar plot.
* ``colorbarlabel`` -- (Optional) The label to use on the color bar shown with the plot
"""
results, azimuths, zeniths, sa, sz = data
if not isinstance(results[0], float):
# The results are not floats, so a float must be extracted from the output
if output_name is None:
raise ParameterException("output_name", "You must specify an output name when plotting data which is given as Outputs instances")
results = cls.extract_output(results, output_name)
fig, ax, cax = cls.plot_polar_contour(results, azimuths, zeniths, colorbarlabel=colorbarlabel)
if show_sun:
ax.autoscale(False)
ax.plot(np.radians(sa), sz, '*', markersize=20, markerfacecolor='yellow', markeredgecolor='red')
show()
return fig, ax
@classmethod
def run_and_plot_360(cls, s, solar_or_view, output_name, show_sun=True, na=36, nz=10, colorbarlabel=None):
"""Runs Py6S for lots of angles to produce a polar contour plot.
Arguments:
* ``s`` -- A :class:`.SixS` instance configured with all of the parameters you want to run the simulation with
* ``solar_or_view`` -- Set to ``'solar'`` if you want to iterate over the solar zenith/azimuth angles or ``'view'`` if you want to iterate over the view zenith/azimuth angles
* ``output_name`` -- The name of the output from SixS to plot. This should be a string containing exactly what you would put after ``s.outputs`` to print the output. For example `pixel_reflectance`.
* ``show_sun`` -- (Optional) Whether to place a marker showing the location of the sun on the contour plot (defaults to True, has no effect when ``solar_or_view`` set to ``'solar'``.)
* ``na`` -- (Optional) The number of azimuth angles to iterate over to generate the data for the plot (defaults to 36, giving data every 10 degrees)
* ``nz`` -- (Optional) The number of zenith angles to iterate over to generate the data for the plot (defaults to 10, giving data every 10 degrees)
* ``colorbarlabel`` -- (Optional) The label to use on the color bar shown with the plot
For example::
s = SixS()
s.ground_reflectance = GroundReflectance.HomogeneousWalthall(0.48, 0.50, 2.95, 0.6)
s.geometry.solar_z = 30
s.geometry.solar_a = 0
SixSHelpers.Angles.run_and_plot_360(s, 'view', 'pixel_reflectance')
"""
if solar_or_view == 'solar':
show_sun = False
res = cls.run360(s, solar_or_view, na, nz)
plot_res = cls.plot360(res, output_name, show_sun, colorbarlabel=colorbarlabel)
return plot_res
@classmethod
def extract_output(cls, results, output_name):
"""Extracts data for one particular SixS output from a list of SixS.Outputs instances.
Basically just a wrapper around a list comprehension.
Arguments:
* ``results`` -- A list of :class:`.SixS.Outputs` instances
* ``output_name`` -- The name of the output to extract. This should be a string containing whatever is put after the `s.outputs` when printing the output, for example `'pixel_reflectance'`.
"""
results_output = [getattr(r, output_name) for r in results]
return results_output
@classmethod
def plot_polar_contour(cls, values, azimuths, zeniths, filled=True, colorbarlabel=""):
"""Plot a polar contour plot, with 0 degrees at the North.
Arguments:
* ``values`` -- A list (or other iterable - eg. a NumPy array) of the values to plot on the contour plot (the `z` values)
* ``azimuths`` -- A list of azimuths (in degrees)
* ``zeniths`` -- A list of zeniths (that is, radii)
* ``filled`` -- (Optional) Whether to plot a filled contour plot, or just the contours (defaults to filled)
* ``colorbarlabel`` -- (Optional) The label to use on the color bar shown with the plot
The shapes of these lists are important, and are designed for a particular use case (but should be more generally useful).
The values list should be `len(azimuths) * len(zeniths)` long with data for the first azimuth for all the zeniths, then
the second azimuth for all the zeniths etc.
This is designed to work nicely with data that is produced using a loop as follows::
values = []
for azimuth in azimuths:
for zenith in zeniths:
# Do something and get a result
values.append(result)
After that code the azimuths, zeniths and values lists will be ready to be passed into this function.
"""
theta = np.radians(azimuths)
zeniths = np.array(zeniths)
values = np.array(values)
values = values.reshape(len(azimuths), len(zeniths))
r, theta = np.meshgrid(zeniths, np.radians(azimuths))
fig, ax = subplots(subplot_kw=dict(projection='polar'))
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
if filled:
cax = ax.contourf(theta, r, values, 30)
else:
cax = ax.contour(theta, r, values, 30)
cb = fig.colorbar(cax)
cb.set_label(colorbarlabel)
return fig, ax, cax
@classmethod
def run_principal_plane(cls, s, output_name=None, n=None):
"""Runs the given 6S simulation to get the outputs for the solar principal plane.
This function runs the simulation for all zenith angles in the azimuthal line of the sun. For example,
if the solar azimuth is 90 degrees, this function will run simulations for::
Azimuth Zenith
90 85
90 80
90 75
90 70
90 65
90 60
90 55
... ..
90 0
270 5
270 10
270 15
... ..
270 80
270 85
The calls to 6S for each angle will be run in parallel, making this function far faster than simply
running a for loop over each angle.
Arguments:
* ``s`` -- A :class:`.SixS` instance configured with all of the parameters you want to run the simulation with
* ``output_name`` -- (Optional) The output name to extract (eg. "pixel_reflectance") if the given data is provided as instances of the Outputs class
* ``n`` -- (Optional) The number of threads to run in parallel. This defaults to the number of CPU cores in your system, and is unlikely to need changing.
Return values:
A tuple containing zenith angles and the corresponding values or Outputs instances (depending on the arguments given).
The zenith angles returned have been modified so that the zenith angles on the 'sun-side' are positive, and those
on the other side (ie. past the vertical) are negative, for ease of plotting.
"""
# Get the solar azimuth and zenith angles from the SixS instance
sa = s.geometry.solar_a
# Compute the angles in the principal plane
# Get the solar azimuth on the opposite side for the other half of the principal plane
opp_sa = (sa + 180) % 360
# Calculate the first side (the solar zenith angle side)
first_side_z = np.arange(85, -5, -5)
first_side_a = np.repeat(sa, len(first_side_z))
# Calculate the other side
temp = first_side_z[:-1]
second_side_z = temp[::-1] # Reverse array
second_side_a = np.repeat(opp_sa, len(second_side_z))
# Join the two sides together
all_zeniths = np.hstack((first_side_z, second_side_z))
all_zeniths_for_return = np.hstack((first_side_z, -1 * second_side_z))
all_azimuths = np.hstack((first_side_a, second_side_a))
def f(arg):
zenith, azimuth = arg
s.outputs = None
a = copy.deepcopy(s)
a.geometry.view_z = zenith
a.geometry.view_a = azimuth
a.run()
if output_name is None:
return a.outputs
else:
return getattr(a.outputs, output_name)
# Run the map
if n is None:
pool = Pool()
else:
pool = Pool(n)
print "Running for many angles - this may take a long time"
results = pool.map(f, zip(all_zeniths, all_azimuths))
results = np.array(results)
return all_zeniths_for_return, results
def plot_principal_plane(zeniths, values, y_axis_label):
"""Plot the results from a principal plane simulation (eg. a run of :meth:`.run_principal_plane`).
Arguments:
* ``zeniths`` -- A list of view zenith angles in degrees
* ``values`` -- A list of simulated values for each of these angles
* ``y_axis_label`` -- A string to use as the label for the y axis
"""
plot(zeniths, values)
xlabel("View zenith angle (degrees)")
ylabel(y_axis_label)
show()
| lgpl-3.0 |
wogsland/QSTK | build/lib.linux-x86_64-2.7/QSTK/qstkfeat/classes.py | 8 | 1658 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Nov 7, 2011
@author: John Cornwell
@contact: [email protected]
@summary: File containing various classification functions
'''
# 3rd Party Imports
import pandas as pand
import numpy as np
def class_fut_ret( d_data, i_lookforward=21, s_rel=None, b_use_open=False ):
'''
@summary: Calculate classification, uses future returns
@param d_data: Dictionary of data to use
@param i_lookforward: Number of days to look in the future
    @param s_rel: Stock symbol that this should be relative to, usually $SPX.
@param b_use_open: If True, stock will be purchased at T+1 open, sold at
T+i_lookforward close
@return: DataFrame containing values
'''
if b_use_open:
df_val = d_data['open'].copy()
else:
df_val = d_data['close'].copy()
na_val = df_val.values
if b_use_open:
na_val[:-(i_lookforward + 1), :] = ((na_val[i_lookforward + 1:, :] -
na_val[1:-(i_lookforward), :]) /
na_val[1:-(i_lookforward), :])
na_val[-(i_lookforward+1):, :] = np.nan
else:
na_val[:-i_lookforward, :] = ((na_val[i_lookforward:, :] -
na_val[:-i_lookforward, :]) /
na_val[:-i_lookforward, :])
na_val[-i_lookforward:, :] = np.nan
return df_val
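# Usage sketch (hypothetical symbols and data, not part of the original module):
# d_data is expected to map 'open'/'close' to aligned pandas DataFrames, e.g.
#
#   dates = pand.date_range('2011-01-03', periods=60)
#   prices = pand.DataFrame(np.random.rand(60, 2) + 10.0,
#                           index=dates, columns=['AAPL', 'GOOG'])
#   df_class = class_fut_ret({'open': prices, 'close': prices}, i_lookforward=21)
#
# The last i_lookforward rows (i_lookforward + 1 with b_use_open=True) come back
# as NaN because no future price exists to compute a return against.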
if __name__ == '__main__':
pass
| bsd-3-clause |
meduz/scikit-learn | examples/linear_model/plot_ransac.py | 73 | 1859 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
label='Outliers')
plt.plot(line_X, line_y, color='navy', linestyle='-', linewidth=lw,
label='Linear regressor')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linestyle='-',
linewidth=lw, label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
nagyistoce/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup.py | 7 | 17744 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes, processor_class=ra.LoadAndProcessPysexGen1CenteringRescaling)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
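# Pipeline note (descriptive comment): realtime_augmented_data_gen applies the
# random zoom/rotation/translation/flip parameters above per chunk,
# post_augment_brightness_gen adds brightness jitter (std=0.5), and
# buffered_gen_mp keeps GEN_BUFFER_SIZE pre-processed chunks ready so the
# augmentation work can overlap with training on the GPU.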
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
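# Architecture sketch (descriptive comment for the stack below): the 0- and
# 45-degree inputs are sliced into flipped/rotated 45x45 views by
# MultiRotSliceLayer, passed through conv layers of 32/64/128/128 filters with
# max-pooling, merged across views by MultiRotMergeLayer, then through two
# 4096-unit dense layers with feature max-pooling (maxout, 2048 effective units
# each) into a 37-way output whose OptimisedDivGalaxyOutputLayer enforces the
# answer-probability constraints.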
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
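# Architecture recap (descriptive note, inferred from the layer definitions above): two 69x69
# colour inputs (a plain crop and a 45-degree rotated crop) are expanded into rotated and flipped
# views by MultiRotSliceLayer, passed through three conv/pool stages (32 -> 64 -> 128/128 filters),
# merged back per example by MultiRotMergeLayer, and fed to two 4096-unit dense layers that
# FeatureMaxPoolingLayer (pool_size=2) turns into 2048-unit maxout layers, ending in 37 outputs
# matching the Galaxy Zoo answer columns written to the CSV below; OptimisedDivGalaxyOutputLayer
# enforces the per-question probability constraints.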
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
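# Descriptive note: one chunk of data at a time lives in the GPU-side shared variables (xs_shared,
# y_shared); the compiled Theano functions below only take a batch index `idx`, and this `givens`
# mapping substitutes the idx-th BATCH_SIZE slice of the shared arrays for the input and target
# variables, so no host-to-device copy is needed per batch.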
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
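# Note: `x_valid` in the line below is the loop variable leaked from the list comprehensions that
# built xs_valid above (Python 2 scoping); it refers to the last input representation, which has
# the same number of rows as the others, so the batch count is correct.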
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first NUM_CHUNKS_NONORM chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
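# Note: the fixed test generator pads the final chunk so every batch is full; predictions made on
# the padded rows are discarded by the truncation to num_test further down.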
# make predictions for the test set; don't forget to cut off the zero padding at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for the set; don't forget to cut off the zero padding at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
liyu1990/sklearn | examples/ensemble/plot_gradient_boosting_oob.py | 50 | 4764 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
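# Descriptive note: staged_decision_function yields the decision values after each boosting stage,
# so score[i] above is the held-out loss (deviance here) of the model truncated to i + 1 trees;
# cv_estimate below reuses this helper on each cross-validation fold.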
def cv_estimate(n_folds=3):
cv = KFold(n_splits=n_folds)  # model_selection.KFold takes n_splits (not the old n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv.split(X_train, y_train):
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
matbra/bokeh | examples/interactions/interactive_bubble/data.py | 49 | 1265 | import numpy as np
from bokeh.palettes import Spectral6
def process_data():
from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
# Make the column names ints not strings for handling
columns = list(fertility.columns)
years = list(range(int(columns[0]), int(columns[-1])))
rename_dict = dict(zip(columns, years))
fertility = fertility.rename(columns=rename_dict)
life_expectancy = life_expectancy.rename(columns=rename_dict)
population = population.rename(columns=rename_dict)
regions = regions.rename(columns=rename_dict)
# Turn population into bubble sizes. Use min_size and factor to tweak.
scale_factor = 200
population_size = np.sqrt(population / np.pi) / scale_factor
min_size = 3
population_size = population_size.where(population_size >= min_size).fillna(min_size)
# Use pandas categories and categorize & color the regions
regions.Group = regions.Group.astype('category')
regions_list = list(regions.Group.cat.categories)
def get_color(r):
return Spectral6[regions_list.index(r.Group)]
regions['region_color'] = regions.apply(get_color, axis=1)
return fertility, life_expectancy, population_size, regions, years, regions_list
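# Usage sketch (hypothetical caller, not part of this module): the interactive bubble example is
# expected to unpack the processed frames and index them by integer year, e.g.
#
#     fertility, life_expectancy, population_size, regions, years, regions_list = process_data()
#     frame = dict(x=fertility[years[0]], y=life_expectancy[years[0]],
#                  size=population_size[years[0]], color=regions.region_color)
#
# (the exact column/glyph names are illustrative only).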
| bsd-3-clause |
harterj/moose | modules/tensor_mechanics/test/tests/capped_mohr_coulomb/small_deform_hard_21.py | 12 | 1567 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(ini, res, ini_x, res_x):
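# Descriptive note: this builds a smooth cubic transition between (ini_x, ini) and (res_x, res).
# With lo2 the half-width and the curve centred on the midpoint, alpha and beta are chosen so that
# C(x) = alpha*(x - mid)**3 + beta*(x - mid) + (ini + res)/2 passes through both endpoints with
# zero slope there.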
lo2 = 0.5 * (res_x - ini_x)
alpha = (ini - res) / 4.0 / lo2**3
beta = -3.0 * alpha * lo2**2
data = [ini_x + i*(res_x - ini_x)/100 for i in range(100)]
data = [(x, alpha * (x - ini_x - lo2)**3 + beta * (x - ini_x - lo2) + (ini + res) / 2.0) for x in data]
return list(zip(*data))  # materialise so the (x, y) series can be indexed under Python 3
def moose(fn):
sinphi = np.sin(30.0 * np.pi / 180.0)
cosphi = np.cos(30.0 * np.pi / 180.0)
f = open(fn)
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[4:-1]]  # list() keeps rows indexable under Python 3
f.close()
intnl = [d[2] for d in data]
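# The cohesion is recovered by inverting the Mohr-Coulomb yield condition
# 0.5*(smax - smin) + 0.5*(smax + smin)*sin(phi) - C*cos(phi) = 0 for C, with columns 5 and 7 of
# the CSV assumed to hold the extreme stresses.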
coh = [(0.5 * (d[5] - d[7]) + 0.5 * (d[5] + d[7]) * sinphi) / cosphi for d in data]
return (intnl, coh)
plt.figure()
expect21 = expected(10.0, 20.0, 0.0, 5E-6)
m21 = moose("gold/small_deform_hard21.csv")
plt.plot(expect21[0], expect21[1], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(m21[0], m21[1], 'k^', label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("internal parameter")
plt.ylabel("Cohesion")
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.title("Cohesion hardening")
plt.savefig("figures/small_deform_hard_21.eps")
sys.exit(0)
| lgpl-2.1 |
wkfwkf/statsmodels | examples/run_all.py | 34 | 1740 | """run all examples to make sure we don't get an exception
Note:
If an example contains plt.show(), then all plot windows have to be closed
manually, at least in my setup.
uncomment plt.show() to show all plot windows
"""
from __future__ import print_function
from statsmodels.compat import input
stop_on_error = True
filelist = ['example_glsar.py', 'example_wls.py', 'example_gls.py',
'example_glm.py', 'example_ols_tftest.py', # 'example_rpy.py',
'example_ols.py', 'example_rlm.py',
'example_discrete.py', 'example_predict.py',
'example_ols_table.py',
# time series
'tsa/ex_arma2.py', 'tsa/ex_dates.py']
if __name__ == '__main__':
#temporarily disable show
import matplotlib.pyplot as plt
plt_show = plt.show
def noop(*args):
pass
plt.show = noop
msg = """Are you sure you want to run all of the examples?
This is done mainly to check that they are up to date.
(y/n) >>> """
cont = input(msg)
if 'y' in cont.lower():
for run_all_f in filelist:
try:
print('\n\nExecuting example file', run_all_f)
print('-----------------------' + '-' * len(run_all_f))
exec(open(run_all_f).read())
except:
# f might be overwritten in the executed file
print('**********************' + '*' * len(run_all_f))
print('ERROR in example file', run_all_f)
print('**********************' + '*' * len(run_all_f))
if stop_on_error:
raise
# reenable show after closing windows
plt.close('all')
plt.show = plt_show
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | sklearn/utils/estimator_checks.py | 1 | 54609 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM, BaseSVC
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "RandomTreesEmbedding", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__name__  # Estimator is a class, so use its own name rather than its metaclass's
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
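# Minimal usage sketch (hypothetical, assumes any scikit-learn compatible estimator class):
#
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)
#
# which runs check_parameters_default_constructible plus every applicable check yielded by
# _yield_all_checks for that class.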
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array that contains only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array that has only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting a 1d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
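        # With weights this lopsided, nearly every test sample should be
        # predicted as the heavily weighted class 0; the assertion below
        # checks exactly that.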
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too
            # low for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
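        # This reproduces the 'balanced' heuristic,
        # weight(c) = n_samples / (n_classes * count(c)),
        # so the two fits below should yield (almost) identical coefficients.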
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
        # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
neuroidss/nupic.research | projects/sequence_classification/run_encoder_with_union.py | 9 | 8995 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run sequence classification experiment with
Input -> RDSE encoder -> Union model
Search for the optimal union window
One needs to run the script "run_encoder_only.py" first to get the
optimal encoder resolution
"""
import pickle
import time
import matplotlib.pyplot as plt
import multiprocessing
from util_functions import *
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
plt.ion()
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams.update({'figure.autolayout': True})
def unionForOneSequence(activeColumns, unionLength=1):
activeColumnTrace = []
unionStepInBatch = 0
unionBatchIdx = 0
unionCols = set()
for t in range(len(activeColumns)):
unionCols = unionCols.union(activeColumns[t])
unionStepInBatch += 1
if unionStepInBatch == unionLength:
activeColumnTrace.append(unionCols)
unionStepInBatch = 0
unionBatchIdx += 1
unionCols = set()
if unionStepInBatch > 0:
activeColumnTrace.append(unionCols)
return activeColumnTrace
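# Illustrative sketch on toy input (not from the experiments in this script):
# with unionLength=2, the per-timestep sets [{1}, {2}, {3}, {4}, {5}] are
# merged into [{1, 2}, {3, 4}, {5}]; consecutive windows are OR-ed together
# and a trailing partial window is kept as its own union.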
def runUnionStep(activeColumns, unionLength=1):
"""
Run encoder -> tm network over dataset, save activeColumn and activeCells
traces
:param tm:
:param encoder:
:param dataset:
:return:
"""
numSequence = len(activeColumns)
activeColumnUnionTrace = []
for i in range(numSequence):
activeColumnTrace = unionForOneSequence(activeColumns[i], unionLength)
activeColumnUnionTrace.append(activeColumnTrace)
# print "{} out of {} done ".format(i, numSequence)
return activeColumnUnionTrace
def runEncoderOverDataset(encoder, dataset):
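  # Encode every scalar of every sequence with the encoder and keep, per
  # timestep, the set of indices of active encoder bits (used downstream as
  # "active columns").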
activeColumnsData = []
for i in range(dataset.shape[0]):
activeColumnsTrace = []
for element in dataset[i, :]:
encoderOutput = encoder.encode(element)
activeColumns = set(np.where(encoderOutput > 0)[0])
activeColumnsTrace.append(activeColumns)
activeColumnsData.append(activeColumnsTrace)
return activeColumnsData
def calcualteEncoderModelWorker(taskQueue, resultQueue, *args):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
nBuckets = nextTask["nBuckets"]
accuracyColumnOnly = calculateEncoderModelAccuracy(nBuckets, *args)
resultQueue.put({nBuckets: accuracyColumnOnly})
print "Column Only model, Resolution: {} Accuracy: {}".format(
nBuckets, accuracyColumnOnly)
return
def calculateEncoderModelAccuracy(nBuckets, numCols, w, trainData, trainLabel):
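  # Train-set accuracy of the encoder-only ('column only') model for a given
  # number of buckets; used elsewhere to search for a good encoder resolution.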
maxValue = np.max(trainData)
minValue = np.min(trainData)
resolution = (maxValue - minValue) / nBuckets
encoder = RandomDistributedScalarEncoder(resolution, w=w, n=numCols)
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTrain,
trainLabel, trainLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
return accuracyColumnOnly
def runDataSet(dataName, datasetName):
if not os.path.exists('results'):
os.makedirs('results')
trainData, trainLabel, testData, testLabel = loadDataset(dataName,
datasetName)
numTest = len(testLabel)
numTrain = len(trainLabel)
sequenceLength = len(trainData[0])
classList = np.unique(trainLabel).tolist()
numClass = len(classList)
print "Processing {}".format(dataName)
print "Train Sample # {}, Test Sample # {}".format(numTrain, numTest)
print "Sequence Length {} Class # {}".format(sequenceLength, len(classList))
if (max(numTrain, numTest) * sequenceLength < 600 * 600):
print "skip this small dataset for now"
return
try:
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
expResultTM = pickle.load(
open('results/modelPerformance/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'r'))
return
except:
print "run data set: ", dataName
EuclideanDistanceMat = calculateEuclideanDistanceMat(testData, trainData)
outcomeEuclidean = calculateEuclideanModelAccuracy(trainData, trainLabel,
testData, testLabel)
accuracyEuclideanDist = np.mean(outcomeEuclidean)
print
print "Euclidean model accuracy: {}".format(accuracyEuclideanDist)
print
# # Use SDR overlap instead of Euclidean distance
print "Running Encoder model"
maxValue = np.max(trainData)
minValue = np.min(trainData)
numCols = 2048
w = 41
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
nBucketList = searchResolution['nBucketList']
accuracyVsResolution = searchResolution['accuracyVsResolution']
optNumBucket = nBucketList[smoothArgMax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue) / optNumBucket
except:
return
print "optimal bucket # {}".format((maxValue - minValue) / optimalResolution)
encoder = RandomDistributedScalarEncoder(optimalResolution, w=w, n=numCols)
print "encoding train data ..."
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
print "encoding test data ..."
activeColumnsTest = runEncoderOverDataset(encoder, testData)
print "calculate column distance matrix ..."
# run encoder -> union model, search for the optimal union window
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
activeColumnUnionTrain = runUnionStep(activeColumnsTrain, unionLength)
activeColumnUnionTest = runUnionStep(activeColumnsTest, unionLength)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnUnionTrain)
distMatColumnTest = calculateDistanceMat(activeColumnUnionTest,
activeColumnUnionTrain)
    trainAccuracyColumnOnly, outcomeColumn = calculateAccuracy(
      distMatColumnTrain, trainLabel, trainLabel)
    testAccuracyColumnOnly, outcomeColumn = calculateAccuracy(
      distMatColumnTest, trainLabel, testLabel)
expResults = {'distMatColumnTrain': distMatColumnTrain,
'distMatColumnTest': distMatColumnTest,
'trainAccuracyColumnOnly': trainAccuracyColumnOnly,
'testAccuracyColumnOnly': testAccuracyColumnOnly}
if not os.path.exists('results/distanceMat'):
os.makedirs('results/distanceMat')
outputFile = open('results/distanceMat/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'w')
pickle.dump(expResults, outputFile)
outputFile.close()
print '--> wrote results to "results/distanceMat"'
def runDataSetWorker(taskQueue, datasetName):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
dataName = nextTask["dataName"]
runDataSet(dataName, datasetName)
return
if __name__ == "__main__":
datasetName = "SyntheticData"
dataSetList = listDataSets(datasetName)
datasetName = 'UCR_TS_Archive_2015'
dataSetList = listDataSets(datasetName)
# dataSetList = ["synthetic_control"]
numCPU = multiprocessing.cpu_count()
numWorker = 2
# Establish communication queues
taskQueue = multiprocessing.JoinableQueue()
for dataName in dataSetList:
taskQueue.put({"dataName": dataName,
"datasetName": datasetName})
for _ in range(numWorker):
taskQueue.put(None)
jobs = []
for i in range(numWorker):
print "Start process ", i
p = multiprocessing.Process(target=runDataSetWorker,
args=(taskQueue, datasetName))
jobs.append(p)
p.daemon = True
p.start()
while not taskQueue.empty():
time.sleep(5)
| agpl-3.0 |
hrjn/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
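# threshold caps the radius of each Birch subcluster; n_clusters=None keeps
# the raw subclusters, while n_clusters=100 adds the final global clustering
# step that reduces them to 100 clusters (see the description above).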
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
GuLinux/PySpectrum | import_image.py | 1 | 5892 | from pyui.import_image import Ui_ImportImage
from PyQt5.QtWidgets import QWidget, QToolBar, QDialog, QDialogButtonBox, QProgressDialog, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, QCoreApplication
from qmathplotwidget import QMathPlotWidget, QImPlotWidget
import matplotlib.pyplot as plt
from qtcommons import QtCommons
from pyspectrum_commons import *
import os
import numpy as np
from astropy.io import fits
from object_properties_dialog import ObjectPropertiesDialog
from object_properties import ObjectProperties
from rotate_image_dialog import RotateImageDialog
from project import Project
class ImportImage(QWidget):
def icon():
return QIcon(':/image_20')
ACTION_TEXT = 'Import Image'
def pick(on_ok, settings):
open_file_sticky('Open FITS Image',FITS_IMG_EXTS, on_ok, settings, IMPORT_IMG )
def __init__(self, fits_file, settings, project = None):
super(ImportImage, self).__init__()
self.settings = settings
self.fits_file = fits_file
self.project = project
try:
image_hdu_index = fits_file.index_of('IMAGE')
except KeyError:
image_hdu_index = 0
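        # keep a pristine copy of the image HDU under the name 'IMAGE',
        # replacing any HDU of that name already present in the file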
original_image = fits.ImageHDU(data=fits_file[image_hdu_index].data, header=fits_file[image_hdu_index].header, name='IMAGE')
for hdu in [h for h in self.fits_file if h.name == 'IMAGE']: self.fits_file.remove(hdu)
self.fits_file.append(original_image)
self.ui = Ui_ImportImage()
self.ui.setupUi(self)
self.rotate_dialog = RotateImageDialog(self.fits_file, image_hdu_index, project=project)
self.rotate_dialog.rotated.connect(self.rotated)
self.image_plot = QtCommons.nestWidget(self.ui.image_widget, QImPlotWidget(self.rotate_dialog.data_rotated, cmap='gray'))
self.spatial_plot = QtCommons.nestWidget(self.ui.spatial_plot_widget, QMathPlotWidget())
self.spectrum_plot = QtCommons.nestWidget(self.ui.spectrum_plot_widget, QMathPlotWidget())
self.image_view = self.image_plot.axes_image
self.toolbar = QToolBar('Image Toolbar')
self.toolbar.addAction(QIcon(':/rotate_20'), "Rotate", lambda: self.rotate_dialog.show())
self.toolbar.addAction(QIcon(':/save_20'), "Save", self.save_profile)
self.toolbar.addAction(QIcon(':/select_all_20'), "Select spectrum data", lambda: self.spatial_plot.add_span_selector('select_spectrum', self.spectrum_span_selected,direction='horizontal'))
self.toolbar.addAction(QIcon.fromTheme('edit-select-invert'), "Select background data", lambda: self.spatial_plot.add_span_selector('select_background', self.background_span_selected,direction='horizontal', rectprops = dict(facecolor='blue', alpha=0.5))).setEnabled(False)
#self.toolbar.addAction('Stack', self.show_stack_images_dialog)
self.toolbar.addSeparator()
self.object_properties = ObjectProperties(self.fits_file, project=project)
self.object_properties_dialog = ObjectPropertiesDialog(settings, self.object_properties)
self.toolbar.addAction("Object properties", self.object_properties_dialog.show)
self.rotated()
def rotated(self):
self.image_view.set_data(self.rotate_dialog.data_rotated)
self.image_view.axes.relim()
self.image_view.axes.autoscale_view()
self.image_view.set_extent([self.rotate_dialog.data_rotated.shape[1],0, self.rotate_dialog.data_rotated.shape[0],0])
self.image_view.figure.canvas.draw()
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
self.draw_plot(self.spatial_plot.axes, self.spatial_profile())
def background_span_selected(self, min, max):
self.background_span_selection = (min, max)
self.spatial_plot.add_span('background_window', min, max, 'v', facecolor='gray', alpha=0.5)
self.image_plot.add_span('background_window', min, max, 'h', facecolor='red', alpha=0.5, clip_on=True)
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
def spectrum_span_selected(self, min, max):
self.spectrum_span_selection = (min, max)
self.spatial_plot.add_span('spectrum_window', min, max, 'v', facecolor='g', alpha=0.5)
self.image_plot.add_span('spectrum_window', min, max, 'h', facecolor='y', alpha=0.25, clip_on=True)
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
def draw_plot(self, axes, data):
axes.clear()
axes.plot(data)
axes.figure.tight_layout()
axes.figure.canvas.draw()
def spatial_profile(self):
return self.rotate_dialog.data_rotated.sum(1)
def spectrum_profile(self):
return self.rotate_dialog.data_rotated[self.spectrum_span_selection[0]:self.spectrum_span_selection[1]+1,:].sum(0) if hasattr(self, 'spectrum_span_selection') else self.rotate_dialog.data_rotated.sum(0)
def save(self, save_file):
data = self.spectrum_profile()
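        # normalize the extracted profile to the [0, 1] range before writing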
data -= np.amin(data)
data /= np.amax(data)
hdu = self.fits_file[0]
hdu.data = data
hdu.header['ORIGIN'] = 'PySpectrum'
self.fits_file.writeto(save_file, clobber=True)
def save_profile(self):
if not self.project:
save_file_sticky('Save plot...', 'FITS file (.fit)', lambda f: self.save(f[0]), self.settings, RAW_PROFILE )
return
if not self.object_properties.name:
QMessageBox.information(self, 'Save FITS', 'Please set file information (name, date, etc) using the Object Properties button before saving')
return
file_path = self.project.add_file(Project.RAW_PROFILE, object_properties = self.object_properties, on_added=self.save)
#self.save(file_path)
| gpl-3.0 |
INM-6/python-neo | neo/io/neuralynxio_v1.py | 2 | 105289 | """
Class for reading data from Neuralynx files.
This IO supports NCS, NEV and NSE file formats.
This module is an older implementation using the old neo.io API.
A new class NeuralynxIO, combining NeuralynxRawIO and BaseFromIO,
supersedes this one.
Depends on: numpy
Supported: Read
Author: Julia Sprenger, Carlos Canova
Adapted from the exampleIO of python-neo
"""
import sys
import os
import warnings
import codecs
import copy
import re
import datetime
import pkg_resources
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
import neo.io.neuralynxio
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, SpikeTrain,
Event, Unit)
from os import listdir, sep
from os.path import isfile, getsize
import hashlib
import pickle
if hasattr(pkg_resources, 'pkg_resources'):
parse_version = pkg_resources.pkg_resources.parse_version
else:
parse_version = pkg_resources.parse_version
class NeuralynxIO(BaseIO):
"""
Class for reading Neuralynx files.
It enables reading:
- :class:'Block'
- :class:'Segment'
- :class:'AnalogSignal'
- :class:'SpikeTrain'
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
events=True)
seg = block.segments[0]
analogsignal = seg.analogsignals[0]
plt.plot(analogsignal.times.rescale(pq.ms), analogsignal.magnitude)
plt.show()
"""
is_readable = True # This class can only read data
is_writable = False # write is not supported
# This class is able to directly or indirectly handle the following objects
# You can notice that this greatly simplifies the full Neo object hierarchy
supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]
# This class can return either a Block or a Segment
# The first one is the default ( self.read )
# These lists should go from highest object to lowest object because
# common_io_test assumes it.
readable_objects = [Segment, AnalogSignal, SpikeTrain]
# This class is not able to write objects
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff : a definition for parameters when reading.
# This dict should be keyed by object (`Block`). Each entry is a list
# of tuple. The first entry in each tuple is the parameter name. The
# second entry is a dict with keys 'value' (for default value),
# and 'label' (for a descriptive name).
# Note that if the highest-level object requires parameters,
# common_io_test will be skipped.
read_params = {
Segment: [('waveforms', {'value': True})],
Block: [('waveforms', {'value': False})]
}
    # writing is not supported, so no GUI stuff
write_params = None
name = 'Neuralynx'
description = 'This IO reads .nse/.ncs/.nev files of the Neuralynx (' \
                  'Cheetah) recording system (tetrodes).'
extensions = ['nse', 'ncs', 'nev', 'ntt']
# mode can be 'file' or 'dir' or 'fake' or 'database'
# the main case is 'file' but some reader are base on a directory or
# a database this info is for GUI stuff also
mode = 'dir'
# hardcoded parameters from manual, which are not present in Neuralynx
# data files
# unit of timestamps in different files
nev_time_unit = pq.microsecond
ncs_time_unit = pq.microsecond
nse_time_unit = pq.microsecond
ntt_time_unit = pq.microsecond
# unit of sampling rate in different files
ncs_sr_unit = pq.Hz
nse_sr_unit = pq.Hz
ntt_sr_unit = pq.Hz
def __init__(self, sessiondir=None, cachedir=None, use_cache='hash',
print_diagnostic=False, filename=None):
"""
Arguments:
            sessiondir: the directory in which the files of the recording
                session are collected. Default 'None'.
            print_diagnostic: indicates whether information about the data
                loading is printed to the terminal. Default 'False'.
            cachedir: the directory where metadata about the recording session
                is read from and written to.
            use_cache: method used for cache identification. Possible values:
                'hash'/'always'/'datesize'/'never'. Default 'hash'.
            filename: this argument is handled the same as sessiondir and is
                only added for external IO interfaces. The value of sessiondir
                has priority over filename.
"""
warnings.warn('{} is deprecated and will be removed in neo version 0.10. Use {} instead.'
''.format(self.__class__, neo.io.neuralynxio.NeuralynxIO), FutureWarning)
BaseIO.__init__(self)
        # possibility to provide filename instead of sessiondir for IO
# compatibility
if filename is not None and sessiondir is None:
sessiondir = filename
if sessiondir is None:
            raise ValueError('Must provide a directory containing data files '
                             'of one recording session.')
# remove filename if specific file was passed
if any([sessiondir.endswith('.%s' % ext) for ext in self.extensions]):
sessiondir = sessiondir[:sessiondir.rfind(sep)]
# remove / for consistent directory handling
if sessiondir.endswith(sep):
sessiondir = sessiondir.rstrip(sep)
# set general parameters of this IO
self.sessiondir = sessiondir
self.filename = sessiondir.split(sep)[-1]
self._print_diagnostic = print_diagnostic
self.associated = False
self._associate(cachedir=cachedir, usecache=use_cache)
self._diagnostic_print(
'Initialized IO for session %s' % self.sessiondir)
def read_block(self, lazy=False, cascade=True, t_starts=None,
t_stops=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False,
waveforms=False):
"""
        Reads data in a requested time window and returns a block with as
        many segments as necessary to contain these data.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (segments).
Default 'True'.
t_starts : list of quantities or quantity describing the start of
the requested time window to load. If None or [None]
the complete session is loaded. Default 'None'.
t_stops : list of quantities or quantity describing the end of the
requested time window to load. Has to contain the
same number of values as t_starts. If None or [None]
the complete session is loaded. Default 'None'.
electrode_list : list of integers containing the IDs of the
requested to load. If [] or None all available
channels will be loaded.
Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded.
Default: None.
analogsignals : boolean, indication whether analogsignals should be
read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns: Block object containing the requested data in neo structures.
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(lazy = False, cascade = True,
t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
electrode_list = [1,5,10],
unit_list = [1,2,3],
events = True, waveforms = True)
plt.plot(block.segments[0].analogsignals[0])
plt.show()
"""
# Create block
bl = Block(file_origin=self.sessiondir)
bl.name = self.filename
if not cascade:
return bl
# Checking input of t_start and t_stop
# For lazy users that specify x,x instead of [x],[x] for t_starts,
# t_stops
if t_starts is None:
t_starts = [None]
elif type(t_starts) == pq.Quantity:
t_starts = [t_starts]
elif type(t_starts) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_starts]):
raise ValueError('Invalid specification of t_starts.')
if t_stops is None:
t_stops = [None]
elif type(t_stops) == pq.Quantity:
t_stops = [t_stops]
elif type(t_stops) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_stops]):
raise ValueError('Invalid specification of t_stops.')
# adapting t_starts and t_stops to known gap times (extracted in
# association process / initialization)
for gap in self.parameters_global['gaps']:
# gap=gap_list[0]
for e in range(len(t_starts)):
t1, t2 = t_starts[e], t_stops[e]
gap_start = gap[1] * self.ncs_time_unit - \
self.parameters_global['t_start']
gap_stop = gap[2] * self.ncs_time_unit - self.parameters_global[
't_start']
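                # if the requested window overlaps this gap, truncate it at
                # the gap start and insert a second window that resumes at the
                # gap end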
if ((t1 is None and t2 is None)
or (t1 is None and t2 is not None and t2.rescale(
self.ncs_time_unit) > gap_stop)
or (t2 is None and t1 is not None and t1.rescale(
self.ncs_time_unit) < gap_stop)
or (t1 is not None and t2 is not None and t1.rescale(
self.ncs_time_unit) < gap_start
and t2.rescale(self.ncs_time_unit) > gap_stop)):
# adapting first time segment
t_stops[e] = gap_start
# inserting second time segment
t_starts.insert(e + 1, gap_stop)
t_stops.insert(e + 1, t2)
warnings.warn(
'Substituted t_starts and t_stops in order to skip '
'gap in recording session.')
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
# adding a segment for each t_start, t_stop pair
for t_start, t_stop in zip(t_starts, t_stops):
seg = self.read_segment(lazy=lazy, cascade=cascade,
t_start=t_start, t_stop=t_stop,
electrode_list=electrode_list,
unit_list=unit_list,
analogsignals=analogsignals, events=events,
waveforms=waveforms)
bl.segments.append(seg)
# generate units
units = []
channel_unit_collection = {}
for st in [s for seg in bl.segments for s in seg.spiketrains]:
# collecting spiketrains of same channel and unit id to generate
# common unit
chuid = (st.annotations['channel_index'], st.annotations['unit_id'])
if chuid in channel_unit_collection:
channel_unit_collection[chuid].append(st)
else:
channel_unit_collection[chuid] = [st]
for chuid in channel_unit_collection:
sts = channel_unit_collection[chuid]
unit = Unit(name='Channel %i, Unit %i' % chuid)
unit.spiketrains.extend(sts)
units.append(unit)
# generate one channel indexes for each analogsignal
for anasig in [a for seg in bl.segments for a in seg.analogsignals]:
channelids = anasig.annotations['channel_index']
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all analogsignal '
'"%s"' % anasig.name,
channel_ids=channelids)
channelidx.analogsignals.append(anasig)
bl.channel_indexes.append(channelidx)
# generate channel indexes for units
channelids = [unit.spiketrains[0].annotations['channel_index']
for unit in units]
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all spiketrains',
channel_ids=channelids)
channelidx.units.extend(units)
bl.channel_indexes.append(channelidx)
bl.create_many_to_one_relationship()
# Adding global parameters to block annotation
bl.annotations.update(self.parameters_global)
return bl
def read_segment(self, lazy=False, cascade=True, t_start=None, t_stop=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False, waveforms=False):
"""Reads one Segment.
The Segment will contain one AnalogSignal for each channel
and will go from t_start to t_stop.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (SpikeTrains,
AnalogSignals, Events).
Default 'True'.
t_start : time (quantity) that the Segment begins. Default None.
t_stop : time (quantity) that the Segment ends. Default None.
electrode_list : list of integers containing the IDs of the
requested to load. If [] or None all available
channels will be loaded.
Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded. If False, no unit will be loaded.
Default: None.
analogsignals : boolean, indication whether analogsignals should be
read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns:
Segment object containing neo objects, which contain the data.
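        Usage (illustrative sketch; the session path is a placeholder):
            from neo import io
            import quantities as pq
            session_folder = '../Data/2014-07-24_10-31-02'
            NIO = io.NeuralynxIO(session_folder, print_diagnostic=True)
            seg = NIO.read_segment(t_start=0.1 * pq.s, t_stop=0.2 * pq.s,
                                   electrode_list=[1, 5, 10], events=True)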
"""
# input check
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
elif electrode_list is None:
raise ValueError('Electrode_list can not be None.')
elif [v for v in electrode_list if
v in self.parameters_ncs.keys()] == []:
            # warn if none of the requested channels are present in this session
warnings.warn('Requested channels %s are not present in session '
'(contains only %s)' % (
electrode_list, self.parameters_ncs.keys()))
electrode_list = []
seg = Segment(file_origin=self.filename)
if not cascade:
return seg
# generate empty segment for analogsignal collection
empty_seg = Segment(file_origin=self.filename)
# Reading NCS Files #
# selecting ncs files to load based on electrode_list requested
if analogsignals:
for chid in electrode_list:
if chid in self.parameters_ncs:
file_ncs = self.parameters_ncs[chid]['filename']
self.read_ncs(file_ncs, empty_seg, lazy, cascade,
t_start=t_start, t_stop=t_stop)
else:
self._diagnostic_print('Can not load ncs of channel %i. '
'No corresponding ncs file '
'present.' % (chid))
        # supplementary merge function, should be replaced by a neo utility
        # function
def merge_analogsignals(anasig_list):
for aid, anasig in enumerate(anasig_list):
anasig.channel_index = None
if aid == 0:
full_analogsignal = anasig
else:
full_analogsignal = full_analogsignal.merge(anasig)
for key in anasig_list[0].annotations.keys():
listified_values = [a.annotations[key] for a in anasig_list]
full_analogsignal.annotations[key] = listified_values
return full_analogsignal
analogsignal = merge_analogsignals(empty_seg.analogsignals)
seg.analogsignals.append(analogsignal)
analogsignal.segment = seg
# Reading NEV Files (Events)#
# reading all files available
if events:
for filename_nev in self.nev_asso:
self.read_nev(filename_nev, seg, lazy, cascade, t_start=t_start,
t_stop=t_stop)
# Reading Spike Data only if requested
if unit_list is not False:
# Reading NSE Files (Spikes)#
# selecting nse files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_nse:
filename_nse = self.parameters_nse[chid]['filename']
self.read_nse(filename_nse, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load nse of channel %i. '
'No corresponding nse file '
'present.' % (chid))
# Reading ntt Files (Spikes)#
# selecting ntt files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_ntt:
filename_ntt = self.parameters_ntt[chid]['filename']
self.read_ntt(filename_ntt, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load ntt of channel %i. '
'No corresponding ntt file '
'present.' % (chid))
return seg
def read_ncs(self, filename_ncs, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
Reading a single .ncs file from the associated Neuralynx recording
session.
In case of a recording gap between t_start and t_stop, data are only
loaded until gap start.
For loading data across recording gaps use read_block(...).
Arguments:
filename_ncs : Name of the .ncs file to be loaded.
seg : Neo Segment, to which the AnalogSignal containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
AnalogSignal. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
AnalogSignal begins.
Default None.
t_stop : time or sample (quantity or integer) that the
AnalogSignal ends.
Default None.
Returns:
None
'''
# checking format of filename and correcting if necessary
if filename_ncs[-4:] != '.ncs':
filename_ncs = filename_ncs + '.ncs'
if sep in filename_ncs:
filename_ncs = filename_ncs.split(sep)[-1]
# Extracting the channel id from prescan (association) of ncs files with
# this recording session
chid = self.get_channel_id_by_file_name(filename_ncs)
if chid is None:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ncs))
if not cascade:
return
# read data
header_time_data = self.__mmap_ncs_packet_timestamps(filename_ncs)
data = self.__mmap_ncs_data(filename_ncs)
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
if isinstance(t_start, int):
t_start = t_start / self.parameters_ncs[chid]['sampling_rate']
if isinstance(t_stop, int):
t_stop = t_stop / self.parameters_ncs[chid]['sampling_rate']
# rescaling to global start time of recording (time of first sample
# in any file type)
if t_start is None or t_start < (
self.parameters_ncs[chid]['t_start']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_ncs[chid]['t_start'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ncs[chid]['t_stop']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested times window (%s to %s) is later than data are '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ncs[chid]['t_stop']
- self.parameters_global['t_start']),
filename_ncs))
if t_stop is None or t_stop > (
self.parameters_ncs[chid]['t_stop']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_ncs[chid]['t_stop'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ncs[chid]['t_start']
- self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is earlier than data '
'are '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ncs[chid]['t_start']
- self.parameters_global['t_start']),
filename_ncs))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_ncs))
# Extracting data signal in requested time window
unit = pq.dimensionless # default value
if lazy:
sig = []
p_id_start = 0
else:
tstamps = header_time_data * self.ncs_time_unit - \
self.parameters_global['t_start']
# find data packet to start with signal construction
starts = np.where(tstamps <= t_start)[0]
if len(starts) == 0:
self._diagnostic_print(
'Requested AnalogSignal not present in this time '
'interval.')
return
else:
# first packet to be included into signal
p_id_start = starts[-1]
# find data packet where signal ends (due to gap or t_stop)
stops = np.where(tstamps >= t_stop)[0]
if len(stops) != 0:
first_stop = [stops[0]]
else:
first_stop = []
# last packet to be included in signal
p_id_stop = min(first_stop + [len(data)])
# search gaps in recording in time range to load
gap_packets = [gap_id[0] for gap_id in
self.parameters_ncs[chid]['gaps'] if
gap_id[0] > p_id_start]
if len(gap_packets) > 0 and min(gap_packets) < p_id_stop:
p_id_stop = min(gap_packets)
warnings.warn(
'Analogsignalarray was shortened due to gap in '
'recorded '
'data '
' of file %s at packet id %i' % (
filename_ncs, min(gap_packets)))
# search broken packets in time range to load
broken_packets = []
if 'broken_packet' in self.parameters_ncs[chid]:
broken_packets = [packet[0] for packet in
self.parameters_ncs[chid]['broken_packet']
if packet[0] > p_id_start]
if len(broken_packets) > 0 and min(broken_packets) < p_id_stop:
p_id_stop = min(broken_packets)
warnings.warn(
'Analogsignalarray was shortened due to broken data '
'packet in recorded data '
' of file %s at packet id %i' % (
filename_ncs, min(broken_packets)))
# construct signal in valid packet range
sig = np.array(data[p_id_start:p_id_stop + 1], dtype=float)
sig = sig.reshape(len(sig) * len(sig[0]))
# ADBitVolts is not guaranteed to be present in the header!
if 'ADBitVolts' in self.parameters_ncs[chid]:
sig *= self.parameters_ncs[chid]['ADBitVolts']
unit = pq.V
else:
                warnings.warn(
                    'Could not transform data from file %s into physical '
                    'signal. Missing "ADBitVolts" value in text header.'
                    % filename_ncs)
# defining sampling rate for rescaling purposes
sampling_rate = self.parameters_ncs[chid]['sampling_unit'][0]
# creating neo AnalogSignal containing data
anasig = AnalogSignal(signal=pq.Quantity(sig, unit, copy=False),
sampling_rate=1 * sampling_rate,
# rescaling t_start to sampling time units
t_start=(header_time_data[p_id_start] * self.ncs_time_unit
- self.parameters_global['t_start']).rescale(
1 / sampling_rate),
name='channel_%i' % (chid),
channel_index=chid)
# removing protruding parts of first and last data packet
if anasig.t_start < t_start.rescale(anasig.t_start.units):
anasig = anasig.time_slice(t_start.rescale(anasig.t_start.units),
None)
if anasig.t_stop > t_stop.rescale(anasig.t_start.units):
anasig = anasig.time_slice(None,
t_stop.rescale(anasig.t_start.units))
annotations = copy.deepcopy(self.parameters_ncs[chid])
for pop_key in ['sampling_rate', 't_start']:
if pop_key in annotations:
annotations.pop(pop_key)
anasig.annotations.update(annotations)
anasig.annotations['electrode_id'] = chid
        # this annotation is necessary for automatic generation of
        # recordingchannels
anasig.annotations['channel_index'] = chid
anasig.segment = seg # needed for merge function of analogsignals
seg.analogsignals.append(anasig)
def read_nev(self, filename_nev, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
Reads associated nev file and attaches its content as eventarray to
        provided neo segment. In contrast to read_ncs, times can not be provided
in number of samples as a nev file has no inherent sampling rate.
Arguments:
filename_nev : Name of the .nev file to be loaded.
seg : Neo Segment, to which the Event containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
Event. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the Events begin.
Default None.
t_stop : time (quantity) that the Event end.
Default None.
Returns:
None
'''
if filename_nev[-4:] != '.nev':
filename_nev += '.nev'
if sep in filename_nev:
filename_nev = filename_nev.split(sep)[-1]
if filename_nev not in self.nev_asso:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nev))
        # ensure meaningful values for requested start and stop times
        # providing time in samples for a nev file does not make sense as we
        # don't know the underlying sampling rate
if isinstance(t_start, int):
raise ValueError(
'Requesting event information from nev file in samples '
'does '
'not make sense. '
'Requested t_start %s' % t_start)
if isinstance(t_stop, int):
raise ValueError(
'Requesting event information from nev file in samples '
'does '
'not make sense. '
'Requested t_stop %s' % t_stop)
# ensure meaningful values for requested start and stop times
if t_start is None or t_start < (
self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start']):
t_start = (self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start'])
if t_start > (self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is later than data are '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']),
filename_nev))
if t_stop is None or t_stop > (
self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']):
t_stop = (self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start'])
if t_stop < (self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is earlier than data '
'are '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(
self.parameters_nev[filename_nev][
't_start']
- self.parameters_global['t_start']),
filename_nev))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_nev))
data = self.__mmap_nev_file(filename_nev)
        # Extract all events of one event type and put them into an event
        # array
# TODO: Check if this is the correct way of event creation.
for event_type in self.parameters_nev[filename_nev]['event_types']:
# Extract all time stamps of digital markers and rescaling time
type_mask = [i for i in range(len(data)) if
(data[i][4] == event_type['event_id']
and data[i][5] == event_type['nttl']
and data[i][10].decode('latin-1') == event_type[
'name'])]
marker_times = [t[3] for t in
data[type_mask]] * self.nev_time_unit - \
self.parameters_global['t_start']
# only consider Events in the requested time window [t_start,
# t_stop]
time_mask = [i for i in range(len(marker_times)) if (
marker_times[i] >= t_start and marker_times[i] <= t_stop)]
marker_times = marker_times[time_mask]
# Do not create an eventarray if there are no events of this type
# in the requested time range
if len(marker_times) == 0:
continue
ev = Event(times=pq.Quantity(marker_times, units=self.nev_time_unit,
dtype="int"),
labels=event_type['name'],
name="Digital Marker " + str(event_type),
file_origin=filename_nev,
marker_id=event_type['event_id'],
digital_marker=True,
analog_marker=False,
nttl=event_type['nttl'])
seg.events.append(ev)
def read_nse(self, filename_nse, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads nse file and attaches content as spike train to provided neo
segment. Times can be provided in samples (integer values). If the
nse file does not contain a sampling rate value, the ncs sampling
rate on the same electrode is used.
Arguments:
filename_nse : Name of the .nse file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
SpikeTrain begins.
Default None.
t_stop : time or sample (quantity or integer) that the SpikeTrain
ends.
Default None.
unit_list : unit ids to be loaded. If [], all units are loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
'''
if filename_nse[-4:] != '.nse':
filename_nse += '.nse'
if sep in filename_nse:
filename_nse = filename_nse.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_nse)
if channel_id is not None:
chid = channel_id
else:
# if nse file is empty it is not listed in self.parameters_nse, but
# in self.nse_avail
if filename_nse in self.nse_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) nse file (%s). '
'Not loading nse file.' % (filename_nse))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nse))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for nse values.
if 'sampling_rate' in self.parameters_nse[chid]:
sr = self.parameters_nse[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
raise ValueError(
'No sampling rate present for channel id %i in nse file '
'%s. '
'Could also not find the sampling rate of the respective '
'ncs '
'file.' % (
chid, filename_nse))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
# + rescaling global recording start (first sample in any file type)
# This is not optimal, as there is no way to know how long the
# recording lasted after last spike
if t_start is None or t_start < (
self.parameters_nse[chid]['t_first']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_nse[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_nse[chid]['t_last']
- self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is later than data are '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nse[chid]['t_last']
- self.parameters_global['t_start']),
filename_nse))
if t_stop is None:
t_stop = (sys.maxsize) * self.nse_time_unit
if t_stop is None or t_stop > (
self.parameters_nse[chid]['t_last']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_nse[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_nse[chid]['t_first']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested times window (%s to %s) is earlier than data '
'are recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nse[chid]['t_first']
- self.parameters_global['t_start']),
filename_nse))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) for file %s.' % (t_start, t_stop, filename_nse))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_nse_packets(filename_nse)
# load all units available if unit_list==[] or None
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
'None of the requested unit ids (%s) present '
'in nse file %s (contains unit_list %s)' % (
unit_list, filename_nse, np.unique(cell_numbers)))
# extracting spikes unit-wise and generate spiketrains
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
unit_mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[unit_mask] * self.nse_time_unit
spike_times = spike_times - self.parameters_global['t_start']
time_mask = np.where(np.logical_and(spike_times >= t_start,
spike_times < t_stop))
spike_times = spike_times[time_mask]
else:
spike_times = pq.Quantity([], units=self.nse_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_nse,
unit_id=unit_i,
channel_id=chid)
if waveforms and not lazy:
# Collect all waveforms of the specific unit
# For computational reasons: no units, no time axis
st.waveforms = data_points[unit_mask][time_mask]
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
st.annotations.update(self.parameters_nse[chid])
st.annotations['electrode_id'] = chid
            # This annotation is necessary for automatic generation of
# recordingchannels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
def read_ntt(self, filename_ntt, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads ntt file and attaches content as spike train to provided neo
segment.
Arguments:
filename_ntt : Name of the .ntt file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the SpikeTrain begins. Default None.
t_stop : time (quantity) that the SpikeTrain ends. Default None.
unit_list : unit ids to be loaded. If [] or None all units are
loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
'''
if filename_ntt[-4:] != '.ntt':
filename_ntt += '.ntt'
if sep in filename_ntt:
filename_ntt = filename_ntt.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_ntt)
if channel_id is not None:
chid = channel_id
else:
# if ntt file is empty it is not listed in self.parameters_ntt, but
# in self.ntt_avail
if filename_ntt in self.ntt_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) ntt file (%s). '
'Not loading ntt file.' % (filename_ntt))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ntt))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for ntt values.
if 'sampling_rate' in self.parameters_ntt[chid]:
sr = self.parameters_ntt[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
raise ValueError(
'No sampling rate present for channel id %i in ntt file '
'%s. '
'Could also not find the sampling rate of the respective '
'ncs '
'file.' % (
chid, filename_ntt))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
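# Example (assumed interpretation, values for illustration only): an
# integer t_start of 32000 with sr = 32000 Hz corresponds to a start time
# of 1 s relative to the recording start; the quantity division above
# performs exactly this conversion.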
# + rescaling to global recording start (first sample in any
# recording file)
if t_start is None or t_start < (
self.parameters_ntt[chid]['t_first']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_ntt[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ntt[chid]['t_last']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested time window (%s to %s) starts later than the '
'recorded data (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ntt[chid]['t_last']
- self.parameters_global['t_start']),
filename_ntt))
if t_stop is None:
t_stop = (sys.maxsize) * self.ntt_time_unit
if t_stop is None or t_stop > (
self.parameters_ntt[chid]['t_last']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_ntt[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ntt[chid]['t_first']
- self.parameters_global[
't_start']):
raise ValueError(
'Requested time window (%s to %s) ends earlier than the '
'recorded data (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ntt[chid]['t_first']
- self.parameters_global['t_start']),
filename_ntt))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than or equal to stop '
'time (%s) '
'for file %s.' % (t_start, t_stop, filename_ntt))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_ntt_packets(filename_ntt)
# TODO: When ntt available: Implement 1 RecordingChannelGroup per
# Tetrode, such that each electrode gets its own recording channel
# load all units available if unit_list==[] or None
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
'None of the requested unit ids (%s) present '
'in ntt file %s (contains units %s)' % (
unit_list, filename_ntt, np.unique(cell_numbers)))
# loading data for each unit and generating spiketrain
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[mask] * self.ntt_time_unit
spike_times = spike_times - self.parameters_global['t_start']
spike_times = spike_times[np.where(
np.logical_and(spike_times >= t_start,
spike_times < t_stop))]
else:
spike_times = pq.Quantity([], units=self.ntt_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_ntt,
unit_id=unit_i,
channel_id=chid)
# Collect all waveforms of the specific unit
if waveforms and not lazy:
# For computational reasons: no units, no time axis
# transposing to adhere to the neo guideline, which states that
# time should be in the first axis.
# Note: this layout is counter-intuitive, but it is what downstream
# neo code expects.
st.waveforms = np.array(
[data_points[t, :, :] for t in range(len(timestamps))
if cell_numbers[t] == unit_i]).transpose()
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
st.annotations.update(self.parameters_ntt[chid])
st.annotations['electrode_id'] = chid
# This annotation is necessary for the automatic generation of
# recording channels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
# private routines
# #################################################
def _associate(self, cachedir=None, usecache='hash'):
"""
Associates the object with a specified Neuralynx session, i.e., a
combination of .ncs, .nse, .ntt and .nev files. The metadata is read
into the object for future reference.
Arguments:
cachedir : Directory for loading and saving hashes of recording
sessions
and pickled meta information about files
extracted during
association process
usecache: Method used for cache identification. Possible values:
'hash'/'always'/'datesize'/'never'. Default 'hash'
Returns:
-
"""
# If already associated, disassociate first
if self.associated:
raise OSError(
"Trying to associate an already associated NeuralynxIO "
"object.")
# Create parameter containers
# Dictionary that holds different parameters read from the .nse files
self.parameters_nse = {}
# List of parameter dictionaries for all potential file types
self.parameters_ncs = {}
self.parameters_nev = {}
self.parameters_ntt = {}
# combined global parameters
self.parameters_global = {}
# Scanning session directory for recorded files
self.sessionfiles = [f for f in listdir(self.sessiondir) if
isfile(os.path.join(self.sessiondir, f))]
# Listing available files
self.ncs_avail = []
self.nse_avail = []
self.nev_avail = []
self.ntt_avail = []
# Listing associated (= non-corrupted, non-empty) files
self.ncs_asso = []
self.nse_asso = []
self.nev_asso = []
self.ntt_asso = []
if usecache not in ['hash', 'always', 'datesize', 'never']:
raise ValueError(
"Argument value of usecache '%s' is not valid. Accepted "
"values are 'hash','always','datesize','never'" % usecache)
if cachedir is None and usecache != 'never':
raise ValueError('No cache directory provided.')
# check whether any of the data files have changed; if so, rerun the
# file checks below. Checks are skipped only if usecache == 'always'.
check_files = (usecache != 'always')
if cachedir is not None and usecache != 'never':
self._diagnostic_print(
'Calculating %s of session files to check for cached '
'parameter files.' % usecache)
cachefile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys'
if not os.path.exists(cachedir + sep + self.sessiondir.split(sep)[-1]):
os.makedirs(cachedir + sep + self.sessiondir.split(sep)[-1])
if usecache == 'hash':
hashes_calc = {}
# calculates hash of all available files
for f in self.sessionfiles:
file_hash = self.hashfile(open(self.sessiondir + sep + f,
'rb'), hashlib.sha256())
hashes_calc[f] = file_hash
elif usecache == 'datesize':
hashes_calc = {}
for f in self.sessionfiles:
hashes_calc[f] = self.datesizefile(
self.sessiondir + sep + f)
# load hashes saved for this session in an earlier loading run
if os.path.exists(cachefile):
hashes_read = pickle.load(open(cachefile, 'rb'))
else:
hashes_read = {}
# compare hashes to previously saved metadata and load the metadata
# if no changes occurred
if usecache == 'always' or all([f in hashes_calc
and f in hashes_read
and hashes_calc[f] == hashes_read[f]
for f in self.sessionfiles]):
check_files = False
self._diagnostic_print(
'Using cached metadata from earlier analysis run in '
'file '
'%s. Skipping file checks.' % cachefile)
# loading saved parameters
parameterfile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache'
if os.path.exists(parameterfile):
parameters_read = pickle.load(open(parameterfile, 'rb'))
else:
raise OSError('Inconsistent cache files.')
for IOdict, dictname in [(self.parameters_global, 'global'),
(self.parameters_ncs, 'ncs'),
(self.parameters_nse, 'nse'),
(self.parameters_nev, 'nev'),
(self.parameters_ntt, 'ntt')]:
IOdict.update(parameters_read[dictname])
self.nev_asso = self.parameters_nev.keys()
self.ncs_asso = [val['filename'] for val in
self.parameters_ncs.values()]
self.nse_asso = [val['filename'] for val in
self.parameters_nse.values()]
self.ntt_asso = [val['filename'] for val in
self.parameters_ntt.values()]
for filename in self.sessionfiles:
# Extracting only continuous signal files (.ncs)
if filename[-4:] == '.ncs':
self.ncs_avail.append(filename)
elif filename[-4:] == '.nse':
self.nse_avail.append(filename)
elif filename[-4:] == '.nev':
self.nev_avail.append(filename)
elif filename[-4:] == '.ntt':
self.ntt_avail.append(filename)
else:
self._diagnostic_print(
'Ignoring file of unknown data type %s' % filename)
if check_files:
self._diagnostic_print('Starting individual file checks.')
# =======================================================================
# # Scan NCS files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ncs file(s).' % (len(self.ncs_avail)))
for ncs_file in self.ncs_avail:
# Loading individual NCS file and extracting parameters
self._diagnostic_print("Scanning " + ncs_file + ".")
# Reading file packet headers
filehandle = self.__mmap_ncs_packet_headers(ncs_file)
if filehandle is None:
continue
try:
# Checking consistency of ncs file
self.__ncs_packet_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ncs_file)
continue
# Reading data packet header information and store them in
# parameters_ncs
self.__read_ncs_data_headers(filehandle, ncs_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(ncs_file)
self.__read_text_header(ncs_file,
self.parameters_ncs[channel_id])
# Check for invalid starting times of data packets in ncs file
self.__ncs_invalid_first_sample_check(filehandle)
# Check ncs file for gaps
self.__ncs_gap_check(filehandle)
self.ncs_asso.append(ncs_file)
# =======================================================================
# # Scan NSE files
# =======================================================================
# Loading individual NSE file and extracting parameters
self._diagnostic_print(
'\nDetected %i .nse file(s).' % (len(self.nse_avail)))
for nse_file in self.nse_avail:
# Loading individual NSE file and extracting parameters
self._diagnostic_print('Scanning ' + nse_file + '.')
# Reading file
filehandle = self.__mmap_nse_packets(nse_file)
if filehandle is None:
continue
try:
# Checking consistency of nse file
self.__nse_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nse_file)
continue
# Reading header information and store them in parameters_nse
self.__read_nse_data_header(filehandle, nse_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(nse_file)
self.__read_text_header(nse_file,
self.parameters_nse[channel_id])
# using sampling rate from txt header, as this is not saved
# in data packets
if 'SamplingFrequency' in self.parameters_nse[channel_id]:
self.parameters_nse[channel_id]['sampling_rate'] = \
(self.parameters_nse[channel_id][
'SamplingFrequency'] * self.nse_sr_unit)
self.nse_asso.append(nse_file)
# =======================================================================
# # Scan NEV files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .nev file(s).' % (len(self.nev_avail)))
for nev_file in self.nev_avail:
# Loading individual NEV file and extracting parameters
self._diagnostic_print('Scanning ' + nev_file + '.')
# Reading file
filehandle = self.__mmap_nev_file(nev_file)
if filehandle is None:
continue
try:
# Checking consistency of nev file
self.__nev_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nev_file)
continue
# Reading header information and store them in parameters_nev
self.__read_nev_data_header(filehandle, nev_file)
# Reading txt file header
self.__read_text_header(nev_file, self.parameters_nev[nev_file])
self.nev_asso.append(nev_file)
# =======================================================================
# # Scan NTT files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ntt file(s).' % (len(self.ntt_avail)))
for ntt_file in self.ntt_avail:
# Loading individual NTT file and extracting parameters
self._diagnostic_print('Scanning ' + ntt_file + '.')
# Reading file
filehandle = self.__mmap_ntt_file(ntt_file)
if filehandle is None:
continue
try:
# Checking consistency of ntt file
self.__ntt_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ntt_file)
continue
# Reading header information and store them in parameters_ntt
self.__read_ntt_data_header(filehandle, ntt_file)
# Reading txt file header
self.__read_ntt_text_header(ntt_file)
# using sampling rate from txt header, as this is not saved
# in data packets
channel_id = self.get_channel_id_by_file_name(ntt_file)
if 'SamplingFrequency' in self.parameters_ntt[channel_id]:
self.parameters_ntt[channel_id]['sampling_rate'] = \
(self.parameters_ntt[channel_id][
'SamplingFrequency'] * self.ntt_sr_unit)
self.ntt_asso.append(ntt_file)
# =======================================================================
# # Check consistency across files
# =======================================================================
# check RECORDING_OPENED / CLOSED times (from txt header) for
# different files
for parameter_collection in [self.parameters_ncs,
self.parameters_nse,
self.parameters_nev,
self.parameters_ntt]:
# check recording_opened times for specific file types
if any(np.abs(np.diff([i['recording_opened'] for i in
parameter_collection.values()]))
> datetime.timedelta(seconds=1)):
raise ValueError(
'Session files were opened for recording with a delay '
'greater than 1 second.')
# check recording_closed times for specific file types
if any(np.diff([i['recording_closed'] for i in
parameter_collection.values()
if i['recording_closed'] is not None])
> datetime.timedelta(seconds=0.1)):
raise ValueError(
'Session files were closed after recording with a '
'delay greater than 0.1 second.')
# get maximal duration of any file in the recording
parameter_collection = list(self.parameters_ncs.values()) + \
list(self.parameters_nse.values()) + \
list(self.parameters_ntt.values()) + \
list(self.parameters_nev.values())
self.parameters_global['recording_opened'] = min(
[i['recording_opened'] for i in parameter_collection])
self.parameters_global['recording_closed'] = max(
[i['recording_closed'] for i in parameter_collection])
# Set up GLOBAL TIMING SCHEME
# #############################
for file_type, parameter_collection in [
('ncs', self.parameters_ncs), ('nse', self.parameters_nse),
('nev', self.parameters_nev), ('ntt', self.parameters_ntt)]:
# check starting times
name_t1, name_t2 = ['t_start', 't_stop'] if (
file_type != 'nse' and file_type != 'ntt') \
else ['t_first', 't_last']
# checking if files of same type start at same time point
if file_type != 'nse' and file_type != 'ntt' \
and len(np.unique(np.array(
[i[name_t1].magnitude for i in
parameter_collection.values()]))) > 1:
raise ValueError(
'%s files do not start at same time point.' %
file_type)
# saving t_start and t_stop for each file type available
if len([i[name_t1] for i in parameter_collection.values()]):
self.parameters_global['%s_t_start' % file_type] = min(
[i[name_t1]
for i in parameter_collection.values()])
self.parameters_global['%s_t_stop' % file_type] = min(
[i[name_t2]
for i in parameter_collection.values()])
# extracting minimial t_start and maximal t_stop value for this
# recording session
self.parameters_global['t_start'] = min(
[self.parameters_global['%s_t_start' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
if '%s_t_start' % t in self.parameters_global])
self.parameters_global['t_stop'] = max(
[self.parameters_global['%s_t_stop' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
if '%s_t_start' % t in self.parameters_global])
# checking gap consistency across ncs files
# check number of gaps detected
if len(np.unique([len(i['gaps']) for i in
self.parameters_ncs.values()])) != 1:
raise ValueError('NCS files contain different numbers of gaps!')
# check consistency of gaps across files and create global gap
# collection
self.parameters_global['gaps'] = []
for g in range(len(list(self.parameters_ncs.values())[0]['gaps'])):
integrated = False
gap_stats = np.unique(
[i['gaps'][g] for i in self.parameters_ncs.values()],
return_counts=True)
if len(gap_stats[0]) != 3 or len(np.unique(gap_stats[1])) != 1:
raise ValueError(
'Gap number %i is not consistent across NCS '
'files.' % (
g))
else:
# check if this is second part of already existing gap
for gg in range(len(self.parameters_global['gaps'])):
globalgap = self.parameters_global['gaps'][gg]
# check if stop time of first is start time of second
# -> continuous gap
if globalgap[2] == \
list(self.parameters_ncs.values())[0]['gaps'][
g][1]:
self.parameters_global['gaps'][gg] = \
self.parameters_global['gaps'][gg][:2] + (
list(self.parameters_ncs.values())[0][
'gaps'][g][
2],)
integrated = True
break
if not integrated:
# add as new gap if this is not a continuation of
# existing global gap
self.parameters_global['gaps'].append(
list(self.parameters_ncs.values())[0][
'gaps'][g])
# save results of association for future analysis together with hash
# values for change tracking
if cachedir is not None and usecache != 'never':
pickle.dump({'global': self.parameters_global,
'ncs': self.parameters_ncs,
'nev': self.parameters_nev,
'nse': self.parameters_nse,
'ntt': self.parameters_ntt},
open(cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache', 'wb'))
if usecache != 'always':
pickle.dump(hashes_calc, open(
cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys', 'wb'))
self.associated = True
# private routines
# #########################################################
# Memory Mapping Methods
def __mmap_nse_packets(self, filename):
"""
Memory map of the Neuralynx .nse file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
shape=((filesize - 16384) // 2 // 56, 56),
mode='r', offset=16384)
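# Assumed record layout behind the '<u2' view (56 words = 112 bytes per
# spike): words 0-3 timestamp (64 bit), 4-5 channel id, 6-7 cell number,
# 8-23 eight 32-bit features, 24-55 thirty-two 16-bit waveform samples.
# This is inferred from the slicing below, not from external documentation.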
# reconstructing original data
# first 4 ints -> timestamp in microsec
timestamps = data[:, 0] \
+ data[:, 1] * 2 ** 16 \
+ data[:, 2] * 2 ** 32 \
+ data[:, 3] * 2 ** 48
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
data_points = data[:, 24:56].astype('i2')
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
def __mmap_ncs_data(self, filename):
""" Memory map of the Neuralynx .ncs file optimized for data
extraction"""
if getsize(self.sessiondir + sep + filename) > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype=np.dtype(('i2', (522))), mode='r',
offset=16384)
# removing data packet headers and flattening data
return data[:, 10:]
else:
return None
def __mmap_ncs_packet_headers(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=((filesize - 16384) // 4 // 261, 261),
mode='r', offset=16384)
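# Assumed packet layout behind the '<u4' view (261 words = 1044 bytes per
# packet): words 0-1 timestamp (64 bit), 2-4 header (channel id, sampling
# rate, number of valid samples), 5-260 the 512 16-bit samples.
# Inferred from the slicing below, not from external documentation.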
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + (data[:,1] *2**32)
header_u4 = data[:, 2:5]
return timestamps, header_u4
else:
return None
def __mmap_ncs_packet_timestamps(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=(int((filesize - 16384) / 4 / 261), 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + data[:,1]*2**32
return timestamps
else:
return None
def __mmap_nev_file(self, filename):
""" Memory map the Neuralynx .nev file """
nev_dtype = np.dtype([
('reserved', '<i2'),
('system_id', '<i2'),
('data_size', '<i2'),
('timestamp', '<u8'),
('event_id', '<i2'),
('ttl_input', '<i2'),
('crc_check', '<i2'),
('dummy1', '<i2'),
('dummy2', '<i2'),
('extra', '<i4', (8,)),
('event_string', 'a128'),
])
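# The dtype above implies a fixed event record size of
# 2+2+2+8+2+2+2+2+2+(8*4)+128 = 184 bytes (derived from the field sizes
# declared here, not from external documentation).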
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=nev_dtype, mode='r', offset=16384)
else:
return None
def __mmap_ntt_file(self, filename):
""" Memory map the Neuralynx .nse file """
nse_dtype = np.dtype([
('timestamp', '<u8'),
('sc_number', '<u4'),
('cell_number', '<u4'),
('params', '<u4', (8,)),
('data', '<i2', (32, 4)),
])
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=nse_dtype, mode='r', offset=16384)
else:
return None
def __mmap_ntt_packets(self, filename):
"""
Memory map of the Neuralynx .ntt file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
shape=((filesize - 16384) // 2 // 152, 152),
mode='r', offset=16384)
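# Assumed record layout behind the '<u2' view (152 words = 304 bytes per
# spike): words 0-3 timestamp, 4-5 channel/sc number, 6-7 cell number,
# 8-23 eight 32-bit features, 24-151 the 32x4 16-bit waveform samples
# (32 samples on each of the 4 tetrode channels).
# Inferred from the slicing below, not from external documentation.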
# reconstructing original data
# first 4 ints -> timestamp in microsec
timestamps = data[:, 0] + data[:, 1] * 2 ** 16 + \
data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
data_points = data[:, 24:152].astype('i2').reshape((-1, 32, 4))
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
# ___________________________ header extraction __________________________
def __read_text_header(self, filename, parameter_dict):
# Reading main file header (plain text, 16kB)
text_header = codecs.open(self.sessiondir + sep + filename, 'r',
'latin-1').read(16384)
parameter_dict['cheetah_version'] = \
self.__get_cheetah_version_from_txt_header(text_header, filename)
parameter_dict.update(self.__get_filename_and_times_from_txt_header(
text_header, parameter_dict['cheetah_version']))
# separating lines of header and ignoring last line (fill), check if
# Linux or Windows OS
if sep == '/':
text_header = text_header.split('\r\n')[:-1]
if sep == '\\':
text_header = text_header.split('\n')[:-1]
# minor parameters possibly saved in header (for any file type)
minor_keys = ['AcqEntName',
'FileType',
'FileVersion',
'RecordSize',
'HardwareSubSystemName',
'HardwareSubSystemType',
'SamplingFrequency',
'ADMaxValue',
'ADBitVolts',
'NumADChannels',
'ADChannel',
'InputRange',
'InputInverted',
'DSPLowCutFilterEnabled',
'DspLowCutFrequency',
'DspLowCutNumTaps',
'DspLowCutFilterType',
'DSPHighCutFilterEnabled',
'DspHighCutFrequency',
'DspHighCutNumTaps',
'DspHighCutFilterType',
'DspDelayCompensation',
'DspFilterDelay_\xb5s',
'DisabledSubChannels',
'WaveformLength',
'AlignmentPt',
'ThreshVal',
'MinRetriggerSamples',
'SpikeRetriggerTime',
'DualThresholding',
'Feature Peak 0',
'Feature Valley 1',
'Feature Energy 2',
'Feature Height 3',
'Feature NthSample 4',
'Feature NthSample 5',
'Feature NthSample 6',
'Feature NthSample 7',
'SessionUUID',
'FileUUID',
'CheetahRev',
'ProbeName',
'OriginalFileName',
'TimeCreated',
'TimeClosed',
'ApplicationName',
'AcquisitionSystem',
'ReferenceChannel']
# extracting minor key values of header (only taking into account
# non-empty lines)
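# A typical header line is assumed to look like '-SamplingFrequency 32000'
# (the exact text is an assumption): the leading '-' is stripped, the key
# is matched against minor_keys and the remainder is parsed as
# int/float/str.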
for i, minor_entry in enumerate(text_header):
if minor_entry == '' or minor_entry[0] == '#':
continue
matching_key = [key for key in minor_keys if
minor_entry.strip('-').startswith(key)]
if len(matching_key) == 1:
matching_key = matching_key[0]
minor_value = minor_entry.split(matching_key)[1].strip(
' ').rstrip(' ')
# determine data type of entry
if minor_value.isdigit():
# converting to int if possible
minor_value = int(minor_value)
else:
# converting to float if possible
try:
minor_value = float(minor_value)
except ValueError:
pass
if matching_key in parameter_dict:
warnings.warn(
'Multiple entries for {} in text header of {}'.format(
matching_key, filename))
else:
parameter_dict[matching_key] = minor_value
elif len(matching_key) > 1:
raise ValueError(
'Inconsistent minor key list for text header '
'interpretation.')
else:
warnings.warn(
'Skipping text header entry %s, because it is not in '
'minor key list' % minor_entry)
self._diagnostic_print(
'Successfully decoded text header of file (%s).' % filename)
def __get_cheetah_version_from_txt_header(self, text_header, filename):
version_regex = re.compile(r'((-CheetahRev )|'
r'(ApplicationName Cheetah "))'
r'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
match = version_regex.search(text_header)
if match:
return match.groupdict()['version']
else:
raise ValueError('Can not extract Cheetah version from file '
'header of file %s' % filename)
def __get_filename_and_times_from_txt_header(self, text_header, version):
if parse_version(version) <= parse_version('5.6.4'):
datetime1_regex = re.compile(r'## Time Opened \(m/d/y\): '
r'(?P<date>\S+)'
r' \(h:m:s\.ms\) '
r'(?P<time>\S+)')
datetime2_regex = re.compile(r'## Time Closed \(m/d/y\): '
r'(?P<date>\S+)'
r' \(h:m:s\.ms\) '
r'(?P<time>\S+)')
filename_regex = re.compile(r'## File Name (?P<filename>\S+)')
datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
else:
datetime1_regex = re.compile(r'-TimeCreated '
r'(?P<date>\S+) '
r'(?P<time>\S+)')
datetime2_regex = re.compile(r'-TimeClosed '
r'(?P<date>\S+) '
r'(?P<time>\S+)')
filename_regex = re.compile(r'-OriginalFileName '
r'"?(?P<filename>\S+)"?')
datetimeformat = '%Y/%m/%d %H:%M:%S'
matchtime1 = datetime1_regex.search(text_header).groupdict()
matchtime2 = datetime2_regex.search(text_header).groupdict()
matchfilename = filename_regex.search(text_header)
filename = matchfilename.groupdict()['filename']
if '## Time Closed File was not closed properly' in text_header:
warnings.warn('Text header of file %s does not contain recording '
'closed time. File was not closed properly.'
'' % filename)
datetime1 = datetime.datetime.strptime(matchtime1['date'] + ' '
+ matchtime1['time'],
datetimeformat)
datetime2 = datetime.datetime.strptime(matchtime2['date'] + ' '
+ matchtime2['time'],
datetimeformat)
output = {'recording_opened': datetime1,
'recording_closed': datetime2,
'file_created': datetime1,
'file_closed': datetime2,
'recording_file_name': filename}
return output
def __read_ncs_data_headers(self, filehandle, filename):
'''
Reads the .ncs data block headers and stores the information in the
object's parameters_ncs dictionary.
Args:
filehandle (file object):
Handle to the already opened .ncs file.
filename (string):
Name of the ncs file.
Returns:
dict of extracted data
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0][0]
sr = header_u4[0][1] # in Hz
t_start = timestamps[0] # in microseconds
# calculating the time stamp of the first sample that was no longer
# recorded, i.e. the end of the data:
# t_stop = time of first sample in last packet + (#samples per packet *
# conversion factor / sampling rate)
# the conversion factor is needed as times are recorded in microseconds
t_stop = timestamps[-1] + (
(header_u4[-1][2]) * (
1 / self.ncs_time_unit.rescale(pq.s)).magnitude
/ header_u4[-1][1])
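# Worked example (values assumed for illustration only): with microsecond
# timestamps, 512 valid samples in the last packet and a 32 kHz sampling
# rate, t_stop = timestamps[-1] + 512 * 1e6 / 32000
#             = timestamps[-1] + 16000 microseconds.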
if channel_id in self.parameters_ncs:
raise ValueError(
'Detected multiple ncs files for channel_id %i.'
% channel_id)
else:
sampling_unit = [pq.CompoundUnit('%f*%s'
'' % (sr,
self.ncs_sr_unit.symbol))]
sampling_rate = sr * self.ncs_sr_unit
self.parameters_ncs[channel_id] = {'filename': filename,
't_start': t_start
* self.ncs_time_unit,
't_stop': t_stop
* self.ncs_time_unit,
'sampling_rate': sampling_rate,
'sampling_unit': sampling_unit,
'gaps': []}
return {channel_id: self.parameters_ncs[channel_id]}
def __read_nse_data_header(self, filehandle, filename):
'''
Reads the .nse data block headers and stores the information in the
object's parameters_nse dictionary.
Args:
filehandle (file object):
Handle to the already opened .nse file.
filename (string):
Name of the nse file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
self.parameters_nse[channel_id] = {'filename': filename,
't_first': t_first
* self.nse_time_unit,
't_last': t_last
* self.nse_time_unit,
'cell_count': cell_count}
def __read_ntt_data_header(self, filehandle, filename):
'''
Reads the .ntt data block headers and stores the information in the
object's parameters_ntt dictionary.
Args:
filehandle (file object):
Handle to the already opened .ntt file.
filename (string):
Name of the ntt file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
# spike_parameters = filehandle[0][3]
# else:
# t_first = None
# channel_id = None
# cell_count = 0
# # spike_parameters = None
#
# self._diagnostic_print('Empty file: No information
# contained in %s'%filename)
self.parameters_ntt[channel_id] = {'filename': filename,
't_first': t_first
* self.ntt_time_unit,
't_last': t_last
* self.ntt_time_unit,
'cell_count': cell_count}
def __read_nev_data_header(self, filehandle, filename):
'''
Reads the .nev data block headers and stores the relevant information
in the
object's parameters_nev dictionary.
Args:
filehandle (file object):
Handle to the already opened .nev file.
filename (string):
Name of the nev file.
Returns:
-
'''
# Extracting basic recording events to be able to check recording
# consistency
if filename in self.parameters_nev:
raise ValueError(
'Detected multiple nev files of name %s.' % (filename))
else:
self.parameters_nev[filename] = {}
if 'Starting_Recording' in self.parameters_nev[filename]:
raise ValueError('Trying to read second nev file of name %s. '
' Only one can be handled.' % filename)
self.parameters_nev[filename]['Starting_Recording'] = []
self.parameters_nev[filename]['events'] = []
for event in filehandle:
# separately extracting 'Starting Recording'
if ((event[4] in [11, 19])
and (event[10].decode('latin-1') == 'Starting Recording')):
self.parameters_nev[filename]['Starting_Recording'].append(
event[3] * self.nev_time_unit)
# adding all events to parameter collection
self.parameters_nev[filename]['events'].append(
{'timestamp': event[3] * self.nev_time_unit,
'event_id': event[4],
'nttl': event[5],
'name': event[10].decode('latin-1')})
if len(self.parameters_nev[filename]['Starting_Recording']) < 1:
raise ValueError(
'No Event "Starting_Recording" detected in %s' % (
filename))
self.parameters_nev[filename]['t_start'] = min(
self.parameters_nev[filename]['Starting_Recording'])
# t_stop = time stamp of last event in file
self.parameters_nev[filename]['t_stop'] = max(
[e['timestamp'] for e in
self.parameters_nev[filename]['events']])
# extract all occurring event types (= combination of nttl,
# event_id and name/string)
event_types = copy.deepcopy(self.parameters_nev[filename]['events'])
for d in event_types:
d.pop('timestamp')
self.parameters_nev[filename]['event_types'] = [dict(y) for y in
{tuple(
x.items())
for x in
event_types}]
# ________________ File Checks __________________________________
def __ncs_packet_check(self, filehandle):
'''
Checks consistency of data in ncs file and raises assertion error if a
check fails. Detected recording gaps are added to parameter_ncs
Args:
filehandle (file object):
Handle to the already opened .ncs file.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
# checking sampling rate of data packets
sr0 = header_u4[0, 1]
assert all(header_u4[:, 1] == sr0)
# checking channel id of data packets
channel_id = header_u4[0, 0]
assert all(header_u4[:, 0] == channel_id)
# time offset of data packets
# TODO: Check if there is a safer way to do the delta_t check for ncs
# data packets
# this relies on the (not entirely safe) assumption that the first two
# data packets have correct time stamps
delta_t = timestamps[1] - timestamps[0]
# valid samples of first data packet
temp_valid_samples = header_u4[0, 2]
# unit test
# time difference between packets corresponds to number of recorded
# samples
assert delta_t == (
temp_valid_samples / (
self.ncs_time_unit.rescale(pq.s).magnitude * sr0))
self._diagnostic_print('NCS packet check successful.')
def __nse_check(self, filehandle):
'''
Checks consistency of data in nse file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nse file.
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NSE file check successful.')
def __nev_check(self, filehandle):
'''
Checks consistency of data in nev file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nev file.
'''
# according to the Neuralynx file description this entry should always
# equal 2, but values of 0 have also been observed in practice.
assert all([f[2] == 2 or f[2] == 0 for f in filehandle])
# TODO: check with more nev files, if index 0,1,2,6,7,8 and 9 can be
# non-zero. Interpretation? Include in event extraction.
# only observed 0 for index 0,1,2,6,7,8,9 in nev files.
# If they are non-zero, this needs to be included in event extraction
assert all([f[0] == 0 for f in filehandle])
assert all([f[1] == 0 for f in filehandle])
assert all([f[2] in [0, 2] for f in filehandle])
assert all([f[6] == 0 for f in filehandle])
assert all([f[7] == 0 for f in filehandle])
assert all([f[8] == 0 for f in filehandle])
assert all([all(f[9] == 0) for f in filehandle])
self._diagnostic_print('NEV file check successful.')
def __ntt_check(self, filehandle):
'''
Checks consistency of data in ntt file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .ntt file.
'''
# TODO: check this when first .ntt files are available
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NTT file check successful.')
def __ncs_gap_check(self, filehandle):
'''
Checks individual data blocks of ncs files for consistent starting
times with respect to sample count.
This covers intended recording gaps as well as shortened data packets,
which are incomplete.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
if channel_id not in self.parameters_ncs:
self.parameters_ncs[channel_id] = {}
# time stamps of data packets
delta_t = timestamps[1] - timestamps[0] # in microsec
data_packet_offsets = np.diff(timestamps) # in microsec
# check if delta_t corresponds to number of valid samples present in
# data packets
# NOTE: This also detects recording gaps!
valid_samples = header_u4[:-1, 2]
sampling_rate = header_u4[0, 1]
packet_checks = (valid_samples / (self.ncs_time_unit.rescale(
pq.s).magnitude * sampling_rate)) == data_packet_offsets
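# Example (assumed values): 512 valid samples at 32 kHz give an expected
# offset of 512 / (1e-6 * 32000) = 16000 microseconds between consecutive
# packet time stamps; larger offsets indicate recording gaps.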
if not all(packet_checks):
if 'broken_packets' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['broken_packets'] = []
broken_packets = np.where(np.logical_not(packet_checks))[0]
for broken_packet in broken_packets:
self.parameters_ncs[channel_id]['broken_packets'].append(
(broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet]))
self._diagnostic_print('Detected broken packet in NCS file at '
'packet id %i (sample number %i '
'time offset id %i)'
'' % (broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet])
) # in microsec
# checking for irregular data packet durations -> gaps / shortened
# data packets
if not all(data_packet_offsets == delta_t):
if 'gaps' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['gaps'] = []
# gap identification by (sample of gap start, duration)
# gap packets
gap_packet_ids = np.where(data_packet_offsets != delta_t)[0]
for gap_packet_id in gap_packet_ids:
# skip if this packet starting time is known to be corrupted
# hoping no corruption and gap occurs simultaneously
# corrupted time stamp affects two delta_t comparisons:
if gap_packet_id in self.parameters_ncs[channel_id][
'invalid_first_samples'] \
or gap_packet_id + 1 in self.parameters_ncs[channel_id][
'invalid_first_samples']:
continue
gap_start = timestamps[
gap_packet_id] # t_start of last packet [microsec]
gap_stop = timestamps[
gap_packet_id + 1] # t_stop of first packet [microsec]
# gap entry: (packet id, gap start [microsec], gap stop [microsec])
self.parameters_ncs[channel_id]['gaps'].append(
(gap_packet_id, gap_start, gap_stop))
self._diagnostic_print('Detected gap in NCS file between '
'sample time %i and %i (last correct '
'packet id %i)' % (gap_start, gap_stop,
gap_packet_id))
def __ncs_invalid_first_sample_check(self, filehandle):
'''
Checks data blocks of ncs files for corrupted starting times indicating
a missing first sample in the data packet. These packets are then
excluded from the gap check and ignored in further analysis.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
self.parameters_ncs[channel_id]['invalid_first_samples'] = []
# checking if first bit of timestamp is 1, which indicates error
invalid_packet_ids = np.where(timestamps >= 2 ** 55)[0]
if len(invalid_packet_ids) > 0:
warnings.warn('Invalid first sample(s) detected in ncs file '
'(packet id(s) %s)! This error is ignored in '
'subsequent routines.' % (invalid_packet_ids))
self.parameters_ncs[channel_id][
'invalid_first_samples'] = invalid_packet_ids
# checking consistency of data around corrupted packet time
for invalid_packet_id in invalid_packet_ids:
if invalid_packet_id < 2 or invalid_packet_id > len(
filehandle) - 2:
raise ValueError(
'Corrupted ncs data packet at the beginning '
'or end of file.')
elif (timestamps[invalid_packet_id + 1] - timestamps[
invalid_packet_id - 1] != 2 * (
timestamps[invalid_packet_id - 1] - timestamps[
invalid_packet_id - 2])):
raise ValueError('Starting times of ncs data packets around '
'corrupted data packet are not '
'consistent!')
# Supplementory Functions
def get_channel_id_by_file_name(self, filename):
"""
Checks the parameters of the NCS, NSE and NTT files for the given
filename and returns the channel_id if the result is consistent.
:param filename:
:return:
"""
channel_ids = []
channel_ids += [k for k in self.parameters_ncs if
self.parameters_ncs[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_nse if
self.parameters_nse[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_ntt if
self.parameters_ntt[k]['filename'] == filename]
if len(np.unique(np.asarray(channel_ids))) == 1:
return channel_ids[0]
elif len(channel_ids) > 1:
raise ValueError(
'Ambiguous channel ids detected. Filename %s is associated '
'with different channels across NCS, NSE and NTT files: %s'
'' % (filename, channel_ids))
else: # if filename was not detected
return None
def hashfile(self, afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
def datesizefile(self, filename):
return str(os.path.getmtime(filename)) + '_' + str(
os.path.getsize(filename))
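# The returned key has the form '<mtime>_<size>', e.g.
# '1443018588.0_1048576' (illustrative values only); it serves as a cheap
# change-detection key when usecache == 'datesize'.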
def _diagnostic_print(self, text):
'''
Print a diagnostic message.
Args:
text (string):
Diagnostic text to print.
Returns:
-
'''
if self._print_diagnostic:
print('NeuralynxIO: ' + text)
| bsd-3-clause |
rlouf/patterns-of-segregation | bin/plot_scaling_classes.py | 1 | 3443 | """plot_income_scaling.py
Plot the number of households from a given class as a function of the total
number of households per city
"""
import csv
import math
from matplotlib import pylab as plt
from scipy.stats import linregress
colours = {'Lower':'#4F8F6B',
'Higher':'#C1A62E',
'Middle':'#4B453C'}
# Puerto-rican cities are excluded from the analysis
PR_cities = ['7442','0060','6360','4840']
#
# Read data
#
## List of MSA
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
if rows[0] not in PR_cities:
msa[rows[0]] = rows[1]
## Classes
classes = {}
with open('extr/classes/msa_average/classes.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
classes[rows[0]] =[int(r) for r in rows[1:]]
## Number of households per class, and total
households_class = {cl:[] for cl in classes}
households = []
for i, city in enumerate(msa):
print "Compute number of households for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import households data
incomes = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
num_cat = len(rows[1:])
incomes[rows[0]] = {cl: sum([int(rows[1+c]) for c in classes[cl]])
for cl in classes}
incomes_cl = {cl: sum([incomes[au][cl] for au in incomes])
for cl in classes}
for cl in classes:
households_class[cl].append(incomes_cl[cl])
households.append(sum(incomes_cl.values()))
#
# Fit
#
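# A linear fit in log-log space corresponds to the power law
# H_cl ~ exp(intercept) * H**slope, which is the curve drawn in the plots
# below.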
slopes = {}
r_values = {}
intercepts = {}
for cl in classes:
print "Power-law fit for %s income class"%cl
slope, intercept, r_value, p_value, std_err = linregress([math.log(p) for
p in households],[math.log(d) for d in households_class[cl]])
slopes[cl] = slope
r_values[cl] = r_value
intercepts[cl] = intercept
print "alpha = %s (R^2=%s)"%(slope, r_value)
#
# Plot
#
fig = plt.figure(figsize=(24,8))
for i,cl in enumerate(classes):
ax = fig.add_subplot(1, len(classes), i+1)
ax.plot(households, households_class[cl], 'o', color=colours[cl],
mec=colours[cl], label=r'$%s$'%cl)
ax.plot(sorted(households),
[math.exp(intercepts[cl])*h**slopes[cl] for h in sorted(households)],
label=r'$H_{%s} \sim H^{\,%.2f}$'%(cl, slopes[cl]),
linestyle='--',
color='black')
ax.set_xlabel(r'$H$', fontsize=20)
ax.set_ylabel(r'$H_{%s}$'%cl, fontsize=20)
ax.set_xscale('log')
ax.set_yscale('log')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 10)) # outward by 10 points
ax.spines['bottom'].set_position(('outward', 10)) # outward by 10 points
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.legend(loc='upper left', numpoints=1, frameon=False)
plt.savefig('figures/paper/si/scaling_class.pdf', bbox_inches='tight')
plt.show()
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/tests/test_multiclass.py | 8 | 21910 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
"""Test that ovr works with classes that are always present or absent."""
# Note: tests is the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# test that ties are broken using the decision function, not defaulting to
# the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
# recalculate votes to make sure we have a tie
predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
scores = np.vstack([clf.decision_function(X)
for clf in multi_clf.estimators_])
# classifiers are in order 0-1, 0-2, 1-2
# aggregate votes:
votes = np.zeros((4, 3))
votes[np.arange(4), predictions[0]] += 1
votes[np.arange(4), 2 * predictions[1]] += 1
votes[np.arange(4), 1 + predictions[2]] += 1
# for the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# for the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# for the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], 0)
# in the zero-one classifier, the score for 0 is greater than the score for
# one.
assert_greater(scores[0][0], scores[0][1])
# score for one is greater than score for zero
assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
# score for one is greater than score for two
assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
"Test that the OvO doesn't screw the encoding of string labels"
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
svc = LinearSVC()
ovo = OneVsOneClassifier(svc)
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb, X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
dshen1/trading-with-python | lib/functions.py | 76 | 11627 | # -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # in descending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
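# Illustrative usage sketch for pca(): a minimal example on random data.
# The DataFrame contents and column names below are invented for the example.
def _pcaExample():
    ''' minimal sketch of calling pca() on a small random DataFrame '''
    rnd = np.random.RandomState(0)
    A = DataFrame(rnd.randn(100, 3), columns=['a', 'b', 'c'])
    coeff, transform, latent = pca(A)
    # latent holds the eigenvalues sorted in descending order,
    # transform holds the observations expressed in principal component space
    return coeff, transform, latent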
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
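# Illustrative usage sketch for tradeBracket(): the price values and bracket size
# below are invented for the example.
def _tradeBracketExample():
    ''' minimal sketch of tradeBracket() on a toy price series '''
    price = np.array([10.0, 10.2, 10.5, 9.4, 9.0])
    # entering at bar 0 with a 0.4 bracket: the price first leaves the bracket at bar 2
    priceDelta, exitBar = tradeBracket(price, entryBar=0, maxTradeLength=10, bracket=0.4)
    return priceDelta, exitBar # -> (0.5, 2)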
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
        # keep only returns between the 20th and 80th percentiles (drop both tails)
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
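# Illustrative usage sketch for estimateBeta(): the simulated prices below are invented.
# The stock returns are built as 1.5x the market returns plus noise, so the estimated
# beta should come out close to 1.5.
def _estimateBetaExample():
    ''' minimal sketch of estimateBeta() on simulated price series '''
    rnd = np.random.RandomState(42)
    marketReturns = rnd.normal(0, 0.01, 250)
    market = pd.Series(100 * np.exp(np.cumsum(marketReturns)))
    stock = pd.Series(50 * np.exp(np.cumsum(1.5 * marketReturns + rnd.normal(0, 0.002, 250))))
    return estimateBeta(priceY=stock, priceX=market, algo='standard')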
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdwon values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
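# Illustrative usage sketch for drawdown(): the equity curve below is a hand-made toy series.
def _drawdownExample():
    ''' minimal sketch of drawdown() on a toy equity curve '''
    equity = pd.Series([100.0, 110.0, 105.0, 120.0, 90.0, 95.0, 130.0])
    dd, duration = drawdown(equity)
    # max drawdown is 30 (from the 120 high-water mark down to 90),
    # and the longest drawdown spell lasts 2 bars
    return dd.max(), duration.max()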
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df) | bsd-3-clause |
namccart/gnuradio | gr-digital/examples/example_costas.py | 49 | 5316 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
# Convert the FLL's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
tbabej/astropy | astropy/visualization/wcsaxes/tests/test_frame.py | 2 | 5298 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import matplotlib.pyplot as plt
from ....wcs import WCS
from ....tests.helper import pytest, remote_data
from .. import WCSAxes
from ..frame import BaseFrame
from ....tests.image_tests import IMAGE_REFERENCE_DIR
from .test_images import BaseImageTests
class HexagonalFrame(BaseFrame):
spine_names = 'abcdef'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
ymid = 0.5 * (ymin + ymax)
xmid1 = (xmin + xmax) / 4.
xmid2 = (xmin + xmax) * 3. / 4.
self['a'].data = np.array(([xmid1, ymin], [xmid2, ymin]))
self['b'].data = np.array(([xmid2, ymin], [xmax, ymid]))
self['c'].data = np.array(([xmax, ymid], [xmid2, ymax]))
self['d'].data = np.array(([xmid2, ymax], [xmid1, ymax]))
self['e'].data = np.array(([xmid1, ymax], [xmin, ymid]))
self['f'].data = np.array(([xmin, ymid], [xmid1, ymin]))
class TestFrame(BaseImageTests):
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='custom_frame.png',
tolerance=1.5)
def test_custom_frame(self):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7],
wcs=wcs,
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.coords.grid(color='white')
im = ax.imshow(np.ones((149, 149)), vmin=0., vmax=2.,
origin='lower', cmap=plt.cm.gist_heat)
minpad = {}
minpad['a'] = minpad['d'] = 1
minpad['b'] = minpad['c'] = minpad['e'] = minpad['f'] = 2.75
ax.coords['glon'].set_axislabel("Longitude", minpad=minpad)
ax.coords['glon'].set_axislabel_position('ad')
ax.coords['glat'].set_axislabel("Latitude", minpad=minpad)
ax.coords['glat'].set_axislabel_position('bcef')
ax.coords['glon'].set_ticklabel_position('ad')
ax.coords['glat'].set_ticklabel_position('bcef')
# Set limits so that no labels overlap
ax.set_xlim(5.5, 100.5)
ax.set_ylim(5.5, 110.5)
# Clip the image to the frame
im.set_clip_path(ax.coords.frame.patch)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='update_clip_path_rectangular.png',
tolerance=1.5)
def test_update_clip_path_rectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='update_clip_path_nonrectangular.png',
tolerance=1.5)
def test_update_clip_path_nonrectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal',
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='update_clip_path_change_wcs.png',
tolerance=1.5)
def test_update_clip_path_change_wcs(self, tmpdir):
# When WCS is changed, a new frame is created, so we need to make sure
# that the path is carried over to the new frame.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.reset_wcs()
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
def test_copy_frame_properties_change_wcs(self):
# When WCS is changed, a new frame is created, so we need to make sure
# that the color and linewidth are transferred over
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.coords.frame.set_linewidth(5)
ax.coords.frame.set_color('purple')
ax.reset_wcs()
assert ax.coords.frame.get_linewidth() == 5
assert ax.coords.frame.get_color() == 'purple'
| bsd-3-clause |
blisseth/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
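def QuickLeastSquaresExample():
    """Illustrative sketch: fits QuickLeastSquares to synthetic data.

    The data are generated here purely for illustration, with a known
    intercept of 1 and slope of 2.
    """
    xs = np.linspace(0, 10, 50)
    ys = 1 + 2 * xs + np.random.normal(0, 0.1, len(xs))
    inter, slope, mse = QuickLeastSquares(xs, ys)
    print('inter %0.3g slope %0.3g mse %0.3g' % (inter, slope, mse))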
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
df['isold'] = (df.agepreg<35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
liyu1990/sklearn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
jarathomas/openVA-Pipeline | pipeline.py | 1 | 49777 | #-------------------------------------------------------------------------------------------------------------------------------------------#
# openVA Pipeline: pipeline.py -- Software for processing Verbal Autopsy data with automated cause of death assignment. #
# Copyright (C) 2018 Jason Thomas, Samuel Clark, Martin Bratschi in collaboration with the Bloomberg Data for Health Initiative #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#-------------------------------------------------------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------------------------------------------------------#
# User Settings
sqlitePW = "enilepiP"
dbName = "Pipeline.db"
#-------------------------------------------------------------------------------------------------------------------------------------------#
from pysqlcipher3 import dbapi2 as sqlcipher
from pandas import read_csv, groupby
import pandas as pd
import sys
import csv
import datetime
import os
import subprocess
import shutil
import requests
import json
import sqlite3
import time
import re
import pickle
#-------------------------------------------------------------------------------------------------------------------------------------------#
# Define functions and objects needed for functioning of pipeline; then set up log files and configuration of pipeline
#-------------------------------------------------------------------------------------------------------------------------------------------#
class Dhis(object):
"""Access DHIS2 API."""
def __init__(self, dhisURL, dhisUser, dhisPass):
if '/api' in dhisURL:
print('Please do not specify /api/ in the server argument: e.g. --server=play.dhis2.org/demo')
sys.exit()
if dhisURL.startswith('localhost') or dhisURL.startswith('127.0.0.1'):
dhisURL = 'http://{}'.format(dhisURL)
elif dhisURL.startswith('http://'):
dhisURL = dhisURL
elif not dhisURL.startswith('https://'):
dhisURL = 'https://{}'.format(dhisURL)
self.auth = (dhisUser, dhisPass)
self.url = '{}/api/25'.format(dhisURL)
def get(self, endpoint, params=None):
"""
GET method for DHIS2 API.
:rtype: dict
"""
url = '{}/{}.json'.format(self.url, endpoint)
if not params:
params = {}
params['paging'] = False
try:
r = requests.get(url=url, params=params, auth=self.auth)
if r.status_code != 200:
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
return r.json()
except requests.RequestException:
raise requests.RequestException
def post(self, endpoint, data):
"""
POST method for DHIS2 API.
:rtype: dict
"""
url = '{}/{}.json'.format(self.url, endpoint)
try:
r = requests.post(url=url, json=data, auth=self.auth)
if r.status_code not in range(200, 206):
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
return r.json()
except requests.RequestException:
raise requests.RequestException
def post_blob(self, f):
""" Post file to DHIS2 and return created UID for that file
:rtype: str
"""
url = '{}/fileResources'.format(self.url)
files = {'file': (f, open(f, 'rb'), 'application/x-sqlite3', {'Expires': '0'})}
try:
r = requests.post(url, files=files, auth=self.auth)
if r.status_code not in (200, 202):
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
response = r.json()
file_id = response['response']['fileResource']['id']
return file_id
except requests.RequestException:
raise requests.RequestException
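# Illustrative sketch of instantiating the Dhis wrapper above. The server URL and
# credentials are the publicly documented DHIS2 demo values, used here only as
# placeholders, not pipeline configuration.
def _example_dhis_connection():
    """Build a Dhis object; calling .get() would perform the actual HTTP request."""
    api = Dhis("play.dhis2.org/demo", "admin", "district")
    # e.g. api.get("organisationUnits", params={"fields": "id,name"}) would return parsed JSON
    return api.url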
class VerbalAutopsyEvent(object):
""" DHIS2 event + a BLOB file resource"""
def __init__(self, va_id, program, dhis_orgunit, event_date, sex, dob, age, cod_code, algorithm_metadata, file_id):
self.va_id = va_id
self.program = program
self.dhis_orgunit = dhis_orgunit
self.event_date = event_date
self.sex = sex
self.dob = dob
self.age = age
self.cod_code = cod_code
self.algorithm_metadata = algorithm_metadata
self.datavalues = [
{"dataElement": "htm6PixLJNy", "value": self.va_id},
{"dataElement": "hi7qRC4SMMk", "value": self.sex},
{"dataElement": "mwSaVq64k7j", "value": self.dob},
{"dataElement": "F4XGdOBvWww", "value": self.cod_code},
{"dataElement": "wiJviUqN1io", "value": self.algorithm_metadata},
{"dataElement": "oPAg4MA0880", "value": self.age},
{"dataElement": "XLHIBoLtjGt", "value": file_id}
]
def format_to_dhis2(self, dhisUser):
"""
Format object to DHIS2 compatible event for DHIS2 API
:rtype: dict
"""
event = {
"program": self.program,
"orgUnit": self.dhis_orgunit,
"eventDate": datetime.datetime.strftime(self.event_date, '%Y-%m-%d'),
"status": "COMPLETED",
"storedBy": dhisUser,
"dataValues": self.datavalues
}
return event
def __str__(self):
return json.dumps(self, default=lambda o: o.__dict__)
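# Illustrative sketch of building a VerbalAutopsyEvent and formatting it for the DHIS2
# events API. Every UID, date, and value below is a made-up placeholder, not a real
# pipeline or DHIS2 identifier.
def _example_format_event():
    """Return the DHIS2 event payload for a hypothetical VA record."""
    event = VerbalAutopsyEvent(va_id="example-va-0001", program="exampleProgramUID",
                               dhis_orgunit="exampleOrgUnitUID",
                               event_date=datetime.datetime(2018, 1, 15),
                               sex="Female", dob="1950-01-01", age="68",
                               cod_code="B54", algorithm_metadata="InterVA4|exampleMetadataCode",
                               file_id="exampleFileResourceUID")
    return event.format_to_dhis2(dhisUser="exampleUser")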
def create_db(fName, evaList):
"""
Create a SQLite database with VA data + COD
:rtype: None
"""
conn = sqlite3.connect(fName)
with conn:
cur = conn.cursor()
        cur.execute("CREATE TABLE vaRecord(ID INT, Attribute TEXT, Value TEXT)")
cur.executemany("INSERT INTO vaRecord VALUES (?,?,?)", evaList)
def getCODCode(myDict, searchFor):
for i in range(len(myDict.keys())):
match = re.search(searchFor, list(myDict.keys())[i])
if match:
return list(myDict.values())[i]
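# Illustrative sketch of getCODCode(): it does a regular-expression lookup of a cause
# of death name against the COD code dictionary; the two-entry mapping below is invented.
def _example_getCODCode():
    exampleCodes = {"Malaria": "B54", "HIV/AIDS related death": "B24"}
    return getCODCode(exampleCodes, "Malaria") # returns "B54"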
# set the ODK_Conf table item odkLastRunResult as 0, log error message, and exit script
def cleanup(errorMsg):
# handle single case when DB file not found
if connectionError == "1":
with open(connectionErrorFile, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow([timeFMT, "Unable to Connect to SQLite Database, see {} for details".format(errorFile)])
sys.exit(1)
else:
# update ODK_Conf table with LastRunResult = 0
try:
sql = "UPDATE ODK_Conf SET odkLastRunResult = ?"
par = ("0",)
cursor.execute(sql, par)
db.commit()
if os.path.isfile(connectionErrorFile) == True:
try:
os.remove(connectionErrorFile)
except OSError:
print("Could not remove {}".format(connectionErrorFile))
# write errorMsg to errorFile if DB is inaccessible
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError):
db.rollback()
errorMsg[2] += "; unable to set odkLastRunResult to 0 (in ODK_Conf table)"
try:
with open(errorFile, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow(errorMsg)
except OSError:
print(errorMsg)
# close DB resources and exit script
finally:
cursor.close()
db.close()
sys.exit(1)
def findKeyValue(key, d):
if key in d:
yield d[key]
for k in d:
if isinstance(d[k], list):
for i in d[k]:
for j in findKeyValue(key, i):
yield j
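# Illustrative sketch of findKeyValue(): it walks a nested dict/list structure (such as
# parsed DHIS2 JSON) and yields every value stored under the requested key. The nested
# structure below is invented.
def _example_findKeyValue():
    nested = {"id": "A1", "children": [{"id": "B2"}, {"id": "C3", "children": [{"id": "D4"}]}]}
    return list(findKeyValue("id", nested)) # -> ["A1", "B2", "C3", "D4"]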
# error log files
errorFile = "./dbErrorLog.csv"
timeFMT = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
connectionError = "0"
connectionErrorFile = "./sqlConnect.csv"
## create error file if it does not exist
if os.path.isfile(errorFile) == False:
try:
with open(errorFile, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["Date"] + ["Description"] + ["Additional Information"])
except (OSError) as e:
print(str(e))
sys.exit(1)
# connect to the database and configure the pipeline's settings for ODK Aggregate, openVA, and DHIS2.
if os.path.isfile(dbName) == False:
connectionError = "1"
with open(errorFile, "a", newline="") as f:
writer = csv.writer(f)
        writer.writerow([timeFMT, "Database {} not found".format(dbName), ""])
    cleanup([timeFMT, "Database {} not found".format(dbName), ""])
db = sqlcipher.connect(dbName)
db.execute("PRAGMA key = " + sqlitePW)
sqlODK = "SELECT odkID, odkURL, odkUser, odkPass, odkFormID, odkLastRun, odkLastRunResult FROM ODK_Conf"
sqlPipeline = "SELECT workingDirectory, openVA_Algorithm, algorithmMetadataCode, codSource FROM Pipeline_Conf"
sqlInterVA4 = "SELECT HIV, Malaria FROM InterVA4_Conf"
sqlAdvancedInterVA4 = "SELECT directory, filename, output, append, groupcode, replicate, replicate_bug1, replicate_bug2, write FROM Advanced_InterVA4_Conf"
sqlInSilicoVA = "SELECT Nsim FROM InSilicoVA_Conf"
sqlAdvancedInSilicoVA = "SELECT isNumeric, updateCondProb, keepProbbase_level, CondProb, CondProbNum, datacheck, datacheck_missing," \
+ "warning_write, external_sep, thin, burnin, auto_length, conv_csmf, jump_scale," \
+ "levels_prior, levels_strength, trunc_min, trunc_max, subpop, java_option, seed," \
+ "phy_code, phy_cat, phy_unknown, phy_external, phy_debias, exclude_impossible_cause, indiv_CI " \
+ "FROM Advanced_InSilicoVA_Conf"
sqlDHIS = "SELECT dhisURL, dhisUser, dhisPass, dhisOrgUnit FROM DHIS_Conf"
sqlCODCodes_WHO = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'WHO'"
sqlCODCodes_Tariff = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'Tariff'"
## grab configuration settings from SQLite DB
try:
# ODK configuration
cursor = db.cursor()
cursor.execute(sqlODK)
odkQuery = cursor.fetchall()
for row in odkQuery:
odkID = row[0]
odkURL = row[1]
odkUser = row[2]
odkPass = row[3]
odkFormID = row[4]
odkLastRun = row[5]
odkLastRunDate = datetime.datetime.strptime(odkLastRun, "%Y-%m-%d_%H:%M:%S").strftime("%Y/%m/%d")
odkLastRunDatePrev = (datetime.datetime.strptime(odkLastRunDate, "%Y/%m/%d") - datetime.timedelta(days=1)).strftime("%Y/%m/%d")
odkLastRunResult = row[6]
# Pipeline configuration
cursor.execute(sqlPipeline)
pipelineQuery = cursor.fetchall()
for row in pipelineQuery:
processDir = row[0]
pipelineAlgorithm = row[1]
algorithmMetadataCode = row[2]
codSource = row[3]
# InterVA4 configuration
cursor.execute(sqlInterVA4)
interVA4Query = cursor.fetchall()
for row in interVA4Query:
interVA_HIV = row[0]
interVA_Malaria = row[1]
# InterVA4 advanced configuration
cursor.execute(sqlAdvancedInterVA4)
advancedInterVA4Query = cursor.fetchall()
for row in advancedInterVA4Query:
interVA_directory = row[0]
interVA_filename = row[1]
interVA_output = row[2]
interVA_append = row[3]
interVA_groupcode = row[4]
interVA_replicate = row[5]
interVA_replicate_bug1 = row[6]
interVA_replicate_bug2 = row[7]
interVA_write = row[8]
# InSilicoVA configuration
cursor.execute(sqlInSilicoVA)
insilicoVAQuery = cursor.fetchall()
for row in insilicoVAQuery:
insilico_Nsim = row[0]
# InSilicoVA advanced configuration
cursor.execute(sqlAdvancedInSilicoVA)
advancedInsilicoVAQuery = cursor.fetchall()
for row in advancedInsilicoVAQuery:
insilico_isNumeric = row[ 0]
insilico_updateCondProb = row[ 1]
insilico_keepProbbase_level = row[ 2]
insilico_CondProb = row[ 3]
insilico_CondProbNum = row[ 4]
insilico_datacheck = row[ 5]
insilico_datacheck_missing = row[ 6]
insilico_warning_write = row[ 7]
insilico_external_sep = row[ 8]
insilico_thin = row[ 9]
insilico_burnin = row[10]
insilico_auto_length = row[11]
insilico_conv_csmf = row[12]
insilico_jump_scale = row[13]
insilico_levels_prior = row[14]
insilico_levels_strength = row[15]
insilico_trunc_min = row[16]
insilico_trunc_max = row[17]
insilico_subpop = row[18]
insilico_java_option = row[19]
insilico_seed = row[20]
insilico_phy_code = row[21]
insilico_phy_cat = row[22]
insilico_phy_unknown = row[23]
insilico_phy_external = row[24]
insilico_phy_debias = row[25]
insilico_exclude_impossible_cause = row[26]
insilico_indiv_CI = row[27]
# DHIS2 configuration
cursor.execute(sqlDHIS)
dhisQuery = cursor.fetchall()
for row in dhisQuery:
dhisURL = row[0]
dhisUser = row[1]
dhisPass = row[2]
dhisOrgUnit = row[3]
# CoD Codes for DHIS2
cursor.execute(sqlCODCodes_WHO)
resultsWHO = cursor.fetchall()
codesWHO = dict(resultsWHO)
cursor.execute(sqlCODCodes_Tariff)
resultsTariff = cursor.fetchall()
codesTariff = dict(resultsTariff)
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Problem selecting config information from ODK_Conf ", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Problem selecting config information from ODK_Conf"]
cleanup(errorMsg)
#-------------------------------------------------------------------------------------------------------------------------------------------#
# create folders & files to store (ODK & openVA) input and output; also create call to ODK Briefcase
#-------------------------------------------------------------------------------------------------------------------------------------------#
odkBCExportDir = processDir + "/ODKExport"
odkBCExportFilename = "ODKExportNew.csv"
odkBCExportPrevious = odkBCExportDir + "/ODKExportPrevious.csv"
odkBCExportNewFile = odkBCExportDir + "/" + odkBCExportFilename
odkBCArgumentList = "java -jar ODK-Briefcase-v1.10.1.jar -oc -em -id '" + odkFormID + "' -sd '" + odkBCExportDir + "' -ed '" \
+ odkBCExportDir + "' -f '" + odkBCExportFilename + "' -url '" + odkURL + "' -u '" + odkUser \
+ "' -p '" + odkPass + "' -start '" + odkLastRunDatePrev + "'"
openVAFilesDir = processDir + "/OpenVAFiles"
openVAReadyFile = odkBCExportDir + "/OpenVAReadyFile.csv"
rScriptIn = openVAFilesDir + "/" + timeFMT + "/RScript_" + timeFMT + ".R"
rScriptOut = openVAFilesDir + "/" + timeFMT + "/RScript_" + timeFMT + ".Rout"
dhisDir = processDir + "/DHIS2"
if codSource=="WHO":
dhisCODCodes = codesWHO
else:
dhisCODCodes = codesTariff
# check if processing directory exists and create if necessary
if not os.path.exists(processDir):
try:
os.makedirs(processDir)
except OSError as e:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create processing directory: " + processDir, str(e), timeFMT)
try:
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not create processing directory: " + processDir]
cleanup(errorMsg)
# create openVAFilesDir (if does not exist)
if not os.path.exists(openVAFilesDir + "/" + timeFMT):
try:
os.makedirs(openVAFilesDir + "/" + timeFMT)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create openVA Directory: " + openVAFilesDir + "/" + timeFMT, str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not create openVA directory: " + openVAFilesDir + "/" + timeFMT]
cleanup(errorMsg)
# make a copy of current ODK Briefcase Export file, to compare with new file once exported (if there is an existing export file)
if os.path.isfile(odkBCExportNewFile) == True and odkLastRunResult == 1 and not os.path.isfile(connectionErrorFile):
try:
shutil.copy(odkBCExportNewFile, odkBCExportPrevious)
except (OSError, shutil.Error) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error: Trying to copy export files from ODK Briefcase", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Trying to copy export files from ODK Briefcase"]
cleanup(errorMsg)
try:
os.remove(openVAReadyFile)
except (OSError) as e:
try:
            sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not remove " + openVAReadyFile, str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not remove " + openVAReadyFile]
cleanup(errorMsg)
# launch ODK Briefcase to collect ODK Aggregate data and export to file for further processing
try:
process = subprocess.Popen(odkBCArgumentList, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
rc = process.returncode
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not launch ODK Briefcase Java Application", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Could not launch ODK Briefcase Java Application",""]
cleanup(errorMsg)
# catch application errors from ODK Briefcase and log into EventLog table
if rc != 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (str(stderr), "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(stderr),""]
cleanup(errorMsg)
if "SEVERE" in str(stderr):
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (stderr,"Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(stderr),""]
cleanup(errorMsg)
else:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Briefcase Export Completed Successfully", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?"
par = (timeFMT,"1")
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "ODK Briefcase ran successfully but problems writing to DB (check odkLastRunResult in ODK_Conf)"]
cleanup(errorMsg)
# check if previous file exists from above operations and create delta file of new entries
if os.path.isfile(odkBCExportPrevious) == True:
try:
## WARNING: odkBCExportPrevious & odkBCExportNewFil (CSV files)
## contain sensitive VA information (leaving them in folder)
with open(odkBCExportPrevious, "r", newline="") as t1, open(odkBCExportNewFile, "r", newline="") as t2:
fileone = t1.readlines()
filetwo = t2.readlines()
header = filetwo[0]
with open(openVAReadyFile, "w", newline="") as outFile:
outFile.write(header)
for line in filetwo:
if line not in fileone:
outFile.write(line)
except OSError as e:
try:
            sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create: " + openVAReadyFile, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not create: " + openVAReadyFile]
cleanup(errorMsg)
else:
# if there is no pre-existing ODK Briefcase Export file, then copy and rename to OpenVAReadyFile.csv
try:
shutil.copy(odkBCExportNewFile, openVAReadyFile)
except (OSError, shutil.Error) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (e, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not copy: " + odkBCExportNewFile + " to: " + openVAReadyFile]
cleanup(errorMsg)
# if no records retrieved, then close up shop; otherwise, create R script for running openVA
## WARNING: openVAReadyFile (CSV file) contains sensitive VA information (leaving it in folder)
with open(openVAReadyFile, "r", newline="") as outFile:
nRecords = len(list(outFile)) - 1 ## take away 1 for the column header
if nRecords == 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("No Records From ODK Briefcase (nothing more to do)", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "No records from ODK Briefcase, but error writing to DB"]
cleanup(errorMsg)
try:
sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?"
par = (timeFMT,"1")
cursor.execute(sql, par)
db.commit()
cursor.close()
db.close()
sys.exit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e),
"No records from ODK Briefcase, but error writing to DB (trying to set odkLastRun & odkLastRunResult)."]
cleanup(errorMsg)
try:
with open(rScriptIn, "w", newline="") as f:
f.write("date() \n")
f.write("library(openVA); library(CrossVA) \n")
f.write("getwd() \n")
f.write("records <- read.csv('" + openVAReadyFile + "') \n")
# InSilicoVA
if pipelineAlgorithm == "InSilicoVA":
            f.write("names(records) <- tolower(names(records)) \n")
f.write("data <- map_records_insilicova(records) \n")
## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate
if odkID == None:
f.write("data$ID <- records$meta.instanceID \n")
else:
f.write("data$ID <- records$" + odkID + "\n")
            f.write("results <- insilico(data=data, \n")
f.write("\t isNumeric=" + insilico_isNumeric + ", \n")
f.write("\t updateCondProb=" + insilico_updateCondProb + ", \n")
f.write("\t keepProbbase.level=" + insilico_keepProbbase_level + ", \n")
f.write("\t CondProb=" + insilico_CondProb + ", \n")
f.write("\t CondProbNum=" + insilico_CondProbNum + ", \n")
f.write("\t datacheck=" + insilico_datacheck + ", \n")
f.write("\t datacheck.missing=" + insilico_datacheck_missing + ", \n")
f.write("\t warning.write=" + insilico_warning_write + ", \n")
f.write("\t external.sep=" + insilico_external_sep + ", \n")
f.write("\t Nsim=" + insilico_Nsim + ", \n")
f.write("\t thin=" + insilico_thin + ", \n")
f.write("\t burnin=" + insilico_burnin + ", \n")
f.write("\t auto.length=" + insilico_auto_length + ", \n")
f.write("\t conv.csmf=" + insilico_conv_csmf + ", \n")
f.write("\t jump.scale=" + insilico_jump_scale + ", \n")
f.write("\t levels.prior=" + insilico_levels_prior + ", \n")
f.write("\t levels.strength=" + insilico_levels_strength + ", \n")
f.write("\t trunc.min=" + insilico_trunc_min + ", \n")
f.write("\t trunc.max=" + insilico_trunc_max + ", \n")
f.write("\t subpop=" + insilico_subpop + ", \n")
f.write("\t java.option=" + insilico_java_option + ", \n")
f.write("\t seed=" + insilico_seed + ", \n")
f.write("\t phy.code=" + insilico_phy_code + ", \n")
f.write("\t phy.cat=" + insilico_phy_cat + ", \n")
f.write("\t phy.unknown=" + insilico_phy_unknown + ", \n")
f.write("\t phy.external=" + insilico_phy_external + ", \n")
f.write("\t phy.debias=" + insilico_phy_debias + ", \n")
f.write("\t exclude.impossible.cause=" + insilico_exclude_impossible_cause + ", \n")
f.write("\t indiv.CI=" + insilico_indiv_CI + ") \n")
f.write("sex <- ifelse(tolower(data$male)=='y', 'Male', 'Female') \n")
# InterVA
if pipelineAlgorithm == "InterVA":
f.write("data <- map_records_interva4(records) \n")
## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate
if odkID == None:
f.write("data$ID <- records$meta.instanceID \n")
else:
f.write("data$ID <- records$" + odkID + "\n")
f.write("results <- InterVA(Input=data, \n")
f.write("\t HIV= '" + interVA_HIV + "', \n")
f.write("\t Malaria = '" + interVA_Malaria + "', \n")
f.write("\t output='" + interVA_output + "', \n")
f.write("\t groupcode=" + interVA_groupcode + ", \n")
f.write("\t replicate=" + interVA_replicate + ", \n")
f.write("\t replicate.bug1=" + interVA_replicate_bug1 + ", \n")
f.write("\t replicate.bug2=" + interVA_replicate_bug2 + ", \n")
f.write("\t write=FALSE) \n")
f.write("sex <- ifelse(tolower(data$MALE)=='y', 'Male', 'Female') \n")
# write results
f.write("cod <- getTopCOD(results) \n")
f.write("hasCOD <- as.character(data$ID) %in% as.character(levels(cod$ID)) \n")
f.write("dob <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10021), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary!
f.write("dod <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10023), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary!
f.write("age <- floor(records$consented.deceased_CRVS.info_on_deceased.ageInDays/365.25) \n")
f.write("## create matrices for DHIS2 blob (data2) and transfer database (data3) \n")
f.write("## first column must be ID \n")
f.write("metadataCode <- '" + algorithmMetadataCode + "'\n")
f.write("cod2 <- rep('MISSING', nrow(data)); cod2[hasCOD] <- as.character(cod[,2]) \n")
f.write("data2 <- cbind(data[,-1], cod2, metadataCode) \n")
f.write("names(data2) <- c(names(data[,-1]), 'Cause of Death', 'Metadata') \n")
f.write("evaBlob <- cbind(rep(as.character(data[,1]), each=ncol(data2)), rep(names(data2)), c(apply(data2, 1, c))) \n")
f.write("colnames(evaBlob) <- c('ID', 'Attribute', 'Value') \n")
f.write("write.csv(evaBlob, file='" + openVAFilesDir + "/entityAttributeValue.csv', row.names=FALSE, na='') \n\n")
f.write("data3 <- cbind(as.character(data[,1]), sex, dob, dod, age, cod2, metadataCode, data[,-1]) \n")
f.write("names(data3) <- c('id', 'sex', 'dob', 'dod', 'age', 'cod', 'metadataCode', names(data[,-1])) \n")
f.write("write.csv(data3, file='" + openVAFilesDir + "/recordStorage.csv', row.names=FALSE, na='') \n")
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create R Script File","Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not create R Script File"]
cleanup(errorMsg)
# run R script
rBatch = "R CMD BATCH --vanilla " + rScriptIn + " " + rScriptOut
rprocess = subprocess.Popen(rBatch, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = rprocess.communicate()
rrc = rprocess.returncode
if rrc != 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not run R Script", str(stderr), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Could not run R Script", str(stderr)]
cleanup(errorMsg)
else:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("OpenVA Analysis Completed Successfully", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "OpenVA Analysis Completed Successfully (error committing message to database)."]
cleanup(errorMsg)
# push results to DHIS2
try:
api = Dhis(dhisURL, dhisUser, dhisPass)
except (requests.RequestException) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to connect to DHIS2", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Unable to connect to DHIS2"]
cleanup(errorMsg)
# verify VA program and orgUnit
try:
vaPrograms = api.get("programs", params={"filter": "name:like:Verbal Autopsy"}).get("programs")
orgUnitValid = len(api.get("organisationUnits", params={"filter": "id:eq:{}".format(dhisOrgUnit)})["organisationUnits"])==1
if not orgUnitValid:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Organisation Unit UID could not be found.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Organisation Unit UID could not be found.", "Error committing message to database"]
cleanup(errorMsg)
if not vaPrograms:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("'Verbal Autopsy' program not found", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: 'Verbal Autopsy' program not found.", "Error committing message to database"]
cleanup(errorMsg)
elif len(vaPrograms) > 1:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("More than one 'Verbal Autopsy' found.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: More than one 'Verbal Autopsy' found.", "Error committing message to database"]
cleanup(errorMsg)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID",
"Error committing message to database"]
cleanup(errorMsg)
vaProgramUID = vaPrograms[0]["id"]
blobPath = os.path.join(dhisDir, "blobs")
try:
if not os.path.isdir(blobPath):
os.makedirs(blobPath)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to create folder for DHIS blobs.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Unable to create folder for DHIS blobs."]
cleanup(errorMsg)
events = []
export = {}
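## 'events' collects one DHIS2 event payload per VA record with an assigned cause;
## they are posted in bulk further below via api.post("events", data=export)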
## read in VA data (with COD and algorithm metadata) from csv's (and create groups by ID for Entity-Attribute-Value file)
try:
## WARNING: The following CSV file contains sensitive VA information (leaving it in folder)!
dfDHIS2 = pd.read_csv(openVAFilesDir + "/entityAttributeValue.csv")
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv",
"Error committing message to database"]
cleanup(errorMsg)
grouped = dfDHIS2.groupby(["ID"])
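## each ID group holds the Attribute/Value rows of a single VA record and becomes
## that record's DHIS2 blob (see create_db/post_blob in the loop below)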
## prepare events for DHIS2 export
try:
with open(openVAFilesDir + "/recordStorage.csv", "r", newline="") as csvIn:
with open(openVAFilesDir + "/newStorage.csv", "w", newline="") as csvOut:
reader = csv.reader(csvIn)
writer = csv.writer(csvOut, lineterminator="\n")
header = next(reader)
header.extend(["dhisVerbalAutopsyID", "pipelineOutcome"])
writer.writerow(header)
for row in reader:
if row[5]!="MISSING":
vaID = str(row[0])
blobFile = "{}.db".format(os.path.join(dhisDir, "blobs", vaID))
blobRecord = grouped.get_group(str(row[0]))
blobEVA = blobRecord.values.tolist()
## create DHIS2 blob
try:
create_db(blobFile, blobEVA)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to create DHIS2 BLOB", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to create DHIS2 BLOB", "Error committing message to database"]
cleanup(errorMsg)
## post DHIS2 blob
try:
fileID = api.post_blob(blobFile)
except requests.RequestException as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to post BLOB to DHIS2", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Unable to post BLOB to DHIS2"]
cleanup(errorMsg)
sex = row[1].lower()
dob = row[2]
if row[3] =="":
eventDate = datetime.date(9999,9,9)
else:
dod = datetime.datetime.strptime(row[3], "%Y-%m-%d")
eventDate = datetime.date(dod.year, dod.month, dod.day)
age = row[4]
if row[5] == "Undetermined":
codCode = "99"
else:
codCode = getCODCode(dhisCODCodes, row[5])
e = VerbalAutopsyEvent(vaID, vaProgramUID, dhisOrgUnit,
eventDate, sex, dob, age, codCode, algorithmMetadataCode, fileID)
events.append(e.format_to_dhis2(dhisUser))
row.extend([vaID, "Pushing to DHIS2"])
writer.writerow(row)
else:
row.extend(["", "No CoD Assigned"])
writer.writerow(row)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir,
"Error committing message to database"]
cleanup(errorMsg)
export["events"] = events
try:
log = api.post("events", data=export)
except requests.RequestException as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to post events to DHIS2 VA Program.", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Unable to post events to DHIS2 VA Program."]
cleanup(errorMsg)
if 'importSummaries' not in log['response'].keys():
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Failed to retrieve summary from post to DHIS2 VA Program.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error", "Failed to retrieve summary from post to DHIS2 VA Program."]
cleanup(errorMsg)
if log["httpStatusCode"] == 200:
nPosted = len(log['response']['importSummaries'])
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Successfully posted {} events to DHIS2 VA Program.".format(nPosted), "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Successfully posted {} events to DHIS2 VA Program, but error writing to DB".format(nPosted)]
cleanup(errorMsg)
vaReferences = list(findKeyValue("reference", d=log["response"]))
dfNewStorage = pd.read_csv(openVAFilesDir + "/newStorage.csv")
try:
for vaReference in vaReferences:
postedDataValues = api.get("events/{}".format(vaReference)).get("dataValues")
postedVAIDIndex = next((index for (index, d) in enumerate(postedDataValues) if d["dataElement"]=="htm6PixLJNy"), None)
postedVAID = postedDataValues[postedVAIDIndex]["value"]
rowVAID = dfNewStorage["dhisVerbalAutopsyID"] == postedVAID
dfNewStorage.loc[rowVAID,"pipelineOutcome"] = "Pushed to DHIS2"
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error trying to verify events posted to DHIS2", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error trying to verify events posted to DHIS2", ""]
cleanup(errorMsg)
# store results in database
try:
for row in dfNewStorage.itertuples():
xferDBID = row[1]
xferDBOutcome = row[254]
vaData = row[1],row[8:253]
vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))])
xferDBRecord = pickle.dumps(vaDataFlat)
sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)"
par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT]
cursor.execute(sqlXferDB, par)
db.commit()
## note: to read back in: (1) cursor.execute(SQL SELECT STATEMENT) (2) results = pickle.loads(sqlResult[0])
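## a minimal read-back sketch (someID is a placeholder for an actual record id):
## cursor.execute("SELECT record FROM VA_Storage WHERE id = ?", (someID,))
## sqlResult = cursor.fetchone()
## results = pickle.loads(sqlResult[0])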
## An alternative version of storing VA records to SQLite DB(not relying on pickle)
# for row in dfNewStorage.itertuples():
# xferDBID = row[1]
# xferDBOutcome = row[254]
# with open("xferDBRecord.txt", "w", newline="") as f:
# vaData = row[1],row[8:253]
# vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))])
# writer = csv.writer(f, lineterminator="\n")
# writer.writerow(vaDataFlat)
# with open("xferDBRecord.txt", "rb") as f:
# xferDBRecord = f.read()
# sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)"
# par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT]
# cursor.execute(sqlXferDB, par)
# db.commit()
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error storing Blobs to {}.db".format(dbName), "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error storing Blobs to {}.db".format(dbName), ""]
cleanup(errorMsg)
try:
nNewStorage = dfNewStorage.shape[0]
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Stored {} records to {}.db".format(nNewStorage, dbName), "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e),
"Stored {} records to {}.db, but error trying to log message to EventLog".format(nNewStorage, dbName)]
cleanup(errorMsg)
# all done!
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Successful completion of Pipeline", "Information", str(datetime.datetime.now()))
cursor.execute(sql, par)
db.commit()
cursor.close()
db.close()
sys.exit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Finished executing Pipeline steps, but error trying to log last message."]
cleanup(errorMsg)
| gpl-3.0 |
gfyoung/pandas | pandas/tests/frame/indexing/test_getitem.py | 2 | 5364 | import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
MultiIndex,
Series,
Timestamp,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH#1211; smoketest unrelated to the rest of this test
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame(
[[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
class TestGetitemListLike:
def test_getitem_list_missing_key(self):
# GH#13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
df.columns = ["x", "x", "z"]
# Check that we get the correct value in the KeyError
with pytest.raises(KeyError, match=r"\['y'\] not in index"):
df[["x", "y", "z"]]
class TestGetitemCallable:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
tm.assert_frame_equal(result, expected)
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, "a"]
tm.assert_frame_equal(result, expected)
class TestGetitemBooleanMask:
def test_getitem_bool_mask_categorical_index(self):
df3 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=True),
name="B",
),
)
df4 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=False),
name="B",
),
)
result = df3[df3.index == "a"]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == "a"]
expected = df4.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name='B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name='B')
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
df4[df4.index < 2]
with pytest.raises(TypeError, match=msg):
df4[df4.index > 1]
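# To run just this module (sketch, assuming a pandas development install):
# pytest pandas/tests/frame/indexing/test_getitem.py -q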
| bsd-3-clause |
hmendozap/master-arbeit-projects | autosk_dev_test/component/LinReg.py | 1 | 8756 | import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LinReg(AutoSklearnRegressionAlgorithm):
def __init__(self, number_updates, batch_size, dropout_output,
learning_rate, solver, lambda2,
momentum=0.99, beta1=0.9, beta2=0.9, rho=0.95,
lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=2,
random_state=None):
self.number_updates = number_updates
self.batch_size = batch_size
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.lambda2 = lambda2
self.momentum = momentum
self.beta1 = 1-beta1 if beta1 is not None else 0.9
self.beta2 = 1-beta2 if beta2 is not None else 0.99
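# note: beta1/beta2 arrive as (1 - beta) values -- presumably so the search space
# below can sample them on a log scale close to 1 -- and are converted back here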
self.rho = rho
self.solver = solver
self.gamma = gamma
self.power = power
self.epoch_step = epoch_step
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isregression = True
self.m_isbinary = False
self.m_ismultilabel = False
self.estimator = None
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
epoch = (self.number_updates * self.batch_size)//X.shape[0]
number_epochs = min(max(2, epoch), 110) # Cap the max number of possible epochs
from implementation import LogisticRegression
self.estimator = LogisticRegression.LogisticRegression(batch_size=self.batch_size,
input_shape=self.input_shape,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'lin_reg',
'name': 'Linear Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
policy_choices = ['fixed', 'inv', 'exp', 'step']
batch_size = UniformIntegerHyperparameter("batch_size",
100, 3000,
log=True,
default=150)
number_updates = UniformIntegerHyperparameter("number_updates",
500, 10500,
log=True,
default=500)
dropout_output = UniformFloatHyperparameter("dropout_output", 0.0, 0.99,
default=0.5)
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 0.1,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-6, 1e-2,
log=True,
default=1e-3)
solver = CategoricalHyperparameter(name="solver",
choices=["sgd", "adam"],
default="sgd")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs = ConfigurationSpace()
cs.add_hyperparameter(number_updates)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(dropout_output)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=['inv', 'exp', 'step'])
power_depends_on_policy = EqualsCondition(power, lr_policy, 'inv')
epoch_step_depends_on_policy = EqualsCondition(epoch_step,
lr_policy, 'step')
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
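# Illustrative usage sketch (not part of this component; the values mirror the
# defaults declared in get_hyperparameter_search_space above, X_train/y_train are
# placeholders, and the 'implementation' backend is assumed to be importable):
# model = LinReg(number_updates=500, batch_size=150, dropout_output=0.5,
# learning_rate=0.01, solver="sgd", lambda2=1e-3)
# model.fit(X_train, y_train)
# preds = model.predict(X_test)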
| mit |
PrashntS/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
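# Closed-form ridge solution via the thin SVD X = U diag(s) V^T:
# coef = V diag(s / (s ** 2 + alpha)) U^T y, with directions whose singular
# values fall below 1e-15 dropped for numerical stability.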
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
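# Illustrative sketch (not part of scikit-learn's API): fitting ridge coefficients
# plus an intercept with ridge_regression by centering the data by hand, which is
# essentially what the estimator classes below do through _center_data. Assumes a
# dense X and a 1-D target y.
def _example_ridge_with_intercept(X, y, alpha=1.0):
    # Center X and y so the intercept can be recovered after the solve.
    X = np.asarray(X, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    X_mean = X.mean(axis=0)
    y_mean = y.mean()
    coef = ridge_regression(X - X_mean, y - y_mean, alpha=alpha)
    intercept = y_mean - np.dot(X_mean, coef)
    return coef, intercept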
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=False)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used, else, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features, X is dense and no sample
weights are given, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
        Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
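# Hedged usage sketch (an illustrative addition, not part of the original
# module): fits RidgeClassifierCV on a small synthetic three-class problem.
# As the Notes section above states, the multi-class case is handled as
# one-versus-all ridge regressions, so ``coef_`` holds one weight vector
# per class. The data and alpha grid are assumptions for demonstration only.
def _ridge_classifier_cv_usage_sketch():
    import numpy as np
    from sklearn.linear_model import RidgeClassifierCV

    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    y = rng.randint(0, 3, size=60)
    clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0),
                            class_weight='balanced').fit(X, y)
    assert clf.coef_.shape == (3, X.shape[1])  # one row per class
    return clf.alpha_, clf.predict(X[:5])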
| bsd-3-clause |
vipulroxx/sympy | sympy/physics/quantum/circuitplot.py | 58 | 12941 | """Matplotlib based plotting of quantum circuits.
Todo:
* Optimize printing of large circuits.
* Get this to work with single gates.
* Do a better job checking the form of circuits to make sure it is a Mul of
Gates.
* Get multi-target gates plotting.
* Get initial and final states to plot.
* Get measurements to plot. Might need to rethink measurement as a gate
issue.
* Get scale and figsize to be handled in a better way.
* Write some tests/examples!
"""
from __future__ import print_function, division
from sympy import Mul
from sympy.core.compatibility import u, range
from sympy.external import import_module
from sympy.physics.quantum.gate import Gate, OneQubitGate, CGate, CGateS
from sympy.core.core import BasicMeta
from sympy.core.assumptions import ManagedProperties
__all__ = [
'CircuitPlot',
'circuit_plot',
'labeller',
'Mz',
'Mx',
'CreateOneQubitGate',
'CreateCGate',
]
np = import_module('numpy')
matplotlib = import_module(
'matplotlib', __import__kwargs={'fromlist': ['pyplot']},
catch=(RuntimeError,)) # This is raised in environments that have no display.
if not np or not matplotlib:
class CircuitPlot(object):
def __init__(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
def circuit_plot(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
else:
pyplot = matplotlib.pyplot
Line2D = matplotlib.lines.Line2D
Circle = matplotlib.patches.Circle
#from matplotlib import rc
#rc('text',usetex=True)
class CircuitPlot(object):
"""A class for managing a circuit plot."""
scale = 1.0
fontsize = 20.0
linewidth = 1.0
control_radius = 0.05
not_radius = 0.15
swap_delta = 0.05
labels = []
inits = {}
label_buffer = 0.5
def __init__(self, c, nqubits, **kwargs):
self.circuit = c
self.ngates = len(self.circuit.args)
self.nqubits = nqubits
self.update(kwargs)
self._create_grid()
self._create_figure()
self._plot_wires()
self._plot_gates()
self._finish()
def update(self, kwargs):
"""Load the kwargs into the instance dict."""
self.__dict__.update(kwargs)
def _create_grid(self):
"""Create the grid of wires."""
scale = self.scale
wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float)
gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float)
self._wire_grid = wire_grid
self._gate_grid = gate_grid
def _create_figure(self):
"""Create the main matplotlib figure."""
self._figure = pyplot.figure(
figsize=(self.ngates*self.scale, self.nqubits*self.scale),
facecolor='w',
edgecolor='w'
)
ax = self._figure.add_subplot(
1, 1, 1,
frameon=True
)
ax.set_axis_off()
offset = 0.5*self.scale
ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset)
ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset)
ax.set_aspect('equal')
self._axes = ax
def _plot_wires(self):
"""Plot the wires of the circuit diagram."""
xstart = self._gate_grid[0]
xstop = self._gate_grid[-1]
xdata = (xstart - self.scale, xstop + self.scale)
for i in range(self.nqubits):
ydata = (self._wire_grid[i], self._wire_grid[i])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
if self.labels:
init_label_buffer = 0
if self.inits.get(self.labels[i]): init_label_buffer = 0.25
self._axes.text(
xdata[0]-self.label_buffer-init_label_buffer,ydata[0],
render_label(self.labels[i],self.inits),
size=self.fontsize,
color='k',ha='center',va='center')
self._plot_measured_wires()
def _plot_measured_wires(self):
ismeasured = self._measurements()
xstop = self._gate_grid[-1]
dy = 0.04 # amount to shift wires when doubled
# Plot doubled wires after they are measured
for im in ismeasured:
xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale)
ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy)
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
# Also double any controlled lines off these wires
for i,g in enumerate(self._gates()):
if isinstance(g, CGate) or isinstance(g, CGateS):
wires = g.controls + g.targets
for wire in wires:
if wire in ismeasured and \
self._gate_grid[i] > self._gate_grid[ismeasured[wire]]:
ydata = min(wires), max(wires)
xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def _gates(self):
"""Create a list of all gates in the circuit plot."""
gates = []
if isinstance(self.circuit, Mul):
for g in reversed(self.circuit.args):
if isinstance(g, Gate):
gates.append(g)
elif isinstance(self.circuit, Gate):
gates.append(self.circuit)
return gates
def _plot_gates(self):
"""Iterate through the gates and plot each of them."""
for i, gate in enumerate(self._gates()):
gate.plot_gate(self, i)
def _measurements(self):
"""Return a dict {i:j} where i is the index of the wire that has
been measured, and j is the gate where the wire is measured.
"""
ismeasured = {}
for i,g in enumerate(self._gates()):
if getattr(g,'measurement',False):
for target in g.targets:
if target in ismeasured:
if ismeasured[target] > i:
ismeasured[target] = i
else:
ismeasured[target] = i
return ismeasured
def _finish(self):
# Disable clipping to make panning work well for large circuits.
for o in self._figure.findobj():
o.set_clip_on(False)
def one_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a single qubit gate."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def two_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a two qubit gate. Doesn't work yet.
"""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]+0.5
print(self._gate_grid)
print(self._wire_grid)
obj = self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def control_line(self, gate_idx, min_wire, max_wire):
"""Draw a vertical control line."""
xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx])
ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def control_point(self, gate_idx, wire_idx):
"""Draw a control point."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.control_radius
c = Circle(
(x, y),
radius*self.scale,
ec='k',
fc='k',
fill=True,
lw=self.linewidth
)
self._axes.add_patch(c)
def not_point(self, gate_idx, wire_idx):
"""Draw a NOT gates as the circle with plus in the middle."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.not_radius
c = Circle(
(x, y),
radius,
ec='k',
fc='w',
fill=False,
lw=self.linewidth
)
self._axes.add_patch(c)
l = Line2D(
(x, x), (y - radius, y + radius),
color='k',
lw=self.linewidth
)
self._axes.add_line(l)
def swap_point(self, gate_idx, wire_idx):
"""Draw a swap point as a cross."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
d = self.swap_delta
l1 = Line2D(
(x - d, x + d),
(y - d, y + d),
color='k',
lw=self.linewidth
)
l2 = Line2D(
(x - d, x + d),
(y + d, y - d),
color='k',
lw=self.linewidth
)
self._axes.add_line(l1)
self._axes.add_line(l2)
def circuit_plot(c, nqubits, **kwargs):
"""Draw the circuit diagram for the circuit with nqubits.
Parameters
==========
c : circuit
The circuit to plot. Should be a product of Gate instances.
nqubits : int
The number of qubits to include in the circuit. Must be at least
            as big as the largest ``min_qubits`` of the gates.
"""
return CircuitPlot(c, nqubits, **kwargs)
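    # Hedged usage sketch (an illustrative addition, not part of the original
    # module; it assumes numpy and matplotlib are importable and a drawing
    # backend is available): plots a two-qubit circuit made of a Hadamard
    # followed by a CNOT. Gates in the product act right-to-left, so H(1)
    # is applied first.
    def _circuit_plot_usage_sketch():
        from sympy.physics.quantum.gate import H, CNOT

        c = CNOT(1, 0) * H(1)
        return circuit_plot(c, nqubits=2, labels=labeller(2))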
def render_label(label, inits={}):
"""Slightly more flexible way to render labels.
>>> from sympy.physics.quantum.circuitplot import render_label
>>> render_label('q0')
'$|q0\\\\rangle$'
>>> render_label('q0', {'q0':'0'})
'$|q0\\\\rangle=|0\\\\rangle$'
"""
init = inits.get(label)
if init:
return r'$|%s\rangle=|%s\rangle$' % (label, init)
return r'$|%s\rangle$' % label
def labeller(n, symbol='q'):
"""Autogenerate labels for wires of quantum circuits.
Parameters
==========
n : int
number of qubits in the circuit
symbol : string
A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc.
>>> from sympy.physics.quantum.circuitplot import labeller
>>> labeller(2)
['q_1', 'q_0']
>>> labeller(3,'j')
['j_2', 'j_1', 'j_0']
"""
return ['%s_%d' % (symbol,n-i-1) for i in range(n)]
class Mz(OneQubitGate):
"""Mock-up of a z measurement gate.
This is in circuitplot rather than gate.py because it's not a real
        gate; it just draws one.
"""
measurement = True
gate_name='Mz'
gate_name_latex=u('M_z')
class Mx(OneQubitGate):
"""Mock-up of an x measurement gate.
This is in circuitplot rather than gate.py because it's not a real
        gate; it just draws one.
"""
measurement = True
gate_name='Mx'
gate_name_latex=u('M_x')
class CreateOneQubitGate(ManagedProperties):
def __new__(mcl, name, latexname=None):
if not latexname:
latexname = name
return BasicMeta.__new__(mcl, name + "Gate", (OneQubitGate,),
{'gate_name': name, 'gate_name_latex': latexname})
def CreateCGate(name, latexname=None):
"""Use a lexical closure to make a controlled gate.
"""
if not latexname:
latexname = name
onequbitgate = CreateOneQubitGate(name, latexname)
def ControlledGate(ctrls,target):
return CGate(tuple(ctrls),onequbitgate(target))
return ControlledGate
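    # Hedged usage sketch (an illustrative addition, not part of the original
    # module): builds a named controlled gate with the CreateCGate closure
    # above and combines it with the Mz measurement mock-up. The gate name
    # 'X' and the wiring are assumptions chosen for demonstration.
    def _create_cgate_usage_sketch():
        CX = CreateCGate('X')     # closure returning CGate((ctrls,), XGate(target))
        c = Mz(0) * CX([1], 0)    # controlled-X, then measure qubit 0
        return circuit_plot(c, nqubits=2, labels=labeller(2))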
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be 3d or higher')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
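# Hedged usage sketch (an illustrative addition, not part of the original
# test module): shows the pattern MockClassifier above is built for:
# threading arbitrary non-array fit parameters through cross_val_score so
# that they reach fit() unchanged. The particular values are assumptions.
def _mock_classifier_fit_params_sketch():
    clf = MockClassifier()
    X_demo = np.ones((10, 2))
    y_demo = np.arange(10) % 3
    fit_params = {'dummy_int': 42, 'dummy_str': '42',
                  'callback': lambda est: None}
    scores = cval.cross_val_score(clf, X_demo, y_demo, fit_params=fit_params)
    assert scores.shape == (3,)  # the default 3-fold CV
    return scores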
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
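# Hedged usage sketch (an illustrative addition, not part of the original
# test module): the two helpers above are typically combined like this to
# verify that a CV iterator partitions the sample indices. The sample and
# fold counts are assumptions chosen for demonstration.
def _cv_coverage_helper_sketch():
    cv = cval.KFold(12, 4)
    # 4 folds over 12 samples; every index appears in exactly one test fold.
    check_cv_coverage(cv, expected_n_iter=4, n_samples=12)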
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
srowen/spark | python/run-tests.py | 15 | 13614 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = None
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
# Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
'PYARROW_IGNORE_TIMEZONE': '1',
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
while os.path.isdir(metastore_dir):
metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
os.mkdir(metastore_dir)
# Also override the JVM's temp directory by setting driver and executor options.
java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
spark_args = [
"--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
            # Here we expect skipped test output from unittest when the verbosity
            # level is 2 (or the --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
assert SKIPPED_TESTS is not None
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python3.6", "pypy3"] if which(x)]
if "python3.6" not in python_execs:
p = which("python3")
if not p:
LOGGER.error("No python3 executable found. Exiting!")
os._exit(1)
else:
python_execs.insert(0, p)
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
def _check_coverage(python_exec):
    # Make sure coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
        # Check if the python executable has coverage installed when the
        # 'COVERAGE_PROCESS_START' environment variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.excluded_python_implementations:
for test_goal in module.python_test_goals:
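                        # queue.PriorityQueue pops the smallest priority value first, so the
                        # long-running suites below (priority 0) are scheduled before the
                        # lighter ones (priority 100) to keep the parallel workers busy.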
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
'pyspark.pandas.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
SKIPPED_TESTS = Manager().dict()
main()
| apache-2.0 |
jinglining/flink | flink-python/setup.py | 5 | 12946 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import glob
import io
import os
import platform
import subprocess
import sys
from distutils.command.build_ext import build_ext
from shutil import copytree, copy, rmtree
from setuptools import setup, Extension
if sys.version_info < (3, 5):
print("Python versions prior to 3.5 are not supported for PyFlink.",
file=sys.stderr)
sys.exit(-1)
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
def find_file_path(pattern):
files = glob.glob(pattern)
if len(files) < 1:
print("Failed to find the file %s." % pattern)
exit(-1)
if len(files) > 1:
print("The file pattern %s is ambiguous: %s" % (pattern, files))
exit(-1)
return files[0]
# Building the Cython-optimized extensions is currently not supported on Windows.
if platform.system() == 'Windows':
extensions = ([])
else:
try:
from Cython.Build import cythonize
extensions = cythonize([
Extension(
name="pyflink.fn_execution.fast_coder_impl",
sources=["pyflink/fn_execution/fast_coder_impl.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.fast_operations",
sources=["pyflink/fn_execution/fast_operations.pyx"],
include_dirs=["pyflink/fn_execution/"])
])
except ImportError:
if os.path.exists("pyflink/fn_execution/fast_coder_impl.c"):
extensions = ([
Extension(
name="pyflink.fn_execution.fast_coder_impl",
sources=["pyflink/fn_execution/fast_coder_impl.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.fast_operations",
sources=["pyflink/fn_execution/fast_operations.c"],
include_dirs=["pyflink/fn_execution/"])
])
else:
extensions = ([])
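# In short: prefer Cython-compiled extensions when Cython is importable, fall back to
# compiling the pre-generated C sources if they exist, and otherwise ship pure Python.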
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
LIB_TEMP_PATH = os.path.join(TEMP_PATH, "lib")
OPT_TEMP_PATH = os.path.join(TEMP_PATH, "opt")
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log")
EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples")
LICENSES_TEMP_PATH = os.path.join(TEMP_PATH, "licenses")
PLUGINS_TEMP_PATH = os.path.join(TEMP_PATH, "plugins")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
NOTICE_FILE_TEMP_PATH = os.path.join(this_directory, "NOTICE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
PYFLINK_UDF_RUNNER_SH = "pyflink-udf-runner.sh"
PYFLINK_UDF_RUNNER_BAT = "pyflink-udf-runner.bat"
in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
# Due to changes in FLINK-14008, the licenses directory and NOTICE file may not exist in
# the build-target folder. Just ignore them in this case.
exist_licenses = None
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
FLINK_HOME = os.path.abspath(
"../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
incorrect_invocation_message = """
If you are installing pyflink from flink source, you must first build Flink and
run sdist.
To build Flink with maven you can run:
mvn -DskipTests clean package
Building the source dist is done in the flink-python directory:
cd flink-python
python setup.py sdist
pip install dist/*.tar.gz"""
LIB_PATH = os.path.join(FLINK_HOME, "lib")
OPT_PATH = os.path.join(FLINK_HOME, "opt")
OPT_PYTHON_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-python_*.jar")))
OPT_SQL_CLIENT_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-sql-client_*.jar")))
CONF_PATH = os.path.join(FLINK_HOME, "conf")
EXAMPLES_PATH = os.path.join(FLINK_HOME, "examples")
LICENSES_PATH = os.path.join(FLINK_HOME, "licenses")
PLUGINS_PATH = os.path.join(FLINK_HOME, "plugins")
SCRIPTS_PATH = os.path.join(FLINK_HOME, "bin")
LICENSE_FILE_PATH = os.path.join(FLINK_HOME, "LICENSE")
README_FILE_PATH = os.path.join(FLINK_HOME, "README.txt")
exist_licenses = os.path.exists(LICENSES_PATH)
if not os.path.isdir(LIB_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
try:
os.symlink(LIB_PATH, LIB_TEMP_PATH)
support_symlinks = True
except BaseException: # pylint: disable=broad-except
support_symlinks = False
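        # Symlinking may fail (e.g. on Windows without the required privilege); in that
        # case the Flink distribution files are copied into the package instead.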
os.mkdir(OPT_TEMP_PATH)
if support_symlinks:
os.symlink(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
os.symlink(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
os.symlink(CONF_PATH, CONF_TEMP_PATH)
os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
os.symlink(PLUGINS_PATH, PLUGINS_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
else:
copytree(LIB_PATH, LIB_TEMP_PATH)
copy(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
copy(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
copytree(CONF_PATH, CONF_TEMP_PATH)
copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
copytree(PLUGINS_PATH, PLUGINS_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
os.mkdir(LOG_TEMP_PATH)
with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f:
f.write("This file is used to force setuptools to include the log directory. "
"You can delete it at any time after installation.")
# copy the udf runner scripts
copytree(SCRIPTS_PATH, SCRIPTS_TEMP_PATH)
copy(os.path.join(this_directory, "bin", PYFLINK_UDF_RUNNER_SH),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_SH))
copy(os.path.join(this_directory, "bin", PYFLINK_UDF_RUNNER_BAT),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_BAT))
if exist_licenses and platform.system() != "Windows":
# regenerate the licenses directory and NOTICE file as we only copy part of the
# flink binary distribution.
collect_licenses_file_sh = os.path.abspath(os.path.join(
this_directory, "..", "tools", "releasing", "collect_license_files.sh"))
subprocess.check_output([collect_licenses_file_sh, TEMP_PATH, TEMP_PATH])
# move the NOTICE file to the root of the package
GENERATED_NOTICE_FILE_PATH = os.path.join(TEMP_PATH, "NOTICE")
os.rename(GENERATED_NOTICE_FILE_PATH, NOTICE_FILE_TEMP_PATH)
else:
if not os.path.isdir(LIB_TEMP_PATH) or not os.path.isdir(OPT_TEMP_PATH) \
or not os.path.isdir(SCRIPTS_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
exist_licenses = os.path.exists(LICENSES_TEMP_PATH)
script_names = ["pyflink-shell.sh", "find-flink-home.sh"]
scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names]
scripts.append("pyflink/find_flink_home.py")
PACKAGES = ['pyflink',
'pyflink.table',
'pyflink.util',
'pyflink.datastream',
'pyflink.dataset',
'pyflink.common',
'pyflink.fn_execution',
'pyflink.metrics',
'pyflink.ml',
'pyflink.ml.api',
'pyflink.ml.api.param',
'pyflink.ml.lib',
'pyflink.ml.lib.param',
'pyflink.lib',
'pyflink.opt',
'pyflink.conf',
'pyflink.log',
'pyflink.examples',
'pyflink.plugins',
'pyflink.bin']
PACKAGE_DIR = {
'pyflink.lib': TEMP_PATH + '/lib',
'pyflink.opt': TEMP_PATH + '/opt',
'pyflink.conf': TEMP_PATH + '/conf',
'pyflink.log': TEMP_PATH + '/log',
'pyflink.examples': TEMP_PATH + '/examples',
'pyflink.plugins': TEMP_PATH + '/plugins',
'pyflink.bin': TEMP_PATH + '/bin'}
PACKAGE_DATA = {
'pyflink': ['README.txt'],
'pyflink.lib': ['*.jar'],
'pyflink.opt': ['*.*', '*/*'],
'pyflink.conf': ['*'],
'pyflink.log': ['*'],
'pyflink.examples': ['*.py', '*/*.py'],
'pyflink.plugins': ['*', '*/*'],
'pyflink.bin': ['*']}
if exist_licenses and platform.system() != "Windows":
PACKAGES.append('pyflink.licenses')
PACKAGE_DIR['pyflink.licenses'] = TEMP_PATH + '/licenses'
PACKAGE_DATA['pyflink.licenses'] = ['*']
setup(
name='apache-flink',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
scripts=scripts,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Apache Software Foundation',
author_email='[email protected]',
python_requires='>=3.5',
install_requires=['py4j==0.10.8.1', 'python-dateutil==2.8.0', 'apache-beam==2.19.0',
'cloudpickle==1.2.2', 'avro-python3>=1.8.1,<=1.9.1', 'jsonpickle==1.2',
'pandas>=0.23.4,<=0.25.3', 'pyarrow>=0.15.1,<0.16.0', 'pytz>=2018.3'],
cmdclass={'build_ext': build_ext},
tests_require=['pytest==4.4.1'],
description='Apache Flink Python API',
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'],
ext_modules=extensions
)
finally:
if in_flink_source:
remove_if_exists(TEMP_PATH)
remove_if_exists(LICENSE_FILE_TEMP_PATH)
remove_if_exists(NOTICE_FILE_TEMP_PATH)
remove_if_exists(README_FILE_TEMP_PATH)
| apache-2.0 |
rhnvrm/iot-hackerearth | py/sim/wo/work.py | 1 | 5919 | from __future__ import division
import requests
import random
import time
import threading
import rethinkdb as r
import math
import numpy as np
import cv2
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from scipy.misc import imread
import os
plt.scatter([0,5],[0,5])
plt.ion()
plt.show()
plt.clf()
plt.gca().grid(1)
img = imread("im1.png")
# Coordinates of the Beacons
x1=0.5
y1=0
x2=0.5
y2=2.5
x3=2.5
y3=2.5
# End Segment
maximum =10
#init fences
FENCES = []
for i in xrange(0,3):
for j in xrange(0,3):
FENCES+=[[[i,j],[i,j+1],[i+1,j+1],[i+1,j]]]
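# FENCES now holds nine 1x1 axis-aligned squares tiling the [0,3]x[0,3] area, each given
# as its four corners in loop order (the format expected by infence/plotfences below).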
def kmeans(Z, STO):  # Z should be an np array of candidate points, like the one described below
#Z = np.array([[a1,b1],[x1,y1],[x2,y2],[a3,b3],[a2,b2]])
# convert to np.float32
plt.clf()
plt.imshow(img, zorder=0, extent=[-1,4,-6,3.5])
Z = np.float32(Z)
# define criteria and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret,label,center=cv2.kmeans(Z,1,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
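    # With a single cluster (K=1) the returned center is simply the mean of all candidate
    # intersection points, i.e. the centroid of the trilateration estimates.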
# pls add corresponding entries for each cluster
# Now separate the data, Note the flatten()
A = Z[label.ravel()==0]
B = Z[label.ravel()==1]
# Plot the data
#"""
    # remove for debugging
plt.scatter(A[:,0],A[:,1])
plt.scatter(B[:,0],B[:,1],c = 'r')
plt.scatter([x1,x2,x3],[y1,y2,y3],s = 40, c = 'red')
plt.scatter(center[:,0],center[:,1],s = 80,c = 'y', marker = 's')
plt.xlabel('X'),plt.ylabel('Y')
plotfences(plt)
plt.draw()
plt.savefig("plot.png")
pt = [center[:,0],center[:,1]]
postdata(checkfences(pt),pt[0],pt[1])
STO = center
def Intersects(x1,y1,x2,y2,x3,y3,r1,r2,r3):
    # Circle 1: r1^2 = x^2 + y^2
    # Circle 2: r2^2 = (x - a)^2 + (y - b)^2
a = x2 - x1;
b = y2 - y1;
d = math.sqrt(a*a + b*b);
if (r1 + r2 <= d):
sx1,sy1,sx2,sy2 = ((r2*x1+r1*x2)/(r1+r2),((r2*y1+r1*y2)/(r1+r2)),(r2*x1+r1*x2)/(r1+r2),((r2*y1+r1*y2)/(r1+r2)))
elif ((d <= abs( r1 - r2 )) and (r1>r2)):
sx1,sy1,sx2,sy2 = ((r1*x2-r2*x1)/(r1-r2),((r1*y2-r2*y1)/(r1-r2)),(r1*x2-r2*x1)/(r1-r2),((r1*y2-r2*y1)/(r1-r2)))
elif ((d <= abs( r1 - r2 )) and (r2>r1)):
sx1,sy1,sx2,sy2 = ((r2*x1-r1*x2)/(r2-r1),((r2*y1-r1*y2)/(r2-r1)),(r2*x1-r1*x2)/(r2-r1),((r2*y1-r1*y2)/(r2-r1)))
else:
t = math.sqrt( (d + r1 + r2) * (d + r1 - r2) * (d - r1 + r2) * (-d + r1 + r2) )
sx1 = 0.5 * (a + (a*(r1*r1 - r2*r2) + b*t)/(d**2))
sx2 = 0.5 * (a + (a*(r1*r1 - r2*r2) - b*t)/(d**2))
sy1 = 0.5 * (b + (b*(r1*r1 - r2*r2) - a*t)/(d**2))
sy2 = 0.5 * (b + (b*(r1*r1 - r2*r2) + a*t)/(d**2))
sx1 = sx1 + x1
sy1 = sy1 + y1
sx2 = sx2 + x1
sy2 = sy2 + y1
    # keep the candidate whose distance to the third beacon has the smaller absolute error w.r.t. r3
if (abs((((sx1-x3)**2 +(sy1-y3)**2)**0.5)-r3)>=abs((((sx2-x3)**2 +(sy2-y3)**2)**0.5)-r3)):
return [[sx2,sy2]]
else:
return [[sx1,sy1]]
"""
#append the following to the storing array that passes to the kmeans clustering
print "x1 = %f" %sx1
print "y1 = %f" %sy1
print "x2 = %f" %sx2
print "y2 = %f" %sy2
#[sx1,sy1,sx2,sy2]
return [[sx1,sy1],[sx2,sy2]]
"""
def display_data(distances):
# hardcoded beacon data Start
#r1,r2,r3 = tuple(distances[i] for i in distances)
r1 = distance_lookup_table["B4:99:4C:66:4B:38"]
r2 = distance_lookup_table["B4:99:4C:66:5A:26"]
r3 = distance_lookup_table["B4:99:4C:66:2C:58"]
if(r1 < 0 or r2 < 0 or r3 < 0): return -1
print(r1,r2,r3)
# Hardcoded beacon data end
    # END OF FUNCTION DECLARATIONS
alive = 3
nclusters = 2
PTS = []
#taking groups of 3
for i in range(0,alive-2):
for j in range(i+1,alive-1):
for k in range(j+1,alive):
PTS = PTS + Intersects(x1,y1,x2,y2,x3,y3,r1,r2,r3)
#print i,j
PTS = PTS + Intersects(x1,y1,x3,y3,x2,y2,r1,r3,r2)
#print j,k
PTS = PTS + Intersects(x2,y2,x3,y3,x1,y1,r2,r3,r1)
#print k,i
#"""
STO = []
CentreWeight = 10000000000000
#print STO
#print PTS
kmeans(np.array(PTS),np.array(STO))
def checkfences(pt): # ref to global variable FENCES
for i in range(0,len(FENCES)):
if(infence(FENCES[i],pt)):
return i
return -1
def getDistance(rssi, txPower):
return pow(10, ( txPower - rssi) / (10 * ATTN))
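# getDistance() inverts the log-distance path-loss model rssi = txPower - 10*ATTN*log10(d),
# giving d = 10 ** ((txPower - rssi) / (10 * ATTN)).
# Illustrative numbers (not measured values): txPower=-62 dBm, rssi=-72 dBm, ATTN=2
# gives d = 10 ** (10 / 20) ~= 3.16 metres.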
"""
fence1 = [[0,0],[0,1],[1,1],[1,0]] # define points in the order of loop
point1 = [1.05,1]
point2 = [2,2]
infence(fence1,point1)
"""
def infence(fence,pt):
bbPath = matplotlib.path.Path(np.array(fence))
return bbPath.contains_point((pt[0], pt[1]))
def plotfences(media):
for i in range(0,len(FENCES)):
rectangle = media.Rectangle((FENCES[i][0][0],FENCES[i][0][1]), 1, 1, fc='None')
media.gca().add_patch(rectangle)
def postdata(segment,x,y):
r=requests.post("http://0.0.0.0:8521/position", data = {"segment":segment ,"x":x, "y":y})
# main Begins Here
conn = r.connect( "0.0.0.0", 28015 , db='heck')
ATTN = 2
power_to_A_lookup_table = {"B4:99:4C:66:4B:38": -58, "B4:99:4C:66:5A:26": -62, "B4:99:4C:66:2C:58": -62}
distance_lookup_table = {"B4:99:4C:66:4B:38": -1, "B4:99:4C:66:5A:26": -1, "B4:99:4C:66:2C:58": -1}
#old_distance_lookup_table = {"B4:99:4C:57:AE:E3": -1, "B4:99:4C:57:D2:AA": -1, "B4:99:4C:57:EC:C6": -1}
#x = {u'old_val': {u'uid': u'B4:99:4C:57:EC:C6', u'rssi': -61, u'name': u'Bluetooth Device', u'timestamp': 1453011839.46865}, u'new_val': {u'uid': u'B4:99:4C:57:EC:C6', u'rssi': -55, u'name': u'Bluetooth Device', u'timestamp': 1453011857.281005}}
feed = r.table('beacons').changes().run(conn)
for change in feed:
if change['new_val']['uid'] in power_to_A_lookup_table:
#old_distance_lookup_table = distance_lookup_table
distance_lookup_table[change['new_val']['uid']] = getDistance(int(change['new_val']['rssi']), power_to_A_lookup_table[change['new_val']['uid']])
#for i in distance_lookup_table:
#print "here\n"
# print str(i) + " -> " + str(distance_lookup_table[i])
#t=threading.Thread(target=print_distance_lookup_table)
#d=threading.Thread(target=display_data)
display_data(distance_lookup_table)
#d.daemon = True
#t.daemon = True
#t.start()
#d.start()
| mit |
tiagofrepereira2012/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 52 | 69800 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
self.assertEqual(50, step_counter.steps)
else:
# Occasionally, training stops when global_step == 101, due to a race
# condition.
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
    self.assertEqual(
        1, len(classifier.get_variable_value('linear/age/weight')))
    self.assertEqual(
        100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
    self.assertEqual(
        1, len(classifier.get_variable_value('linear/bias_weight')))
    self.assertEqual(
        99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
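      # (with identical all-ones features, the MSE-optimal constant prediction is
      # the label mean: (1 + 0 + 0 + 0) / 4 = 0.25)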
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
shanzhenren/ClusType | src/algorithm.py | 1 | 10630 | from collections import defaultdict
from operator import itemgetter
from math import log, sqrt
import random as rn
import time
from numpy import * # install numpy
from scipy import * # install scipy
from numpy.linalg import norm
import numpy.linalg as npl
from scipy.sparse import *
import scipy.sparse.linalg as spsl
from sklearn.preprocessing import normalize ### install from http://scikit-learn.org/stable/
def create_matrix(size_row, size_col):
return csr_matrix((size_row, size_col))
def create_dense_matrix(size_row, size_col):
return mat(zeros((size_row, size_col)))
def set_Y(train_mid, seedMention_tid_score, mid_mention, size_row, size_col):
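    # Build the (size_row x size_col) seed label matrix Y: each training mention whose
    # surface name appears in the seed dictionary gets weight 1.0 in its type column;
    # NIL seeds (the last column) are down-sampled to at most 5x the number of
    # target-type seeds before being added.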
row = []
col = []
val = []
num_NIL = 0
num_target = 0
NIL_set = set()
for mid in train_mid:
# in training set
mention = mid_mention[mid]
if mention in seedMention_tid_score:
# in ground truth
tid = seedMention_tid_score[mention][0]
score = seedMention_tid_score[mention][1]
if tid == (size_col - 1):
# NIL
num_NIL += 1
# NIL_set.add((mid, tid, score))
NIL_set.add((mid, tid, 1.0))
else:
num_target += 1
row.append(mid)
col.append(tid)
# val.append(score)
val.append(1.0)
if num_target < 1:
print 'No target type entity seeded!!!!'
### random sample NIL examples
# neg_size = num_NIL
neg_size = min(num_NIL, 5*num_target)
# neg_size = int(min(num_NIL, num_target/(size_col-1.0)))
neg_example = rn.sample(NIL_set, neg_size)
for entry in neg_example:
row.append(entry[0])
col.append(entry[1])
val.append(entry[2])
Y = coo_matrix((val, (row, col)), shape = (size_row, size_col)).tocsr()
# print Y.nnz, '#ground truth mentions in Y'
print 'Percent Seeded Mention:', (Y.nnz+0.0)/len(mid_mention) * 100, '% of', len(mid_mention), \
', #target/All = ', num_target/(Y.nnz+0.0) * 100
return Y
def update_Y_closed_form(S_M, Y, Y0, Theta, PiC, gamma, mu):
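    # Closed-form block update of Y: for the mentions of each candidate j, solve
    # ((1+gamma+mu) I - gamma*S_M_j) Y_j = Theta_j + mu*Y0_j by dense inversion when the
    # block is small enough (the commented-out lines sketch a sparse alternative).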
# row = []
# col = []
# val = []
for j in range(PiC.shape[1]):
# for each candidate j, slicing to get submatrix
mid_list = PiC[:, j].nonzero()[0].tolist()
Y0_j = Y0[mid_list, :]
Theta_j = Theta[mid_list, :]
S_M_j = S_M[mid_list, :][:, mid_list]
if S_M_j.shape[0] * S_M_j.shape[1] < 2520800000:
# transform to dense matrix
tmp = ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j).todense()
Y_j = npl.inv(tmp) * (Theta_j + mu*Y0_j)
Y[mid_list, :] = Y_j
# # sparse
# Yc = spsl.inv((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j) * (Theta_j + mu*Y0_j)
# Yc = spsl.spsolve( ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j), (Theta_j + mu*Y0_j) )
# row_idx, col_idx = Yc.nonzero()
# for i in range(len(row_idx)):
# mid = mid_list[row_idx[i]]
# row.append(mid)
# col.append(col_idx[i])
# val.append(Yc[row_idx[i], col_idx[i]])
if j % 1000 == 0:
print 'candidate', j
# Y = coo_matrix((val, (row, col)), shape = Y0.shape).tocsr()
return Y
def inverse_matrix(X):
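    # In-place element-wise reciprocal of the stored nonzeros of a sparse matrix;
    # this coincides with the true matrix inverse when the matrix is diagonal.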
X.data[:] = 1/(X.data)
return X
def clustype_appx(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, K):
PiLL = PiL.T*PiL
PiRR = PiR.T*PiR
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
C = create_dense_matrix(n, T)
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Y = Y0.copy()
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
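    # Alternate block updates: Y is smoothed over the mention graph S_M while being pulled
    # toward Theta = PiC*C + PiL*PL + PiR*PR and the seed labels Y0; C, PL and PR are then
    # each updated in turn with the other blocks held fixed, and the objective is tracked.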
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0)
C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) )
PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR))
PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL))
obj_old = obj
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
if (i+1) % 10 == 0:
print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
# Y = PiC*C
# Y = PiL*PL + PiR*PR
Y = PiC*C + PiL*PL + PiR*PR
return (Y, C, PL, PR)
def clustype_noClus_inner(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, tol, C, PL, PR, Y):
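    # Warm-start version of clustype_appx: resumes from the supplied C, PL, PR and Y,
    # and returns early once the relative change in the objective drops below tol.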
PiLL = PiL.T*PiL
PiRR = PiR.T*PiR
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0)
C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) )
PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR))
PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL))
obj_old = obj
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
rel = abs(obj_old - obj)/obj_old
if (i+1) % 10 == 0:
print '\tClusType_noClus_inner Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
if rel < tol:
print ' ClusType_noClus_inner Converges!'
Y = PiC*C + PiL*PL + PiR*PR
return (Y, C, PL, PR)
# Y = PiC*C
# Y = PiL*PL + PiR*PR
Y = PiC*C + PiL*PL + PiR*PR
print ' ClusType_noClus_inner Reach MaxIter!'
return (Y, C, PL, PR)
def clustype_noClus_PiLR(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
    ### pre-computation #############################################################
m = PiC.shape[0]
n, l = S_L.shape
PiLL = PiL.T*PiL # l-by-l
PiRR = PiR.T*PiR # l-by-l
### initialization #############################################################
C = create_dense_matrix(n, T)
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Y = Y0.copy()
theta = PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O*(norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + theta + mu*Y0)
C = 1/2.0 * ( S_L*PL + S_R*PR )
PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * lambda_O*PiL.T*(Y-PiR*PR)
PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * lambda_O*PiR.T*(Y-PiL*PL)
obj_old = obj
theta = PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
if (i+1) % 10 == 0:
print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
Y = PiL*PL + PiR*PR
return Y
def clustype_noClus_PiC(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
C = create_dense_matrix(n, T)
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Y = Y0.copy()
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + PiC*C + mu*Y0)
C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*Y )
PL = S_L.T*C
PR = S_R.T*C
obj_old = obj
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
if (i+1) % 10 == 0:
print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
Y = PiC*C
return Y
def clustype_onlycandidate(S_L, S_R, PiC, PiL, PiR, Y0, T, ITER):
    ### pre-computation #############################################################
    u = 0.5  # weight of the u*||C - C0||_F^2 term that keeps C close to the seed scores C0
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
C0 = PiC.T * Y0
C = C0.copy()
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)
### Start algorithm #############################################################
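    # This variant drops the mention graph: C is anchored to the seed projection
    # C0 = PiC.T * Y0 with weight u while alternating with the PL and PR updates.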
for i in range(ITER):
C = 1/(2+u) * (S_L*PL + S_R*PR + u*C0)
PL = S_L.T*C
PR = S_R.T*C
obj_old = obj
obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)
if (i+1) % 10 == 0:
print 'ClusType_Cand Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
Y = PiC*C
return (Y, C, PL, PR)
| gpl-3.0 |
JasonKessler/scattertext | scattertext/test/test_PriorFactory.py | 1 | 4207 | from unittest import TestCase
import numpy as np
import pandas as pd
from scattertext import LogOddsRatioInformativeDirichletPrior
from scattertext.PriorFactory import PriorFactory
from scattertext.test.test_semioticSquare import get_test_corpus
class TestPriorFactory(TestCase):
def test_all_categories(self):
corpus = get_test_corpus()
priors, my_corpus = (PriorFactory(corpus, starting_count=0, category='hamlet')
.use_all_categories()
.build())
tdf = corpus.get_term_freq_df()
self.assertEqual(len(priors), len(tdf))
np.testing.assert_equal(priors.values,
corpus.get_term_freq_df().sum(axis=1).values)
def test_neutral_categories(self):
corpus = get_test_corpus()
priors = (PriorFactory(corpus, 'hamlet', starting_count=0.001,
not_categories=['swift'])
.use_neutral_categories()
.get_priors())
self.assertEqual(priors.min(), 0.001)
self.assertEqual(priors.shape[0], corpus._X.shape[1])
corpus = get_test_corpus()
priors = (PriorFactory(corpus, 'hamlet', starting_count=0.001,
not_categories=['swift'])
.use_neutral_categories()
.drop_zero_priors()
.get_priors())
jzcnts = corpus.get_term_freq_df()['jay-z/r. kelly freq'].where(lambda x: x > 0).dropna()
np.testing.assert_equal(priors.values,
jzcnts.values + 0.001)
def test_get_general_term_frequencies(self):
corpus = get_test_corpus()
fact = (PriorFactory(corpus,
category='hamlet',
not_categories=['swift'],
starting_count=0)
.use_general_term_frequencies()
.use_all_categories()
)
priors, clean_corpus = fact.build()
expected_prior = pd.merge(corpus.get_term_doc_count_df(),
corpus.get_term_and_background_counts()[['background']],
left_index=True,
right_index=True,
how='left').fillna(0.).sum(axis=1)
np.testing.assert_allclose(priors.values, expected_prior.values)
def test_align_to_target(self):
full_corpus = get_test_corpus()
corpus = full_corpus.remove_categories(['swift'])
priors = PriorFactory(full_corpus).use_all_categories().get_priors()
with self.assertRaises(ValueError):
(LogOddsRatioInformativeDirichletPrior(priors)
.get_scores(*corpus.get_term_freq_df().values.T))
priors = (PriorFactory(full_corpus)
.use_all_categories()
.align_to_target(corpus)
.get_priors())
(LogOddsRatioInformativeDirichletPrior(priors)
.get_scores(*corpus.get_term_freq_df().values.T))
def test_use_categories(self):
full_corpus = get_test_corpus()
priors = PriorFactory(full_corpus).use_categories(['swift']).get_priors()
corpus = full_corpus.remove_categories(['swift'])
with self.assertRaises(ValueError):
(LogOddsRatioInformativeDirichletPrior(priors)
.get_scores(*corpus.get_term_freq_df().values.T))
priors = (PriorFactory(full_corpus)
.use_all_categories()
.align_to_target(corpus)
.get_priors())
(LogOddsRatioInformativeDirichletPrior(priors)
.get_scores(*corpus.get_term_freq_df().values.T))
def test_get_custom_term_frequencies(self):
corpus = get_test_corpus()
fact = (PriorFactory(corpus, starting_count=0.04)
.use_custom_term_frequencies(pd.Series({'halt': 3, 'i': 8}))
.drop_zero_priors()
)
priors, clean_corpus = fact.build()
self.assertEqual(set(clean_corpus.get_terms()), {'i', 'halt'})
np.testing.assert_equal(priors.sort_values().values, [3.04, 8.04])
| apache-2.0 |
atcemgil/notes | DrawNN.py | 1 | 2429 | #Code from https://gist.github.com/craffel/2d727968c3aaebd10359
import matplotlib.pyplot as plt
def draw_neural_net(ax, left, right, bottom, top, layer_sizes, bias=0, draw_edges=False):
'''
Draw a neural network cartoon using matplotilb.
:usage:
>>> fig = plt.figure(figsize=(12, 12))
>>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])
:parameters:
- ax : matplotlib.axes.AxesSubplot
The axes on which to plot the cartoon (get e.g. by plt.gca())
- left : float
The center of the leftmost node(s) will be placed here
- right : float
The center of the rightmost node(s) will be placed here
- bottom : float
The center of the bottommost node(s) will be placed here
- top : float
The center of the topmost node(s) will be placed here
- layer_sizes : list of int
List of layer sizes, including input and output dimensionality
- bias : Boolean
Draw an extra bias node at each layer
- draw_edges : Boolean
If false, omit edge connections
'''
n_layers = len(layer_sizes)
v_spacing = (top - bottom)/float(max(layer_sizes)+bias)
h_spacing = (right - left)/float(len(layer_sizes) - 1)
# Nodes
for n, layer_size in enumerate(layer_sizes):
layer_top = v_spacing*(layer_size - 1)/2. + (top + bottom)/2.
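        # every layer except the output gets `bias` extra node(s), drawn in blue ('b')
        # instead of white below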
bias_node = (bias if n<len(layer_sizes)-1 else 0)
        for m in range(layer_size + bias_node):
node_color = 'w' if m<layer_size else 'b'
circle = plt.Circle((n*h_spacing + left, layer_top - m*v_spacing), v_spacing/8.,
color=node_color, ec='k', zorder=4)
ax.add_artist(circle)
# Edges
if draw_edges:
for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.
layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.
for m in range(layer_size_a+bias):
for o in range(layer_size_b):
line = plt.Line2D([n*h_spacing + left, (n + 1)*h_spacing + left],
[layer_top_a - m*v_spacing, layer_top_b - o*v_spacing],
c='k')
ax.add_artist(line) | mit |
uberpye/gwdetchar | gwdetchar/io/tests/test_html.py | 1 | 22674 | # -*- coding: utf-8 -*-
# Copyright (C) Alex Urban (2019)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gwdetchar. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwdetchar.io.html`
"""
import os
import sys
import shutil
import datetime
from pytz import reference
from getpass import getuser
from MarkupPy import markup
try:
from unittest import mock
except ImportError: # python < 3
import mock
import pytest
from matplotlib import use
use('Agg') # nopep8
from gwpy.segments import (Segment, DataQualityFlag)
from .. import html
from ..._version import get_versions
from ...utils import parse_html
__author__ = 'Alex Urban <[email protected]>'
# global test objects
VERSION = get_versions()['version']
COMMIT = get_versions()['full-revisionid']
NEW_BOOTSTRAP_PAGE = """<!DOCTYPE HTML>
<html lang="en">
<head>
<meta http-equiv="refresh" content="60" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta content="width=device-width, initial-scale=1.0" name="viewport" />
<base href="{base}" />
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet" type="text/css" media="all" />
<link href="https://cdnjs.cloudflare.com/ajax/libs/fancybox/2.1.5/jquery.fancybox.min.css" rel="stylesheet" type="text/css" media="all" />
<link href="https://fonts.googleapis.com/css?family=Roboto:300,400%7CRoboto+Mono" rel="stylesheet" type="text/css" media="all" />
<link href="static/bootstrap-ligo.min.css" rel="stylesheet" type="text/css" media="all" />
<link href="static/gwdetchar.min.css" rel="stylesheet" type="text/css" media="all" />
<script src="https://code.jquery.com/jquery-1.12.3.min.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.13.0/moment.min.js" type="text/javascript"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/fancybox/2.1.5/jquery.fancybox.min.js" type="text/javascript"></script>
<script src="static/bootstrap-ligo.min.js" type="text/javascript"></script>
<script src="static/gwdetchar.min.js" type="text/javascript"></script>
</head>
<body>
<div class="container">
</body>
</html>""" # nopep8
TEST_CONFIGURATION = """[section]
key = value"""
ABOUT = """<div class="row">
<div class="col-md-12">
<h2>On the command-line</h2>
<p>This page was generated with the following command-line call:</p>
<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%"><span></span>$ gwdetchar-scattering -i X1
</pre></div>
<p>The install path used was <code>{}</code>.</p>
<h2>Configuration files</h2>
<p>The following INI-format configuration file(s) were passed on the comand-line and are reproduced here in full:</p>
<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%"><span></span><span style="color: #008000; font-weight: bold">[section]</span>
<span style="color: #7D9029">key</span> <span style="color: #666666">=</span> <span style="color: #BA2121">value</span>
</pre></div>
<h2>Environment</h2><table class="table table-hover table-condensed table-responsive" id="package-table"><caption>Table of packages installed in the production environment</caption><thead><tr><th scope="col">Name</th><th scope="col">Version</th></tr></thead><tbody><tr><td>gwdetchar</td><td>1.2.3</td></tr><tr><td>gwpy</td><td>1.0.0</td></tr></tbody></table><button class="btn btn-default btn-table" onclick="exportTableToCSV("package-table.csv", "package-table")">Export to CSV</button>
</div>
</div>""".format(sys.prefix) # nopep8
ABOUT_WITH_CONFIG_LIST = """<div class="row">
<div class="col-md-12">
<h2>On the command-line</h2>
<p>This page was generated with the following command-line call:</p>
<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%"><span></span>$ gwdetchar-scattering -i X1
</pre></div>
<p>The install path used was <code>{}</code>.</p>
<h2>Configuration files</h2>
<p>The following INI-format configuration file(s) were passed on the comand-line and are reproduced here in full:</p>
<div class="panel-group" id="accordion">
<div class="panel panel-default">
<a href="#file0" data-toggle="collapse" data-parent="#accordion">
<div class="panel-heading">
<h4 class="panel-title">test.ini</h4>
</div>
</a>
<div id="file0" class="panel-collapse collapse">
<div class="panel-body">
<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%"><span></span><span style="color: #008000; font-weight: bold">[section]</span>
<span style="color: #7D9029">key</span> <span style="color: #666666">=</span> <span style="color: #BA2121">value</span>
</pre></div>
</div>
</div>
</div>
</div>
<h2>Environment</h2><table class="table table-hover table-condensed table-responsive" id="package-table"><caption>Table of packages installed in the production environment</caption><thead><tr><th scope="col">Name</th><th scope="col">Version</th></tr></thead><tbody><tr><td>gwdetchar</td><td>1.2.3</td></tr><tr><td>gwpy</td><td>1.0.0</td></tr></tbody></table><button class="btn btn-default btn-table" onclick="exportTableToCSV("package-table.csv", "package-table")">Export to CSV</button>
</div>
</div>""".format(sys.prefix) # nopep8
HTML_FOOTER = """<footer class="footer">
<div class="container">
<div class="row">
<div class="col-md-12">
<p>This page was created by {user} at {date}.</p>
<p><a href="https://github.com/gwdetchar/gwdetchar/tree/%s" target="_blank">View gwdetchar-%s on GitHub</a> | <a href="https://github.com/gwdetchar/gwdetchar/issues" target="_blank">Report an issue</a></p>
</div>
</div>
</div>
</footer>""" % (COMMIT, VERSION) # nopep8
HTML_CLOSE = """</div>
%s
</body>
</html>""" % HTML_FOOTER # nopep8
FLAG_CONTENT = """<div class="panel panel-warning">
<div class="panel-heading">
<a class="panel-title" href="#flag0" data-toggle="collapse" data-parent="#accordion">X1:TEST_FLAG</a>
</div>
<div id="flag0" class="panel-collapse collapse">
<div class="panel-body">{plots}
{content}
</div>
</div>
</div>""" # nopep8
FLAG_HTML = FLAG_CONTENT.format(content="""<pre># seg\tstart\tstop\tduration
0\t0\t66\t66.0
</pre>""", plots='')
FLAG_HTML_WITH_PLOTS = FLAG_CONTENT.format(
content='<pre># seg\tstart\tstop\tduration\n0\t0\t66\t66.0\n</pre>',
plots='\n<a id="a_X1-TEST_FLAG_66" target="_blank" title="Known (small) '
'and active (large) analysis segments for X1:TEST_FLAG" '
'class="fancybox" href="plots/X1-TEST_FLAG-0-66.png" '
'data-fancybox-group="images">\n<img id="img_X1-TEST_FLAG_66" '
'alt="X1-TEST_FLAG-0-66.png" class="img-responsive" '
'src="plots/X1-TEST_FLAG-0-66.png" />\n</a>')
FLAG_HTML_NO_SEGMENTS = FLAG_CONTENT.format(
content='<p>No segments were found.</p>', plots='')
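# a fully known, fully active 66-second flag reused by the segment-writing tests below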
FLAG = DataQualityFlag(known=[(0, 66)], active=[(0, 66)], name='X1:TEST_FLAG')
OMEGA_SCAFFOLD = """<div class="panel well panel-default">
<div class="panel-heading clearfix">
<h3 class="panel-title"><a href="https://cis.ligo.org/channel/byname/X1:STRAIN" title="CIS entry for X1:STRAIN" style="font-family: Monaco, "Courier New", monospace; color: black;" target="_blank">X1:STRAIN</a></h3>
</div>
<ul class="list-group">
<li class="list-group-item">
<div class="container">
<div class="row">
<div class="pull-right">
<a href="./1126259462" class="text-dark">[full scan]</a>
</div>
<h4>1126259462</h4>
</div>
<div class="row">
<div class="col-sm-4">
<a href="./1126259462/plots/X1-STRAIN-qscan_whitened-1.png" id="a_X1-STRAIN_1" title="X1-STRAIN-qscan_whitened-1.png" class="fancybox" target="_blank" data-fancybox-group="images">
<img id="img_X1-STRAIN_1" alt="X1-STRAIN-qscan_whitened-1.png" class="img-responsive" src="./1126259462/plots/X1-STRAIN-qscan_whitened-1.png" />
</a>
</div>
<div class="col-sm-4">
<a href="./1126259462/plots/X1-STRAIN-qscan_whitened-4.png" id="a_X1-STRAIN_4" title="X1-STRAIN-qscan_whitened-4.png" class="fancybox" target="_blank" data-fancybox-group="images">
<img id="img_X1-STRAIN_4" alt="X1-STRAIN-qscan_whitened-4.png" class="img-responsive" src="./1126259462/plots/X1-STRAIN-qscan_whitened-4.png" />
</a>
</div>
<div class="col-sm-4">
<a href="./1126259462/plots/X1-STRAIN-qscan_whitened-16.png" id="a_X1-STRAIN_16" title="X1-STRAIN-qscan_whitened-16.png" class="fancybox" target="_blank" data-fancybox-group="images">
<img id="img_X1-STRAIN_16" alt="X1-STRAIN-qscan_whitened-16.png" class="img-responsive" src="./1126259462/plots/X1-STRAIN-qscan_whitened-16.png" />
</a>
</div>
</div>
</div>
</li>
</ul>
</div>""" # nopep8
# -- HTML unit tests ----------------------------------------------------------
def test_fancy_plot():
# create a dummy FancyPlot instance
test = html.FancyPlot('test.png')
    assert test.img == 'test.png'
    assert test.caption == 'test.png'
# check that its properties are unchanged when the argument
# to FancyPlot() is also a FancyPlot instance
test = html.FancyPlot(test)
    assert test.img == 'test.png'
    assert test.caption == 'test.png'
def test_finalize_static_urls(tmpdir):
base = str(tmpdir)
static = os.path.join(base, 'static')
css, js = html.finalize_static_urls(
static, base, html.CSS_FILES, html.JS_FILES)
assert css == [
'https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/'
'bootstrap.min.css', # nopep8
'https://cdnjs.cloudflare.com/ajax/libs/fancybox/2.1.5/'
'jquery.fancybox.min.css', # nopep8
'https://fonts.googleapis.com/css?'
'family=Roboto:300,400%7CRoboto+Mono', # nopep8
'static/bootstrap-ligo.min.css',
'static/gwdetchar.min.css']
assert js == [
'https://code.jquery.com/jquery-1.12.3.min.js',
'https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.13.0/'
'moment.min.js', # nopep8
'https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js',
'https://cdnjs.cloudflare.com/ajax/libs/fancybox/2.1.5/'
'jquery.fancybox.min.js', # nopep8
'static/bootstrap-ligo.min.js',
'static/gwdetchar.min.js']
shutil.rmtree(str(tmpdir), ignore_errors=True)
def test_new_bootstrap_page():
base = os.path.abspath(os.path.curdir)
page = html.new_bootstrap_page(base=base, topbtn=False, refresh=True)
assert parse_html(str(page)) == parse_html(
NEW_BOOTSTRAP_PAGE.format(base=base))
def test_navbar():
navbar = html.navbar(['test'], collapse=False)
assert parse_html(navbar) == parse_html(
'<header class="navbar navbar-fixed-top">\n'
'<div class="container">\n<div class="navbar-header">\n'
'</div>\n<nav>\n<ul class="nav navbar-nav">\n<li>\ntest\n</li>\n'
'</ul>\n</nav>\n</div>\n</header>')
def test_dropdown():
menu = html.dropdown('test', [])
assert parse_html(str(menu)) == parse_html(
'<a href="#" class="dropdown-toggle" data-toggle="dropdown">\n'
'test\n<b class="caret"></b>\n</a>\n<ul class="dropdown-menu">\n</ul>')
menu = html.dropdown('test', ['test', '#'], active=0)
assert parse_html(str(menu)) == parse_html(
'<a href="#" class="dropdown-toggle" data-toggle="dropdown">\n'
'test\n<b class="caret"></b>\n</a>\n<ul class="dropdown-menu">\n'
'<li class="active">\ntest\n</li>\n<li>\n#\n</li>\n</ul>')
menu = html.dropdown('test', ['test', '#'], active=[0, 1])
assert parse_html(str(menu)) == parse_html(
'<a href="#" class="dropdown-toggle" data-toggle="dropdown">\n'
'test\n<b class="caret"></b>\n</a>\n<ul class="dropdown-menu">\n'
'<li>\ntest\n</li>\n<li>\n#\n</li>\n</ul>')
def test_dropdown_link():
page = markup.page()
html.dropdown_link(page, None)
assert parse_html(str(page)) == parse_html(
'<li class="divider">\n</li>')
page = markup.page()
html.dropdown_link(page, 'test', active=True)
assert parse_html(str(page)) == parse_html(
'<li class="active">\ntest\n</li>')
page = markup.page()
html.dropdown_link(page, 'test')
assert parse_html(str(page)) == parse_html(
'<li>\ntest\n</li>')
def test_get_brand():
(brand, class_) = html.get_brand('H1', 'Test', 0, about='about')
assert class_ == 'navbar navbar-fixed-top navbar-h1'
assert parse_html(brand) == parse_html(
'<div class="navbar-brand">H1</div>\n'
'<div class="navbar-brand">Test</div>\n'
'<div class="btn-group pull-right ifo-links">\n'
'<a class="navbar-brand dropdown-toggle" href="#" '
'data-toggle="dropdown">\nLinks\n<b class="caret"></b>\n</a>\n'
'<ul class="dropdown-menu">\n'
'<li class="dropdown-header">Internal</li>\n'
'<li>\n<a href="about">About this page</a>\n</li>\n'
'<li class="divider"></li>\n'
'<li class="dropdown-header">External</li>\n'
'<li>\n<a href="https://ldas-jobs.ligo-wa.caltech.edu/~detchar/'
'summary/day/19800106" target="_blank">LHO Summary Pages</a>\n'
'</li>\n<li>\n<a href="https://alog.ligo-wa.caltech.edu/aLOG" '
'target="_blank">LHO Logbook</a>\n</li>\n</ul>\n</div>')
@mock.patch(
"gwdetchar.io.html.package_list",
return_value=[
{"name": "gwpy", "version": "1.0.0"},
{"name": "gwdetchar", "version": "1.2.3"},
],
)
def test_about_this_page(package_list, tmpdir):
outdir = str(tmpdir)
config_file = os.path.join(outdir, 'test.ini')
with open(config_file, 'w') as fobj:
fobj.write(TEST_CONFIGURATION)
testargs = ['/opt/bin/gwdetchar-scattering', '-i', 'X1']
with mock.patch.object(sys, 'argv', testargs):
# test with a single config file
about = html.about_this_page(config_file)
assert parse_html(about) == parse_html(ABOUT)
# test with a list of config files
about = html.about_this_page([config_file])
assert parse_html(about) == parse_html(ABOUT_WITH_CONFIG_LIST)
# clean up
shutil.rmtree(outdir, ignore_errors=True)
def test_write_param():
page = html.write_param('test', 'test')
assert parse_html(str(page)) == parse_html(
'<p>\n<strong>test: </strong>\ntest\n</p>')
def test_get_command_line():
testargs = ['/opt/bin/gwdetchar-conlog', '-i', 'X1']
with mock.patch.object(sys, 'argv', testargs):
cmdline = html.get_command_line()
assert parse_html(cmdline) == parse_html(
'<p>This page was generated with the following command-line call:'
'</p>\n<div class="highlight" style="background: #f8f8f8">'
'<pre style="line-height: 125%"><span></span>$ gwdetchar-conlog '
'-i X1\n</pre></div>\n\n<p>The install path used was <code>{}'
'</code>.</p>'.format(sys.prefix))
def test_get_command_line_module():
testargs = ['__main__.py', '--html-only']
with mock.patch.object(sys, 'argv', testargs):
cmdline = html.get_command_line()
assert parse_html(cmdline) == parse_html(
'<p>This page was generated with the following command-line call:'
'</p>\n<div class="highlight" style="background: #f8f8f8">'
'<pre style="line-height: 125%"><span></span>$ python -m '
'gwdetchar.io.tests.test_html\n</pre></div>\n\n'
'<p>The install path used was <code>{}</code>.</p>'.format(
sys.prefix))
@pytest.mark.parametrize('args, kwargs, result', [
(('test.html', 'Test link'), {},
'<a href="test.html" target="_blank">Test link</a>'),
(('test.html', 'Test link'), {'class_': 'test-case'},
'<a class="test-case" href="test.html" target="_blank">Test link</a>'),
])
def test_html_link(args, kwargs, result):
h1 = parse_html(html.html_link(*args, **kwargs))
h2 = parse_html(result)
assert h1 == h2
def test_cis_link():
h1 = parse_html(html.cis_link('X1:TEST-CHANNEL'))
h2 = parse_html(
'<a style="font-family: Monaco, "Courier New", '
'monospace; color: black;" href="https://cis.ligo.org/channel/byname/'
'X1:TEST-CHANNEL" target="_blank" title="CIS entry for '
'X1:TEST-CHANNEL">X1:TEST-CHANNEL</a>'
)
assert h1 == h2
def test_fancybox_img():
img = html.FancyPlot('X1-TEST_AUX-test-4.png')
out = html.fancybox_img(img)
assert parse_html(out) == parse_html(
'<a class="fancybox" href="X1-TEST_AUX-test-4.png" target="_blank" '
'data-fancybox-group="images" id="a_X1-TEST_AUX_4" '
'title="X1-TEST_AUX-test-4.png">\n'
'<img class="img-responsive" alt="X1-TEST_AUX-test-4.png" '
'src="X1-TEST_AUX-test-4.png" id="img_X1-TEST_AUX_4"/>\n'
'</a>')
def test_scaffold_plots():
h1 = parse_html(html.scaffold_plots([
html.FancyPlot('X1-TEST_AUX-test-4.png'),
html.FancyPlot('X1-TEST_AUX-test-16.png')], nperrow=2))
h2 = parse_html(
'<div class="row">\n'
'<div class="col-sm-6">\n'
'<a class="fancybox" href="X1-TEST_AUX-test-4.png" target="_blank" '
'id="a_X1-TEST_AUX_4" data-fancybox-group="images" '
'title="X1-TEST_AUX-test-4.png">\n'
'<img class="img-responsive" alt="X1-TEST_AUX-test-4.png" '
'id="img_X1-TEST_AUX_4" src="X1-TEST_AUX-test-4.png" />\n'
'</a>\n'
'</div>\n'
'<div class="col-sm-6">\n'
'<a class="fancybox" href="X1-TEST_AUX-test-16.png" target="_blank"'
' id="a_X1-TEST_AUX_16" data-fancybox-group="images" '
'title="X1-TEST_AUX-test-16.png">\n'
'<img class="img-responsive" alt="X1-TEST_AUX-test-16.png" '
'id="img_X1-TEST_AUX_16" src="X1-TEST_AUX-test-16.png" />\n'
'</a>\n'
'</div>\n'
'</div>')
assert h1 == h2
def test_write_arguments():
page = html.write_arguments([('test', 'test')], 0, 1, flag='X1:TEST')
assert '<h2 id="parameters">Parameters</h2>' in page
assert '<strong>Start time: </strong>\n0 (1980-01-06 00:00:00)' in page
assert '<strong>End time: </strong>\n1 (1980-01-06 00:00:01)' in page
assert '<strong>State flag: </strong>\nX1:TEST' in page
assert '<strong>test: </strong>\ntest' in page
assert '<strong>Command-line: </strong>' in page
def test_table():
headers = ['Test']
data = [['test']]
caption = 'This is a test table.'
page = html.table(headers=headers, data=data, caption=caption, id='test')
assert parse_html(page) == parse_html(
'<table class="table table-hover table-condensed table-responsive" '
'id="test"><caption>This is a test table.</caption><thead><tr>'
'<th scope="col">Test</th></tr></thead><tbody><tr><td>test</td></tr>'
'</tbody></table><button class="btn btn-default btn-table" '
'onclick="exportTableToCSV("test.csv", "test")">'
'Export to CSV</button>')
def test_write_flag_html():
page = html.write_flag_html(FLAG)
assert parse_html(str(page)) == parse_html(FLAG_HTML)
page2 = html.write_flag_html(
DataQualityFlag(known=[], active=[], name='X1:TEST_FLAG'))
assert parse_html(str(page2)) == parse_html(FLAG_HTML_NO_SEGMENTS)
def test_write_flag_html_with_plots(tmpdir):
tmpdir.mkdir('plots')
os.chdir(str(tmpdir))
page = html.write_flag_html(FLAG, span=Segment(0, 66), plotdir='plots')
assert parse_html(str(page)) == parse_html(FLAG_HTML_WITH_PLOTS)
shutil.rmtree(str(tmpdir), ignore_errors=True)
def test_scaffold_omega_scans():
times = [1126259462]
channel = 'X1:STRAIN'
page = html.scaffold_omega_scans(times, channel)
assert parse_html(page) == parse_html(OMEGA_SCAFFOLD)
def test_write_footer():
now = datetime.datetime.now()
tz = reference.LocalTimezone().tzname(now)
date = now.strftime('%H:%M {} on %d %B %Y'.format(tz))
out = html.write_footer()
assert parse_html(str(out)) == parse_html(
HTML_FOOTER.format(user=getuser(), date=date))
def test_close_page(tmpdir):
target = os.path.join(str(tmpdir), 'test.html')
now = datetime.datetime.now()
tz = reference.LocalTimezone().tzname(now)
date = now.strftime('%H:%M {} on %d %B %Y'.format(tz))
page = html.close_page(html.markup.page(), target)
assert parse_html(str(page)) == parse_html(
HTML_CLOSE.format(user=getuser(), date=str(date)))
assert os.path.isfile(target)
with open(target, 'r') as fp:
assert fp.read() == str(page)
shutil.rmtree(target, ignore_errors=True)
@mock.patch("{}.Path.is_dir".format(html.Path.__module__))
@mock.patch("subprocess.check_output", return_value="{\"key\": 0}")
@pytest.mark.parametrize("isdir, cmd", [
pytest.param(
False,
"{} -m pip list installed --format json".format(sys.executable),
id="pip",
),
pytest.param(
True,
"conda list --prefix {} --json".format(sys.prefix),
id="conda",
),
])
def test_package_list(check_output, is_dir, isdir, cmd):
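    # Note: mock.patch decorators are applied bottom-up, so the mock for
    # subprocess.check_output is injected first, then the one for Path.is_dir;
    # isdir and cmd come from the parametrize cases above.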
is_dir.return_value = isdir
assert html.package_list() == {"key": 0}
check_output.assert_called_with(cmd.split())
@mock.patch(
"gwdetchar.io.html.package_list",
return_value=[
{"name": "gwpy", "version": "1.0.0"},
{"name": "gwdetchar", "version": "1.2.3"},
],
)
def test_package_table(package_list):
assert parse_html(
html.package_table(class_="test", caption="Test"),
) == parse_html(
"<h2>Environment</h2><table class=\"test\" id=\"package-table\">"
"<caption>Test</caption>"
"<thead>"
"<tr><th scope=\"col\">Name</th><th scope=\"col\">Version</th></tr>"
"</thead><tbody>"
"<tr><td>gwdetchar</td><td>1.2.3</td></tr>"
"<tr><td>gwpy</td><td>1.0.0</td></tr>"
"</tbody></table>"
"<button class=\"btn btn-default btn-table\" "
"onclick=\"exportTableToCSV("package-table.csv", "
""package-table")\">Export to CSV</button>",
)
| gpl-3.0 |
dacb/elvizCluster | unit_tests/test_elviz_abundance_utils.py | 2 | 1469 | import unittest
import pandas as pd
class testCompletenessOfSummarisedData(unittest.TestCase):
def test_animal_data(self):
"""
Make sure each sample's fraction of abundance values sums very close
to 1. On toy data set only.
"""
animal_df = pd.read_csv("./summarised_animals.txt", sep='\t')
sum_by_sample = animal_df.groupby(
['oxy', 'rep', 'week'])['fraction of reads'].sum()
self.assertTrue((sum_by_sample > 0.999).all())
self.assertTrue((sum_by_sample < 1.001).all())
class testAbundanceSummary(unittest.TestCase):
def test_summary_with_all_taxonomy_remaining(self):
"""
Make sure each sample's fraction of abundance values sums very close
to 1.
"""
summary_df = \
pd.read_csv("../results/reduced_data--all_taxonomy_remains.csv")
sum_by_sample = summary_df.groupby(
['oxy', 'rep', 'week'])['fraction of reads'].sum()
self.assertTrue((sum_by_sample > 0.999).all())
self.assertTrue((sum_by_sample < 1.001).all())
if __name__ == '__main__':
animal_df = pd.read_csv("./summarised_animals.txt", sep='\t')
print(animal_df.head())
sums = animal_df.groupby(
['oxy', 'rep', 'week'])['fraction of reads'].sum()
# make sure all the sums are 1:
print(sums)
# Run the Unit Tests
# Note: this has to be last or the stuff above won't run.
unittest.main()
| bsd-3-clause |
ajdawson/colormaps | setup.py | 1 | 2163 | """Build and install the colormaps package."""
# Copyright (c) 2012 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from distutils.core import setup
for line in open('lib/colormaps/__init__.py').readlines():
if (line.startswith('__version__')):
exec(line.strip())
package_data = {'colormaps': ['palette/*.txt', 'palette/ncl/*.txt',
'palette/brewer/diverging/*.txt',
'palette/brewer/qualitative/*.txt',
'palette/brewer/sequential/*.txt']}
if __name__ == '__main__':
setup(
name='colormaps',
version=__version__,
description='Easily generate colormaps for matplotlib',
author='Andrew Dawson',
author_email='[email protected]',
url='http://github.com/ajdawson/colormaps',
long_description="""
colormaps can generate colormaps of varying lengths from sets of
base colors. It is designed to allow total control of colormaps
in matplotlib.
""",
packages=['colormaps'],
package_dir={'': 'lib'},
package_data=package_data,)
| mit |
stefan-balke/librosa | tests/test_display.py | 2 | 9298 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2015-02-14 22:51:01 by Brian McFee <[email protected]>
'''Unit tests for display module'''
import warnings
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
import matplotlib.style
matplotlib.style.use('seaborn-ticks')
import matplotlib.pyplot as plt
import librosa
import librosa.display
import numpy as np
from nose.tools import nottest, raises, eq_
from mpl_ic import image_comparison
warnings.resetwarnings()
warnings.simplefilter('always')
@nottest
def get_spec(y, sr):
C = np.abs(librosa.cqt(y, sr=sr))
return librosa.stft(y), C, sr
__EXAMPLE_FILE = 'data/test1_22050.wav'
y, sr = librosa.load(__EXAMPLE_FILE)
S, C, sr = get_spec(y, sr)
S_abs = np.abs(S)
S_signed = np.abs(S) - np.median(np.abs(S))
S_bin = S_signed > 0
tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
beats = librosa.util.fix_frames(beats, x_max=C.shape[1])
beat_t = librosa.frames_to_time(beats, sr=sr)
Csync = librosa.util.sync(C, beats, aggregate=np.median)
@image_comparison(baseline_images=['complex'], extensions=['png'])
def test_complex_input():
plt.figure()
librosa.display.specshow(S)
@image_comparison(baseline_images=['abs'], extensions=['png'])
def test_abs_input():
plt.figure()
librosa.display.specshow(S_abs)
@image_comparison(baseline_images=['cqt_note'], extensions=['png'])
def test_cqt_note():
plt.figure()
librosa.display.specshow(C, y_axis='cqt_note')
@image_comparison(baseline_images=['cqt_hz'], extensions=['png'])
def test_cqt_hz():
plt.figure()
librosa.display.specshow(C, y_axis='cqt_hz')
@image_comparison(baseline_images=['tempo'], extensions=['png'])
def test_tempo():
T = librosa.feature.tempogram(y=y, sr=sr)
plt.figure()
librosa.display.specshow(T, y_axis='tempo', cmap='magma')
@image_comparison(baseline_images=['tonnetz'], extensions=['png'])
def test_tonnetz():
plt.figure()
chroma = librosa.feature.chroma_cqt(C=C)
ton = librosa.feature.tonnetz(chroma=chroma)
librosa.display.specshow(ton, y_axis='tonnetz')
@image_comparison(baseline_images=['chroma'], extensions=['png'])
def test_chroma():
plt.figure()
plt.subplot(3, 1, 1)
chr1 = librosa.feature.chroma_stft(S=S_abs**2, sr=sr)
librosa.display.specshow(chr1, y_axis='chroma')
plt.subplot(3, 1, 2)
chr2 = librosa.feature.chroma_stft(S=S_abs**2, sr=sr, n_chroma=2*12)
librosa.display.specshow(chr2, y_axis='chroma', bins_per_octave=2*12)
plt.subplot(3, 1, 3)
chr3 = librosa.feature.chroma_stft(S=S_abs**2, sr=sr, n_chroma=3*12)
librosa.display.specshow(chr3, y_axis='chroma', bins_per_octave=3*12)
@image_comparison(baseline_images=['double_chroma'], extensions=['png'])
def test_double_chroma():
plt.figure()
chr1 = librosa.feature.chroma_stft(S=S_abs**2, sr=sr)
chr1 = np.vstack((chr1, chr1))
librosa.display.specshow(chr1, y_axis='chroma', bins_per_octave=12)
@image_comparison(baseline_images=['x_mel'], extensions=['png'])
def test_x_mel():
plt.figure()
M = librosa.feature.melspectrogram(S=S_abs**2)
librosa.display.specshow(M.T, x_axis='mel')
@image_comparison(baseline_images=['y_mel'], extensions=['png'])
def test_y_mel():
plt.figure()
M = librosa.feature.melspectrogram(S=S_abs**2)
librosa.display.specshow(M, y_axis='mel')
@image_comparison(baseline_images=['y_mel_bounded'], extensions=['png'])
def test_y_mel_bounded():
plt.figure()
fmin, fmax = 110, 880
M = librosa.feature.melspectrogram(S=S_abs**2, fmin=fmin, fmax=fmax)
librosa.display.specshow(M, y_axis='mel', fmin=fmin, fmax=fmax)
@image_comparison(baseline_images=['x_none_y_linear'], extensions=['png'])
def test_xaxis_none_yaxis_linear():
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs, y_axis='linear')
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed, y_axis='linear')
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin, y_axis='linear')
@image_comparison(baseline_images=['x_none_y_log'], extensions=['png'])
def test_xaxis_none_yaxis_log():
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs, y_axis='log')
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed, y_axis='log')
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin, y_axis='log')
@image_comparison(baseline_images=['x_linear_y_none'], extensions=['png'])
def test_xaxis_linear_yaxis_none():
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs.T, x_axis='linear')
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed.T, x_axis='linear')
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin.T, x_axis='linear')
@image_comparison(baseline_images=['x_log_y_none'], extensions=['png'])
def test_xaxis_log_yaxis_none():
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs.T, x_axis='log')
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed.T, x_axis='log')
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin.T, x_axis='log')
@image_comparison(baseline_images=['x_time_y_none'], extensions=['png'])
def test_xaxis_time_yaxis_none():
plt.figure()
librosa.display.specshow(S_abs, x_axis='time')
@image_comparison(baseline_images=['x_none_y_time'], extensions=['png'])
def test_xaxis_none_yaxis_time():
plt.figure()
librosa.display.specshow(S_abs.T, y_axis='time')
@image_comparison(baseline_images=['x_frames_y_none'], extensions=['png'])
def test_xaxis_frames_yaxis_none():
plt.figure()
librosa.display.specshow(S_abs, x_axis='frames')
@image_comparison(baseline_images=['x_none_y_frames'], extensions=['png'])
def test_xaxis_none_yaxis_frames():
plt.figure()
librosa.display.specshow(S_abs.T, y_axis='frames')
@image_comparison(baseline_images=['x_lag_y_none'], extensions=['png'])
def test_xaxis_lag_yaxis_none():
plt.figure()
librosa.display.specshow(S_abs, x_axis='lag')
@image_comparison(baseline_images=['x_none_y_lag'], extensions=['png'])
def test_xaxis_time_yaxis_lag():
plt.figure()
librosa.display.specshow(S_abs.T, y_axis='lag')
@image_comparison(baseline_images=['time_scales_auto'], extensions=['png'])
def test_time_scales_auto():
# sr = 22050, hop_length = 512, S.shape[1] = 198
# 197 * 512 / 22050 ~= 4.6s
plt.figure()
plt.subplot(4, 1, 1)
# sr * 10 -> ms
librosa.display.specshow(S_abs, sr=10 * sr, x_axis='time')
plt.subplot(4, 1, 2)
# sr -> s
librosa.display.specshow(S_abs, sr=sr, x_axis='time')
plt.subplot(4, 1, 3)
# sr / 20 -> m
librosa.display.specshow(S_abs, sr=sr // 20, x_axis='time')
plt.subplot(4, 1, 4)
# sr / (60 * 20) -> h
librosa.display.specshow(S_abs, sr=sr // (60 * 20), x_axis='time')
plt.tight_layout()
@image_comparison(baseline_images=['waveplot_mono'], extensions=['png'])
def test_waveplot_mono():
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.waveplot(y, sr=sr, max_points=None, x_axis='off')
plt.subplot(3, 1, 2)
librosa.display.waveplot(y, sr=sr, x_axis='off')
plt.subplot(3, 1, 3)
librosa.display.waveplot(y, sr=sr, x_axis='time')
@image_comparison(baseline_images=['waveplot_stereo'], extensions=['png'])
def test_waveplot_stereo():
ys = np.vstack([y[np.newaxis, :], 2 * y[np.newaxis, :]])
plt.figure()
librosa.display.waveplot(ys, sr=sr)
@raises(librosa.ParameterError)
def test_unknown_wavaxis():
plt.figure()
librosa.display.waveplot(y, sr=sr, x_axis='something not in the axis map')
@raises(librosa.ParameterError)
def test_waveplot_bad_maxsr():
plt.figure()
librosa.display.waveplot(y, sr=sr, max_sr=0)
@raises(librosa.ParameterError)
def test_waveplot_bad_maxpoints():
plt.figure()
librosa.display.waveplot(y, sr=sr, max_points=0)
def test_unknown_axis():
@raises(librosa.ParameterError)
def __test(axis):
kwargs = dict()
kwargs.setdefault(axis, 'something not in the axis map')
plt.figure()
librosa.display.specshow(S_abs, **kwargs)
yield __test, 'x_axis'
yield __test, 'y_axis'
def test_cmap_robust():
def __test(data):
cmap1 = librosa.display.cmap(data, robust=False)
cmap2 = librosa.display.cmap(data, robust=True)
assert type(cmap1) is type(cmap2)
if isinstance(cmap1, matplotlib.colors.ListedColormap):
assert np.allclose(cmap1.colors, cmap2.colors)
elif isinstance(cmap1, matplotlib.colors.LinearSegmentedColormap):
eq_(cmap1.name, cmap2.name)
else:
eq_(cmap1, cmap2)
# Inputs here are constructed to not need robust sign estimation
for D in [1.0 + S_abs, -(1.0 + S_abs), S_signed, S_bin]:
yield __test, D
@image_comparison(baseline_images=['coords'], extensions=['png'])
def test_coords():
plt.figure()
librosa.display.specshow(Csync, x_coords=beat_t, x_axis='time', y_axis='cqt_note')
@raises(librosa.ParameterError)
def test_bad_coords():
librosa.display.specshow(S_abs, x_coords=np.arange(S.shape[1] // 2))
| isc |
jstoxrocky/statsmodels | statsmodels/regression/tests/test_regression.py | 6 | 37622 | """
Test functions for models.regression
"""
# TODO: Test for LM
from statsmodels.compat.python import long, lrange
import warnings
import pandas
import numpy as np
from numpy.testing import (assert_almost_equal, assert_approx_equal,
assert_raises, assert_equal, assert_allclose)
from scipy.linalg import toeplitz
from statsmodels.tools.tools import add_constant, categorical
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker
from statsmodels.datasets import longley
from scipy.stats import t as student_t
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_7 = 7
DECIMAL_0 = 0
class CheckRegressionResults(object):
"""
    res2 contains results from Rmodelwrap or results obtained from statistical
    packages such as R, Stata, or SAS and written to model_results
"""
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_standarderrors = DECIMAL_4
def test_standarderrors(self):
assert_almost_equal(self.res1.bse,self.res2.bse,
self.decimal_standarderrors)
decimal_confidenceintervals = DECIMAL_4
def test_confidenceintervals(self):
        # NOTE: Stata rounds residuals (at least) to significant digits, so use approx_equal
conf1 = self.res1.conf_int()
conf2 = self.res2.conf_int()
for i in range(len(conf1)):
assert_approx_equal(conf1[i][0], conf2[i][0],
self.decimal_confidenceintervals)
assert_approx_equal(conf1[i][1], conf2[i][1],
self.decimal_confidenceintervals)
decimal_conf_int_subset = DECIMAL_4
def test_conf_int_subset(self):
if len(self.res1.params) > 1:
ci1 = self.res1.conf_int(cols=(1,2))
ci2 = self.res1.conf_int()[1:3]
assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset)
else:
pass
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_rsquared = DECIMAL_4
def test_rsquared(self):
assert_almost_equal(self.res1.rsquared, self.res2.rsquared,
self.decimal_rsquared)
decimal_rsquared_adj = DECIMAL_4
def test_rsquared_adj(self):
assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj,
self.decimal_rsquared_adj)
def test_degrees(self):
assert_equal(self.res1.model.df_model, self.res2.df_model)
assert_equal(self.res1.model.df_resid, self.res2.df_resid)
decimal_ess = DECIMAL_4
def test_ess(self):
#Explained Sum of Squares
assert_almost_equal(self.res1.ess, self.res2.ess,
self.decimal_ess)
decimal_ssr = DECIMAL_4
def test_sumof_squaredresids(self):
assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr)
decimal_mse_resid = DECIMAL_4
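    # NOTE: test_mse_resid and test_mse_model below compare mse_model and
    # mse_resid respectively; the names appear swapped, but both quantities
    # are still exercised.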
def test_mse_resid(self):
#Mean squared error of residuals
assert_almost_equal(self.res1.mse_model, self.res2.mse_model,
self.decimal_mse_resid)
decimal_mse_model = DECIMAL_4
def test_mse_model(self):
assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid,
self.decimal_mse_model)
decimal_mse_total = DECIMAL_4
def test_mse_total(self):
assert_almost_equal(self.res1.mse_total, self.res2.mse_total,
self.decimal_mse_total, err_msg="Test class %s" % self)
decimal_fvalue = DECIMAL_4
def test_fvalue(self):
#didn't change this, not sure it should complain -inf not equal -inf
#if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)):
assert_almost_equal(self.res1.fvalue, self.res2.fvalue,
self.decimal_fvalue)
decimal_loglike = DECIMAL_4
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike)
decimal_aic = DECIMAL_4
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)
decimal_pvalues = DECIMAL_4
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
self.decimal_pvalues)
decimal_wresid = DECIMAL_4
def test_wresid(self):
assert_almost_equal(self.res1.wresid, self.res2.wresid,
self.decimal_wresid)
decimal_resids = DECIMAL_4
def test_resids(self):
assert_almost_equal(self.res1.resid, self.res2.resid,
self.decimal_resids)
decimal_norm_resids = DECIMAL_4
def test_norm_resids(self):
assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson,
self.decimal_norm_resids)
#TODO: test fittedvalues and what else?
class TestOLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import Longley
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
res2 = Longley()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
model_qr = OLS(data.endog, data.exog)
Q, R = np.linalg.qr(data.exog)
model_qr.exog_Q, model_qr.exog_R = Q, R
model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
model_qr.rank = np_matrix_rank(R)
res_qr2 = model_qr.fit(method="qr")
cls.res_qr = res_qr
cls.res_qr_manual = res_qr2
def test_eigenvalues(self):
eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals)
eigenval_perc_diff /= self.res_qr.eigenvals
zeros = np.zeros_like(eigenval_perc_diff)
assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7)
# Robust error tests. Compare values computed with SAS
def test_HC0_errors(self):
        # The comparisons are split up because the copied results do not carry
        # DECIMAL_4 digits of precision for the last standard error.
assert_almost_equal(self.res1.HC0_se[:-1],
self.res2.HC0_se[:-1], DECIMAL_4)
assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1])
def test_HC1_errors(self):
assert_almost_equal(self.res1.HC1_se[:-1],
self.res2.HC1_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1])
def test_HC2_errors(self):
assert_almost_equal(self.res1.HC2_se[:-1],
self.res2.HC2_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1])
def test_HC3_errors(self):
assert_almost_equal(self.res1.HC3_se[:-1],
self.res2.HC3_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1])
def test_qr_params(self):
assert_almost_equal(self.res1.params,
self.res_qr.params, 6)
def test_qr_normalized_cov_params(self):
#todo: need assert_close
assert_almost_equal(np.ones_like(self.res1.normalized_cov_params),
self.res1.normalized_cov_params /
self.res_qr.normalized_cov_params, 5)
def test_missing(self):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
data.endog[[3, 7, 14]] = np.nan
mod = OLS(data.endog, data.exog, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
def test_rsquared_adj_overfit(self):
# Test that if df_resid = 0, rsquared_adj = 0.
# This is a regression test for user issue:
# https://github.com/statsmodels/statsmodels/issues/868
with warnings.catch_warnings(record=True):
x = np.random.randn(5)
y = np.random.randn(5, 6)
results = OLS(x, y).fit()
rsquared_adj = results.rsquared_adj
assert_equal(rsquared_adj, np.nan)
def test_qr_alternatives(self):
assert_allclose(self.res_qr.params, self.res_qr_manual.params,
rtol=5e-12)
def test_norm_resid(self):
resid = self.res1.wresid
norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid)
model_norm_resid = self.res1.resid_pearson
assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7)
def test_norm_resid_zero_variance(self):
with warnings.catch_warnings(record=True):
y = self.res1.model.endog
res = OLS(y,y).fit()
assert_allclose(res.scale, 0, atol=1e-20)
assert_allclose(res.wresid, res.resid_pearson, atol=5e-11)
class TestRTO(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyRTO
data = longley.load()
res1 = OLS(data.endog, data.exog).fit()
res2 = LongleyRTO()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
cls.res_qr = res_qr
class TestFtest(object):
"""
Tests f_test vs. RegressionResults
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
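        # R below drops the row for the constant (appended last by add_constant
        # with prepend=False), so this F-test of the six slope coefficients
        # reproduces the regression F-statistic checked in test_F.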
R = np.identity(7)[:-1,:]
cls.Ftest = cls.res1.f_test(R)
def test_F(self):
assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4)
def test_p(self):
assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4)
def test_Df_denom(self):
assert_equal(self.Ftest.df_denom, self.res1.model.df_resid)
def test_Df_num(self):
assert_equal(self.Ftest.df_num, 6)
class TestFTest2(object):
"""
A joint test that the coefficient on
GNP = the coefficient on UNEMP and that the coefficient on
POP = the coefficient on YEAR for the Longley dataset.
Ftest1 is from statsmodels. Results are from Rpy using R's car library.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]]
cls.Ftest1 = res1.f_test(R2)
hyp = 'x2 = x3, x5 = x6'
cls.NewFtest1 = res1.f_test(hyp)
def test_new_ftest(self):
assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue)
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 2)
class TestFtestQ(object):
"""
A joint hypothesis test that Rb = q. Coefficient tests are essentially
made up. Test values taken from Stata.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R = np.array([[0,1,1,0,0,0,0],
[0,1,0,1,0,0,0],
[0,1,0,0,0,0,0],
[0,0,0,0,1,0,0],
[0,0,0,0,0,1,0]])
q = np.array([0,0,0,1,0])
cls.Ftest1 = res1.f_test((R,q))
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 5)
class TestTtest(object):
"""
    Test individual t-tests, i.e., whether the coefficients are significantly
    different from zero.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
R = np.identity(7)
cls.Ttest = cls.res1.t_test(R)
hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0'
cls.NewTTest = cls.res1.t_test(hyp)
def test_new_tvalue(self):
assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue)
def test_tvalue(self):
assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest.pvalue, student_t.sf(
np.abs(self.res1.tvalues), self.res1.model.df_resid)*2,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest.df_denom, self.res1.model.df_resid)
def test_effect(self):
assert_almost_equal(self.Ttest.effect, self.res1.params)
class TestTtest2(object):
"""
Tests the hypothesis that the coefficients on POP and YEAR
are equal.
Results from RPy using 'car' package.
"""
@classmethod
def setupClass(cls):
R = np.zeros(7)
R[4:6] = [1,-1]
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
cls.Ttest1 = res1.t_test(R)
def test_tvalue(self):
assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284,
DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest1.pvalue, 2*0.0015163772380932246,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest1.df_denom, 9)
def test_effect(self):
assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4)
class TestGLS(object):
"""
These test results were obtained by replication with R.
"""
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyGls
data = longley.load()
exog = add_constant(np.column_stack((data.exog[:,1],
data.exog[:,4])), prepend=False)
tmp_results = OLS(data.endog, exog).fit()
rho = np.corrcoef(tmp_results.resid[1:],
tmp_results.resid[:-1])[0][1] # by assumption
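        # toeplitz(arange(16)) is the |i - j| lag matrix, so sigma[i, j] =
        # rho**|i - j|, i.e. an AR(1) error covariance structure.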
order = toeplitz(np.arange(16))
sigma = rho**order
GLS_results = GLS(data.endog, exog, sigma=sigma).fit()
cls.res1 = GLS_results
cls.res2 = LongleyGls()
# attach for test_missing
cls.sigma = sigma
cls.exog = exog
cls.endog = data.endog
def test_aic(self):
assert_approx_equal(self.res1.aic+2, self.res2.aic, 3)
def test_bic(self):
assert_approx_equal(self.res1.bic, self.res2.bic, 2)
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4)
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4)
def test_tvalues(self):
assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4)
def test_standarderrors(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
DECIMAL_4)
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
def test_missing(self):
endog = self.endog.copy() # copy or changes endog for other methods
endog[[4,7,14]] = np.nan
mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
assert_equal(mod.sigma.shape, (13,13))
class TestGLS_alt_sigma(CheckRegressionResults):
"""
Test that GLS with no argument is equivalent to OLS.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_res = OLS(data.endog, data.exog).fit()
gls_res = GLS(data.endog, data.exog).fit()
gls_res_scalar = GLS(data.endog, data.exog, sigma=1)
cls.endog = data.endog
cls.exog = data.exog
cls.res1 = gls_res
cls.res2 = ols_res
cls.res3 = gls_res_scalar
# self.res2.conf_int = self.res2.conf_int()
def test_wrong_size_sigma_1d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones(n-1))
def test_wrong_size_sigma_2d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones((n-1,n-1)))
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2, DECIMAL_4)
class TestLM(object):
@classmethod
def setupClass(cls):
# TODO: Test HAC method
X = np.random.randn(100,3)
b = np.ones((3,1))
e = np.random.randn(100,1)
y = np.dot(X,b) + e
# Cases?
# Homoskedastic
# HC0
cls.res1_full = OLS(y,X).fit()
cls.res1_restricted = OLS(y,X[:,0]).fit()
cls.res2_full = cls.res1_full.get_robustcov_results('HC0')
cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0')
cls.X = X
cls.Y = y
def test_LM_homoskedastic(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
S = np.dot(resid,resid) / n * np.dot(X.T,X) / n
Sinv = np.linalg.inv(S)
s = np.mean(X * resid[:,None], 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res1_full.compare_lm_test(self.res1_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_nodemean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, demean=False)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_demean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
scores_demean = scores - scores.mean(0)
S = np.dot(scores_demean.T,scores_demean) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_LRversion(self):
resid = self.res1_restricted.wresid
resid_full = self.res1_full.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
s = np.mean(scores, 0)
scores = X * resid_full[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, use_lr = True)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_nonnested(self):
assert_raises(ValueError, self.res2_restricted.compare_lm_test, self.res2_full)
class TestOLS_GLS_WLS_equivalence(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
w = np.ones(n)
cls.results = []
cls.results.append(OLS(y, X).fit())
cls.results.append(WLS(y, X, w).fit())
cls.results.append(GLS(y, X, 100*w).fit())
cls.results.append(GLS(y, X, np.diag(0.1*w)).fit())
def test_ll(self):
llf = np.array([r.llf for r in self.results])
llf_1 = np.ones_like(llf) * self.results[0].llf
assert_almost_equal(llf, llf_1, DECIMAL_7)
ic = np.array([r.aic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].aic
assert_almost_equal(ic, ic_1, DECIMAL_7)
ic = np.array([r.bic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].bic
assert_almost_equal(ic, ic_1, DECIMAL_7)
def test_params(self):
params = np.array([r.params for r in self.results])
params_1 = np.array([self.results[0].params] * len(self.results))
assert_allclose(params, params_1)
def test_ss(self):
bse = np.array([r.bse for r in self.results])
bse_1 = np.array([self.results[0].bse] * len(self.results))
assert_allclose(bse, bse_1)
def test_rsquared(self):
rsquared = np.array([r.rsquared for r in self.results])
rsquared_1 = np.array([self.results[0].rsquared] * len(self.results))
assert_almost_equal(rsquared, rsquared_1, DECIMAL_7)
class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence):
# reuse test methods
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
np.random.seed(5)
w = np.random.uniform(0.5, 1, n)
w_inv = 1. / w
cls.results = []
cls.results.append(WLS(y, X, w).fit())
cls.results.append(WLS(y, X, 0.01 * w).fit())
cls.results.append(GLS(y, X, 100 * w_inv).fit())
cls.results.append(GLS(y, X, np.diag(0.1 * w_inv)).fit())
def test_rsquared(self):
# TODO: WLS rsquared is ok, GLS might have wrong centered_tss
# We only check that WLS and GLS rsquared is invariant to scaling
# WLS and GLS have different rsquared
assert_almost_equal(self.results[1].rsquared, self.results[0].rsquared,
DECIMAL_7)
assert_almost_equal(self.results[3].rsquared, self.results[2].rsquared,
DECIMAL_7)
class TestNonFit(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.endog = data.endog
cls.exog = data.exog
cls.ols_model = OLS(data.endog, data.exog)
def test_df_resid(self):
df_resid = self.endog.shape[0] - self.exog.shape[1]
assert_equal(self.ols_model.df_resid, long(9))
class TestWLS_CornerCases(object):
@classmethod
def setupClass(cls):
cls.exog = np.ones((1,))
cls.endog = np.ones((1,))
weights = 1
cls.wls_res = WLS(cls.endog, cls.exog, weights=weights).fit()
def test_wrong_size_weights(self):
weights = np.ones((10,10))
assert_raises(ValueError, WLS, self.endog, self.exog, weights=weights)
class TestWLSExogWeights(CheckRegressionResults):
#Test WLS with Greene's credit card data
#reg avgexp age income incomesq ownrent [aw=1/incomesq]
def __init__(self):
from .results.results_regression import CCardWLS
from statsmodels.datasets.ccard import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=False)
nobs = 72.
weights = 1/dta.exog[:,2]
# for comparison with stata analytic weights
scaled_weights = ((weights * nobs)/weights.sum())
self.res1 = WLS(dta.endog, dta.exog, weights=scaled_weights).fit()
self.res2 = CCardWLS()
self.res2.wresid = scaled_weights ** .5 * self.res2.resid
# correction because we use different definition for loglike/llf
corr_ic = 2 * (self.res1.llf - self.res2.llf)
self.res2.aic -= corr_ic
self.res2.bic -= corr_ic
self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights))
def test_wls_example():
    # Example from the WLS docstring; there was a note about a bug, which
    # should be fixed now.
Y = [1,3,4,5,2,3,4]
X = lrange(1,8)
X = add_constant(X, prepend=False)
wls_model = WLS(Y,X, weights=lrange(1,8)).fit()
#taken from R lm.summary
assert_almost_equal(wls_model.fvalue, 0.127337843215, 6)
assert_almost_equal(wls_model.scale, 2.44608530786**2, 6)
def test_wls_tss():
y = np.array([22, 22, 22, 23, 23, 23])
X = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]]
ols_mod = OLS(y, add_constant(X, prepend=False)).fit()
yw = np.array([22, 22, 23.])
Xw = [[1,0],[1,1],[0,1]]
w = np.array([2, 1, 3.])
wls_mod = WLS(yw, add_constant(Xw, prepend=False), weights=w).fit()
assert_equal(ols_mod.centered_tss, wls_mod.centered_tss)
class TestWLSScalarVsArray(CheckRegressionResults):
@classmethod
def setupClass(cls):
from statsmodels.datasets.longley import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=True)
wls_scalar = WLS(dta.endog, dta.exog, weights=1./3).fit()
weights = [1/3.] * len(dta.endog)
wls_array = WLS(dta.endog, dta.exog, weights=weights).fit()
cls.res1 = wls_scalar
cls.res2 = wls_array
#class TestWLS_GLS(CheckRegressionResults):
# @classmethod
# def setupClass(cls):
# from statsmodels.datasets.ccard import load
# data = load()
# cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit()
# cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit()
#
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2(), DECIMAL_4)
def test_wls_missing():
from statsmodels.datasets.ccard import load
data = load()
endog = data.endog
endog[[10, 25]] = np.nan
mod = WLS(data.endog, data.exog, weights = 1/data.exog[:,2], missing='drop')
assert_equal(mod.endog.shape[0], 70)
assert_equal(mod.exog.shape[0], 70)
assert_equal(mod.weights.shape[0], 70)
class TestWLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
cls.res2 = WLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = GLS(data.endog, data.exog).fit()
cls.res2 = OLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
#TODO: test AR
# why the two-stage in AR?
#class test_ar(object):
# from statsmodels.datasets.sunspots import load
# data = load()
# model = AR(data.endog, rho=4).fit()
# R_res = RModel(data.endog, aic="FALSE", order_max=4)
# def test_params(self):
# assert_almost_equal(self.model.rho,
# pass
# def test_order(self):
# In R this can be defined or chosen by minimizing the AIC if aic=True
# pass
class TestYuleWalker(object):
@classmethod
def setupClass(cls):
from statsmodels.datasets.sunspots import load
data = load()
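        # Estimate AR(4) coefficients for the sunspots series via yule_walker
        # (method="mle"); R_params below are the reference values, presumably
        # computed in R.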
cls.rho, cls.sigma = yule_walker(data.endog, order=4,
method="mle")
cls.R_params = [1.2831003105694765, -0.45240924374091945,
-0.20770298557575195, 0.047943648089542337]
def test_params(self):
assert_almost_equal(self.rho, self.R_params, DECIMAL_4)
class TestDataDimensions(CheckRegressionResults):
@classmethod
def setupClass(cls):
np.random.seed(54321)
cls.endog_n_ = np.random.uniform(0,20,size=30)
cls.endog_n_one = cls.endog_n_[:,None]
cls.exog_n_ = np.random.uniform(0,20,size=30)
cls.exog_n_one = cls.exog_n_[:,None]
cls.degen_exog = cls.exog_n_one[:-1]
cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod1.df_model += 1
cls.res1 = cls.mod1.fit()
        # Note that these are created for every subclass, which adds a little
        # extra overhead.
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_large_data(TestDataDimensions):
@classmethod
def setupClass(cls):
nobs = 1000
y = np.random.randn(nobs,1)
X = np.random.randn(nobs,20)
sigma = np.ones_like(y)
cls.gls_res = GLS(y, X, sigma=sigma).fit()
cls.gls_res_scalar = GLS(y, X, sigma=1).fit()
cls.gls_res_none= GLS(y, X).fit()
cls.ols_res = OLS(y, X).fit()
def test_large_equal_params(self):
assert_almost_equal(self.ols_res.params, self.gls_res.params, DECIMAL_7)
def test_large_equal_loglike(self):
assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7)
def test_large_equal_params_none(self):
assert_almost_equal(self.gls_res.params, self.gls_res_none.params,
DECIMAL_7)
class TestNxNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxOneNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxOneNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxNxOne(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNxOne, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def test_bad_size():
np.random.seed(54321)
data = np.random.uniform(0,20,31)
assert_raises(ValueError, OLS, data, data[1:])
def test_const_indicator():
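    # A full set of category dummies spans the constant term, so fitting with
    # hasconst=True should give the same R-squared as dropping one dummy and
    # adding an explicit constant.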
np.random.seed(12345)
X = np.random.randint(0, 3, size=30)
X = categorical(X, drop=True)
y = np.dot(X, [1., 2., 3.]) + np.random.normal(size=30)
modc = OLS(y, add_constant(X[:,1:], prepend=True)).fit()
mod = OLS(y, X, hasconst=True).fit()
assert_almost_equal(modc.rsquared, mod.rsquared, 12)
def test_706():
# make sure one regressor pandas Series gets passed to DataFrame
# for conf_int.
y = pandas.Series(np.random.randn(10))
x = pandas.Series(np.ones(10))
res = OLS(y,x).fit()
conf_int = res.conf_int()
np.testing.assert_equal(conf_int.shape, (1, 2))
np.testing.assert_(isinstance(conf_int, pandas.DataFrame))
def test_summary():
# test 734
import re
dta = longley.load_pandas()
X = dta.exog
X["constant"] = 1
y = dta.endog
with warnings.catch_warnings(record=True):
res = OLS(y, X).fit()
table = res.summary().as_latex()
# replace the date and time
table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&",
" Sun, 07 Apr 2013 &", table)
table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&",
" 13:46:07 &", table)
expected = """\\begin{center}
\\begin{tabular}{lclc}
\\toprule
\\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\
\\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\
\\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\
\\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\
\\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\
\\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\
\\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\
\\textbf{Df Model:} & 6 & \\textbf{ } & \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lccccc}
& \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[95.0\\% Conf. Int.]} \\\\
\\midrule
\\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 207.153 \\\\
\\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 0.040 \\\\
\\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 -0.915 \\\\
\\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 -0.549 \\\\
\\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 0.460 \\\\
\\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 2859.515 \\\\
\\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 -1.47e+06 \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lclc}
\\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\
\\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\
\\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\
\\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\
\\bottomrule
\\end{tabular}
%\\caption{OLS Regression Results}
\\end{center}"""
assert_equal(table, expected)
class TestRegularizedFit(object):
# Make sure there are no issues when there are no selected
# variables.
def test_empty_model(self):
np.random.seed(742)
n = 100
endog = np.random.normal(size=n)
exog = np.random.normal(size=(n, 3))
model = OLS(endog, exog)
result = model.fit_regularized(alpha=1000)
assert_equal(result.params, 0.)
assert_equal(result.bse, 0.)
def test_regularized(self):
import os
from . import glmnet_r_results
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"),
delimiter=",")
tests = [x for x in dir(glmnet_r_results) if x.startswith("rslt_")]
for test in tests:
vec = getattr(glmnet_r_results, test)
n = vec[0]
p = vec[1]
L1_wt = float(vec[2])
lam = float(vec[3])
params = vec[4:].astype(np.float64)
endog = data[0:n, 0]
exog = data[0:n, 1:(p+1)]
endog = endog - endog.mean()
endog /= endog.std(ddof=1)
exog = exog - exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = OLS(endog, exog)
rslt = mod.fit_regularized(L1_wt=L1_wt, alpha=lam)
assert_almost_equal(rslt.params, params, decimal=3)
# Smoke test for summary
smry = rslt.summary()
def test_formula_missing_cat():
# gh-805
import statsmodels.api as sm
from statsmodels.formula.api import ols
from patsy import PatsyError
dta = sm.datasets.grunfeld.load_pandas().data
dta.ix[0, 'firm'] = np.nan
mod = ols(formula='value ~ invest + capital + firm + year',
data=dta.dropna())
res = mod.fit()
mod2 = ols(formula='value ~ invest + capital + firm + year',
data=dta)
res2 = mod2.fit()
assert_almost_equal(res.params.values, res2.params.values)
assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year',
data=dta, missing='raise')
def test_missing_formula_predict():
# see 2171
nsample = 30
data = pandas.DataFrame({'x': np.linspace(0, 10, nsample)})
null = pandas.DataFrame({'x': np.array([np.nan])})
data = pandas.concat([data, null])
beta = np.array([1, 0.1])
e = np.random.normal(size=nsample+1)
data['y'] = beta[0] + beta[1] * data['x'] + e
model = OLS.from_formula('y ~ x', data=data)
fit = model.fit()
pred = fit.predict(exog=data[:-1])
if __name__=="__main__":
import nose
# run_module_suite()
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
# nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
| bsd-3-clause |
google/rysim | python/results_analyzer/Main.py | 1 | 119456 | # Copyright 2014 The RySim Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from array import *
import collections
import gflags
import numpy
import os
import pprint
import re
import scipy.integrate
import scipy.interpolate
import sqlite3
import sys
from matplotlib import pylab
import pandas as pd
import statsmodels.formula.api as sm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as pyplot
# Global state
experiment_db = None
event_count_buckets = [5000, 10000, 20000, 40000, 50000]
bucketing_factor = 0.001
kernel_results_table = None
kernel_machine_results_table = None
kernel_machine_type_results_table = None
fit_comparison_table = dict()
# gflag defn's and registration
FLAGS = gflags.FLAGS
gflags.DEFINE_string('root_dir', '.',
'Root directory to start searching and where to store the database. Defaults to the current '
'directory')
gflags.DEFINE_string('output_db', 'experiment.db',
'Name of the database file that should be created. If the file already exists it will be '
'overwritten. Defaults to "experiment.db"')
gflags.DEFINE_bool('read_inputs', False,
'Controls if the application should re-read the inputs. If so the output DB will be clobbered '
'entirely. If not only the analysis tables will be removed')
class DBWrapper(object):
def __init__(self, db_filename):
self.db = sqlite3.connect(db_filename, check_same_thread=False)
def commit(self):
self.db.commit()
def execute_safe(self, cmd):
self.execute(cmd)
self.commit()
def execute(self, cmd):
self.db.execute(cmd)
def select(self, cmd):
return self.db.execute(cmd)
def cleanup(self):
self.db.commit()
self.db.close()
self.db = None
class ResultsTable(object):
filtered_table_entry = collections.namedtuple('FilteredTableEntry', ['event_count', 'event_count_std',
'agents', 'agents_std',
'connections', 'connections_std',
'cpu', 'cpu_std',
'maxmem', 'maxmem_std'])
filtered_entry = collections.namedtuple('FilteredEntry', ['mean', 'std'])
def __init__(self):
self.raw_table = dict()
self.filtered_table = dict()
def get_keys(self):
return self.filtered_table.keys()
def add_entry(self, key, bucket, model, event_count, agents, connections, cpu, maxmem):
if key not in self.raw_table.keys():
self.raw_table[key] = dict()
if model not in self.raw_table[key].keys():
self.raw_table[key][model] = dict()
if agents not in self.raw_table[key][model].keys():
self.raw_table[key][model][agents] = dict()
if connections not in self.raw_table[key][model][agents].keys():
self.raw_table[key][model][agents][connections] = dict()
if bucket not in self.raw_table[key][model][agents][connections].keys():
self.raw_table[key][model][agents][connections][bucket] = dict()
self.raw_table[key][model][agents][connections][bucket]["cpu"] = list()
self.raw_table[key][model][agents][connections][bucket]["maxmem"] = list()
self.raw_table[key][model][agents][connections][bucket]["event_count"] = list()
self.raw_table[key][model][agents][connections][bucket]["cpu"].append(cpu)
self.raw_table[key][model][agents][connections][bucket]["maxmem"].append(maxmem)
self.raw_table[key][model][agents][connections][bucket]["event_count"].append(event_count)
def create_filtered_table(self):
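        # Collapse each (model, agents, connections, bucket) cell of raw
        # samples into a single mean/std entry per key.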
self.filtered_table = dict()
for key in self.raw_table.keys():
self.filtered_table[key] = list()
for model in self.raw_table[key].keys():
for agents in self.raw_table[key][model].keys():
for connections in self.raw_table[key][model][agents].keys():
for bucket in self.raw_table[key][model][agents][connections].keys():
                            if len(self.raw_table[key][model][agents][connections][bucket]["event_count"]) == 0:
continue
event_count = ResultsTable.filter_bucket_entry(
self.raw_table[key][model][agents][connections][bucket]["event_count"])
cpu = ResultsTable.filter_bucket_entry(
self.raw_table[key][model][agents][connections][bucket]["cpu"])
maxmem = ResultsTable.filter_bucket_entry(
self.raw_table[key][model][agents][connections][bucket]["maxmem"])
self.filtered_table[key].append(ResultsTable.filtered_table_entry(
event_count=event_count.mean, event_count_std=event_count.std,
agents=agents, agents_std=0,
connections=connections, connections_std=0,
cpu=cpu.mean, cpu_std=cpu.std,
maxmem=maxmem.mean, maxmem_std=maxmem.std))
@staticmethod
def filter_bucket_entry(entry):
return ResultsTable.filtered_entry(mean=numpy.mean(entry), std=numpy.std(entry))
def get_entries_for_key(self, key):
return self.filtered_table[key]
def get_event_count_lists_for_key(self, key):
key_data = self.get_entries_for_key(key)
return ResultsTable.filtered_entry(mean=[row[0] for row in key_data], std=[row[1] for row in key_data])
def get_agents_lists_for_key(self, key):
key_data = self.get_entries_for_key(key)
return ResultsTable.filtered_entry(mean=[row[2] for row in key_data], std=[row[3] for row in key_data])
def get_connections_lists_for_key(self, key):
key_data = self.get_entries_for_key(key)
return ResultsTable.filtered_entry(mean=[row[4] for row in key_data], std=[row[5] for row in key_data])
def get_cpu_lists_for_key(self, key):
key_data = self.get_entries_for_key(key)
return ResultsTable.filtered_entry(mean=[row[6] for row in key_data], std=[row[7] for row in key_data])
def get_maxmem_lists_for_key(self, key):
key_data = self.get_entries_for_key(key)
return ResultsTable.filtered_entry(mean=[row[8] for row in key_data], std=[row[9] for row in key_data])
class ScoreTable(object):
def __init__(self, kernels, tag):
global fit_comparison_table
fit_comparison_table[tag] = 0.0
self.tag = tag
self.r2_values = list()
self.kernels = kernels
self.table = dict()
self.total_count = 0
self.total_score_idx = len(kernels)
for kernel in kernels:
self.table[kernel] = array('I', [0] * (1 + len(kernels)))
def get_table(self):
return self.table
def get_total_count(self):
return self.total_count
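    # The add_*_fit_score methods integrate each kernel's fitted line/plane/
    # volume over the fitted range, rank kernels by the resulting value
    # (ascending, so the smallest integral earns the most points), and record
    # per-rank counts plus a cumulative score; the mean R^2 of the fits is
    # tracked in fit_comparison_table.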
def add_1d_fit_score(self, fits):
self.total_count += 1
f_list = list()
for kernel in fits.keys():
slope = fits[kernel][0][0]
intercept = fits[kernel][1]
self.r2_values.append(float(fits[kernel][2]))
x_min = float(fits[kernel][3][0])
x_max = float(fits[kernel][4][0])
f_list.append((scipy.integrate.quad(lambda x: slope * x + intercept,
x_min, x_max)[0],
kernel[0]))
f_list.sort()
for i in range(0, len(f_list)):
self.table[f_list[i][1]][i] += 1
self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i
global fit_comparison_table
fit_comparison_table[self.tag] = numpy.mean(self.r2_values)
def add_2d_fit_score(self, fits):
self.total_count += 1
f_list = list()
for kernel in fits.keys():
slope_x = fits[kernel][0][0]
slope_y = fits[kernel][0][1]
intercept = fits[kernel][1]
self.r2_values.append(float(fits[kernel][2]))
x_min = float(fits[kernel][3][0])
x_max = float(fits[kernel][4][0])
y_min = float(fits[kernel][3][1])
y_max = float(fits[kernel][4][1])
f_list.append((scipy.integrate.dblquad(lambda x, y: slope_x * x + slope_y * y + intercept,
x_min, x_max,
lambda x: y_min, lambda x: y_max)[0],
kernel[0]))
f_list.sort()
for i in range(0, len(f_list)):
self.table[f_list[i][1]][i] += 1
self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i
global fit_comparison_table
fit_comparison_table[self.tag] = numpy.mean(self.r2_values)
def add_3d_fit_score(self, fits):
self.total_count += 1
f_list = list()
for kernel in fits.keys():
slope_x = fits[kernel][0][0]
slope_y = fits[kernel][0][1]
slope_z = fits[kernel][0][2]
intercept = fits[kernel][1]
self.r2_values.append(float(fits[kernel][2]))
x_min = float(fits[kernel][3][0])
x_max = float(fits[kernel][4][0])
y_min = float(fits[kernel][3][1])
y_max = float(fits[kernel][4][1])
z_min = float(fits[kernel][3][2])
z_max = float(fits[kernel][4][2])
f_list.append((scipy.integrate.tplquad(lambda x, y, z: slope_x * x + slope_y * y + slope_z * z + intercept,
x_min, x_max,
lambda x: y_min, lambda x: y_max,
lambda x, y: z_min, lambda x, y: z_max)[0],
kernel[0]))
f_list.sort()
for i in range(0, len(f_list)):
self.table[f_list[i][1]][i] += 1
self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i
global fit_comparison_table
fit_comparison_table[self.tag] = numpy.mean(self.r2_values)
class MachineComparisonTable(object):
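    # vCPU core counts for the EC2 m3 instance types used for the experiments.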
machine_core_counts = {'m3.large': 2,
'm3.2xlarge': 8,
'm3.medium': 1,
'm3.xlarge': 4}
def __init__(self, kernels):
self.per_kernel_means = dict()
self.per_kernel_data = dict()
self.kernels = kernels
self.table = dict()
self.per_kernel_splines = dict()
for kernel in self.kernels:
self.per_kernel_means[kernel] = dict()
self.per_kernel_data[kernel] = dict()
self.box_props = dict(linewidth=0.5, color='DimGray', markeredgecolor='DimGray')
def add_1d_fit_score(self, fits, machine):
machine_entry = self.get_machine_entry(machine)
for kernel in fits.keys():
slope = fits[kernel][0][0]
intercept = fits[kernel][1]
x_min = float(fits[kernel][3][0])
x_max = float(fits[kernel][4][0])
machine_entry[kernel[0]].append(scipy.integrate.quad(lambda x: slope * x + intercept, x_min, x_max)[0])
def add_2d_fit_score(self, fits, machine):
machine_entry = self.get_machine_entry(machine)
for kernel in fits.keys():
slope_x = fits[kernel][0][0]
slope_y = fits[kernel][0][1]
intercept = fits[kernel][1]
x_min = float(fits[kernel][3][0])
x_max = float(fits[kernel][4][0])
y_min = float(fits[kernel][3][1])
y_max = float(fits[kernel][4][1])
machine_entry[kernel[0]].append(scipy.integrate.dblquad(lambda x, y: slope_x * x + slope_y * y + intercept,
x_min, x_max,
lambda x: y_min, lambda x: y_max)[0])
def add_3d_fit_score(self, fits, machine):
machine_entry = self.get_machine_entry(machine)
for kernel in fits.keys():
slope_x = fits[kernel][0][0]
slope_y = fits[kernel][0][1]
slope_z = fits[kernel][0][2]
intercept = fits[kernel][1]
x_min = float(fits[kernel][3][0])
x_max = float(fits[kernel][4][0])
y_min = float(fits[kernel][3][1])
y_max = float(fits[kernel][4][1])
z_min = float(fits[kernel][3][2])
z_max = float(fits[kernel][4][2])
machine_entry[kernel[0]].append(scipy.integrate.tplquad(
lambda x, y, z: slope_x * x + slope_y * y + slope_z * z + intercept,
x_min, x_max,
lambda x: y_min, lambda x: y_max,
lambda x, y: z_min, lambda x, y: z_max)[0])
def get_machine_entry(self, machine):
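        """Return the per-kernel score lists for a machine, creating them on first access."""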
if machine in self.table:
return self.table[machine]
else:
self.table[machine] = dict()
for kernel in self.kernels:
self.table[machine][kernel] = list()
return self.table[machine]
def generate_per_kernel_means(self):
for machine in self.table.keys():
for kernel in self.kernels:
self.per_kernel_means[kernel][MachineComparisonTable.machine_core_counts[machine]] = \
numpy.mean(self.table[machine][kernel])
def generate_per_kernel_data(self):
for machine in self.table.keys():
for kernel in self.kernels:
self.per_kernel_data[kernel][MachineComparisonTable.machine_core_counts[machine]] = \
self.table[machine][kernel]
def generate_mean_list(self, kernel):
mean_list = list()
for cores, value in self.per_kernel_means[kernel].iteritems():
mean_list.append((cores, value))
mean_list.sort()
return mean_list
def generate_1d_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel,
key_label_filename):
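        """Plot mean score against core count for one kernel, cache the raw points and a
        smoothed spline in per_kernel_splines, and write the accompanying LaTeX figure file."""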
data_list = self.generate_mean_list(kernel)
x_list = list()
y_list = list()
for entry in data_list:
x_list.append(entry[0])
y_list.append(entry[1])
x_data = numpy.array(x_list)
y_data = numpy.array(y_list)
x_new = numpy.linspace(x_data.min(), x_data.max(), 300)
y_new = scipy.interpolate.spline(x_data, y_data, x_new)
self.per_kernel_splines[kernel] = dict()
self.per_kernel_splines[kernel]['x_data'] = x_data
self.per_kernel_splines[kernel]['y_data'] = y_data
self.per_kernel_splines[kernel]['x_new'] = x_new
self.per_kernel_splines[kernel]['y_new'] = y_new
GenericArtifacts.set_figure_params()
filename_base = "machine_comparison_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename,
str(kernel).lower(), key_label_filename)
plot_filename = os.path.join(FLAGS.root_dir, "{}_plot.eps".format(filename_base))
print "\tGenerating {}".format(plot_filename)
pylab.figure(1)
pylab.clf()
pylab.plot(x_data, y_data, linestyle='-', color='k')
pylab.scatter(x_data, y_data, marker='s', color='k', label=kernel)
pylab.autoscale()
pylab.xlabel("Number of Cores")
pylab.ylabel(dependent_caption)
pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4)
pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait')
caption = "Plot of Machine Comparison of {} for {} vs {}".format(kernel, independent_caption,
dependent_caption)
tex_filename = os.path.join(FLAGS.root_dir, "{}_plot.tex".format(filename_base))
print "\tGenerating {}".format(tex_filename)
tex_figure_path = os.path.join("figures", "auto", "{}_plot.eps".format(filename_base))
output_latex = r"""\begin{figure}
\centering
"""
output_latex += "\\includegraphics{%s}\n" % tex_figure_path
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{fig:%s}\n" % filename_base
output_latex += r"""\end{figure}"""
with open(tex_filename, 'w') as f:
f.write(output_latex)
def generate_box_whisker_plot(self, dependent_caption, dependent_filename, independent_caption,
independent_filename, kernel, key_label_filename):
positions = self.per_kernel_data[kernel].keys()
positions.sort()
box_data = list()
for position in positions:
box_data.append(self.per_kernel_data[kernel][position])
x_data = self.per_kernel_splines[kernel]['x_data']
y_data = self.per_kernel_splines[kernel]['y_data']
GenericArtifacts.set_figure_params()
filename_base = "machine_comparison_box_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename,
str(kernel).lower(), key_label_filename)
plot_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.eps".format(filename_base))
print "\tGenerating {}".format(plot_filename)
pylab.figure(1)
pylab.clf()
flier_props = self.box_props.copy()
flier_props['marker'] = 's'
pylab.boxplot(x=box_data, positions=positions, boxprops=self.box_props,
whiskerprops=self.box_props, capprops=self.box_props, flierprops=flier_props,
medianprops=self.box_props, meanprops=self.box_props)
pylab.plot(x_data, y_data, linestyle='-', color='k')
pylab.scatter(x_data, y_data, marker='s', color='k', label=kernel)
pylab.autoscale()
pylab.xlabel("Number of Cores")
pylab.ylabel(dependent_caption)
pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4)
pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait')
caption = "Box \& Whisker Plot of Machine Comparison of {} for {} vs {}".format(kernel, independent_caption,
dependent_caption)
tex_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.tex".format(filename_base))
print "\tGenerating {}".format(tex_filename)
tex_figure_path = os.path.join("figures", "auto", "{}_bwplot.eps".format(filename_base))
output_latex = r"""\begin{figure}
\centering
"""
output_latex += "\\includegraphics{%s}\n" % tex_figure_path
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{fig:%s}\n" % filename_base
output_latex += r"""\end{figure}"""
with open(tex_filename, 'w') as f:
f.write(output_latex)
def generate_table(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel,
key_label_caption, key_label_filename):
filename_base = "machine_comparison_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename,
str(kernel).lower(), key_label_filename)
tex_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base))
print "\tGenerating {}".format(tex_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += r"""\begin{tabular}{|c|c|}
\hline
"""
output_latex += r"""Cores & Score \\
\hline
"""
for entry in self.generate_mean_list(kernel):
cores = entry[0]
score = entry[1]
output_latex += "%d & %.4e \\\\ \n" % (cores, score)
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{Machine Comparison of %s for %s vs %s in %s}\n" % (kernel, independent_caption,
dependent_caption,
key_label_caption)
output_latex += "\\label{tab:%s}\n" % filename_base
output_latex += r"""\end{table}"""
with open(tex_filename, 'w') as f:
f.write(output_latex)
def generate_multiline_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename,
key_label_filename):
filename_base = "machine_comparison_{}_vs_{}_{}".format(independent_filename, dependent_filename,
key_label_filename)
plot_filename = os.path.join(FLAGS.root_dir, "{}_plot.eps".format(filename_base))
print "\tGenerating {}".format(plot_filename)
pylab.figure(1)
pylab.clf()
markers = ['v', '^', 's', 'D', 'x', '*', 'h']
markers_count = 0
for kernel in self.kernels:
x_data = self.per_kernel_splines[kernel]['x_data']
y_data = self.per_kernel_splines[kernel]['y_data']
pylab.plot(x_data, y_data, linestyle='-', color='k')
pylab.scatter(x_data, y_data, marker=markers[markers_count], color='k', label=kernel)
markers_count += 1
pylab.autoscale()
pylab.xlabel("Number of Cores")
pylab.ylabel(dependent_caption)
pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4)
pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait')
caption = "Multi-line Plot of Machine Comparison for {} vs {}".format(independent_caption, dependent_caption)
tex_filename = os.path.join(FLAGS.root_dir, "{}_plot.tex".format(filename_base))
print "\tGenerating {}".format(tex_filename)
tex_figure_path = os.path.join("figures", "auto", "{}_plot.eps".format(filename_base))
output_latex = r"""\begin{figure}
\centering
"""
output_latex += "\\includegraphics{%s}\n" % tex_figure_path
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{fig:%s}\n" % filename_base
output_latex += r"""\end{figure}"""
with open(tex_filename, 'w') as f:
f.write(output_latex)
def generate_multiline_box_whisker_plot(self, dependent_caption, dependent_filename, independent_caption,
independent_filename, key_label_filename):
filename_base = "machine_comparison_box_{}_vs_{}_{}".format(independent_filename, dependent_filename,
key_label_filename)
plot_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.eps".format(filename_base))
print "\tGenerating {}".format(plot_filename)
pylab.figure(1)
pylab.clf()
markers = ['v', '^', 's', 'D', 'x', '*', 'h']
markers_count = 0
for kernel in self.kernels:
x_data = self.per_kernel_splines[kernel]['x_data']
y_data = self.per_kernel_splines[kernel]['y_data']
positions = self.per_kernel_data[kernel].keys()
positions.sort()
box_data = list()
for position in positions:
box_data.append(self.per_kernel_data[kernel][position])
flier_props = self.box_props.copy()
flier_props['marker'] = markers[markers_count]
width = 0.1 * float(markers_count + 1)
whisker_props = self.box_props.copy()
whisker_props['linestyle'] = 'none'
pylab.boxplot(x=box_data, positions=positions, widths=width, boxprops=self.box_props,
whiskerprops=whisker_props, showcaps=False, showfliers=False,
medianprops=self.box_props, meanprops=self.box_props)
pylab.plot(x_data, y_data, linestyle='-', color='k')
pylab.scatter(x_data, y_data, marker=markers[markers_count], color='k', label=kernel)
markers_count += 1
pylab.autoscale()
pylab.xlabel("Number of Cores")
pylab.ylabel(dependent_caption)
pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4)
pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait')
caption = "Multi-line Box \& Whisker Plot of Machine Comparison for {} vs {}".format(independent_caption,
dependent_caption)
tex_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.tex".format(filename_base))
print "\tGenerating {}".format(tex_filename)
tex_figure_path = os.path.join("figures", "auto", "{}_bwplot.eps".format(filename_base))
output_latex = r"""\begin{figure}
\centering
"""
output_latex += "\\includegraphics{%s}\n" % tex_figure_path
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{fig:%s}\n" % filename_base
output_latex += r"""\end{figure}"""
with open(tex_filename, 'w') as f:
f.write(output_latex)
def generate_artifacts(self, key_label_caption, key_label_filename, independent_caption, independent_filename,
dependent_caption, dependent_filename):
self.generate_per_kernel_means()
self.generate_per_kernel_data()
for kernel in self.kernels:
self.generate_1d_plot(dependent_caption, dependent_filename, independent_caption, independent_filename,
kernel, key_label_filename)
self.generate_box_whisker_plot(dependent_caption, dependent_filename, independent_caption,
independent_filename, kernel, key_label_filename)
self.generate_table(dependent_caption, dependent_filename, independent_caption, independent_filename,
kernel, key_label_caption, key_label_filename)
self.generate_multiline_plot(dependent_caption, dependent_filename, independent_caption, independent_filename,
key_label_filename)
self.generate_multiline_box_whisker_plot(dependent_caption, dependent_filename, independent_caption,
independent_filename, key_label_filename)
class GenericArtifacts:
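    """Abstract base class for the LaTeX artifact generators.

    On construction it fits ordinary least squares models of CPU time and max
    memory against event count, agents, and connections (singly and in every
    combination) for each key in the results table; subclasses decide how
    those fits are plotted, tabulated, and scored.
    """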
__metaclass__ = ABCMeta
linear_regression = collections.namedtuple('LinearRegression', ['slope', 'intercept', 'r_squared', 'min', 'max'])
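    # linear_regression fields: slope, min, and max are lists with one entry per
    # independent variable; intercept and r_squared are scalars from the OLS fit.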
def __init__(self, results_table, key_label_tuple):
self.results_table = results_table
self.key_label_tuple = key_label_tuple
self.keys = self.results_table.get_keys()
        self.sub_key_label_tuple = None if len(self.key_label_tuple) == 1 else self.key_label_tuple[1:]
self.sub_keys = None if not self.sub_key_label_tuple else set()
self.kernels = set()
self.cpu_ranges = dict()
self.maxmem_ranges = dict()
self.event_count_ranges = dict()
self.agents_ranges = dict()
self.connections_ranges = dict()
self.event_count_vs_cpu_fits = dict()
self.event_count_vs_maxmem_fits = dict()
self.agents_vs_cpu_fits = dict()
self.agents_vs_maxmem_fits = dict()
self.connections_vs_cpu_fits = dict()
self.connections_vs_maxmem_fits = dict()
self.event_count_and_agents_vs_cpu_fits = dict()
self.event_count_and_agents_vs_maxmem_fits = dict()
self.event_count_and_connections_vs_cpu_fits = dict()
self.event_count_and_connections_vs_maxmem_fits = dict()
self.agents_and_connections_vs_cpu_fits = dict()
self.agents_and_connections_vs_maxmem_fits = dict()
self.event_count_and_agents_and_connections_vs_cpu_fits = dict()
self.event_count_and_agents_and_connections_vs_maxmem_fits = dict()
for key in self.keys:
self.calculate_fits_for_key(key)
self.kernels.add(key[0])
if self.sub_keys is not None:
self.sub_keys.add(key[1:])
def calculate_fits_for_key(self, key):
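        """Cache the mean measurement ranges for a key and compute its 1-D, 2-D, and 3-D OLS fits."""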
self.cpu_ranges[key] = self.results_table.get_cpu_lists_for_key(key).mean
self.maxmem_ranges[key] = self.results_table.get_maxmem_lists_for_key(key).mean
self.event_count_ranges[key] = self.results_table.get_event_count_lists_for_key(key).mean
        # NOTE: assumes ResultsTable also provides get_agents_lists_for_key and
        # get_connections_lists_for_key alongside get_event_count_lists_for_key.
        self.agents_ranges[key] = self.results_table.get_agents_lists_for_key(key).mean
        self.connections_ranges[key] = self.results_table.get_connections_lists_for_key(key).mean
self.event_count_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d(
self.event_count_ranges[key], self.cpu_ranges[key])
self.event_count_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d(
self.event_count_ranges[key], self.maxmem_ranges[key])
self.agents_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d(
self.agents_ranges[key], self.cpu_ranges[key])
self.agents_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d(
self.agents_ranges[key], self.maxmem_ranges[key])
self.connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d(
self.connections_ranges[key], self.cpu_ranges[key])
self.connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d(
self.connections_ranges[key], self.maxmem_ranges[key])
self.event_count_and_agents_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d(
self.event_count_ranges[key], self.agents_ranges[key], self.cpu_ranges[key])
self.event_count_and_agents_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d(
self.event_count_ranges[key], self.agents_ranges[key], self.maxmem_ranges[key])
self.event_count_and_connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d(
self.event_count_ranges[key], self.connections_ranges[key], self.cpu_ranges[key])
self.event_count_and_connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d(
self.event_count_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key])
self.agents_and_connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d(
self.agents_ranges[key], self.connections_ranges[key], self.cpu_ranges[key])
self.agents_and_connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d(
self.agents_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key])
self.event_count_and_agents_and_connections_vs_cpu_fits[key] = \
GenericArtifacts.calculate_linear_regression_3d(
self.event_count_ranges[key], self.agents_ranges[key], self.connections_ranges[key],
self.cpu_ranges[key])
self.event_count_and_agents_and_connections_vs_maxmem_fits[key] = \
GenericArtifacts.calculate_linear_regression_3d(
self.event_count_ranges[key], self.agents_ranges[key], self.connections_ranges[key],
self.maxmem_ranges[key])
def filter_dict_for_sub_key(self, raw_dict, sub_key):
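        """Reduce a dict keyed by full (kernel, ...) tuples to one keyed by (kernel,) for the given sub-key."""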
return_dict = dict()
for kernel in iter(self.kernels):
return_dict[(kernel,)] = raw_dict[(kernel,) + sub_key]
return return_dict
@staticmethod
    def key_tuple_to_caption_string(key_tuple, capitalize=False):
        return_string = ""
        for entry in key_tuple:
            if not capitalize:
                return_string += "{} and ".format(entry)
            else:
                return_string += "{} and ".format(str(entry).capitalize())
return return_string[:-5]
@staticmethod
def key_tuple_to_filename_string(key_tuple, lowercase=False):
return_string = ""
for entry in key_tuple:
if not lowercase:
return_string += "{}_".format(entry)
else:
return_string += "{}_".format(str(entry).lower())
return return_string[:-1]
@abstractmethod
def generate_multiline_plots(self):
pass
@abstractmethod
def generate_fit_tables(self):
pass
@abstractmethod
def generate_score_tables(self):
pass
@abstractmethod
def generate_machine_comparison_tables(self):
pass
@staticmethod
def set_figure_params():
fig_width = 7.5 # width in inches
fig_height = 3.75 # height in inches
fig_size = [fig_width, fig_height]
fig_params = {'backend': 'ps',
'axes.labelsize': 8,
'text.fontsize': 8,
'legend.fontsize': 8,
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'text.usetex': True,
'figure.figsize': fig_size}
pylab.rcParams.update(fig_params)
@staticmethod
def calculate_linear_regression_1d(x_list, f_list):
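        """Fit F ~ X by ordinary least squares and return the slope, intercept, R^2, and X range."""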
results = sm.ols(formula="F ~ X", data=({'F': f_list, 'X': x_list})).fit()
slope = list()
slope.append(results.params['X'])
min_value = list()
min_value.append(min(x_list))
max_value = list()
max_value.append(max(x_list))
intercept = results.params['Intercept']
r_squared = results.rsquared
return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared,
min=min_value, max=max_value)
@staticmethod
def calculate_linear_regression_2d(x_list, y_list, f_list):
results = sm.ols(formula="F ~ X + Y", data=({'F': f_list, 'X': x_list, 'Y': y_list})).fit()
slope = list()
slope.append(results.params['X'])
slope.append(results.params['Y'])
min_value = list()
min_value.append(min(x_list))
min_value.append(min(y_list))
max_value = list()
max_value.append(max(x_list))
max_value.append(max(y_list))
intercept = results.params['Intercept']
r_squared = results.rsquared
return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared,
min=min_value, max=max_value)
@staticmethod
def calculate_linear_regression_3d(x_list, y_list, z_list, f_list):
results = sm.ols(formula="F ~ X + Y + Z", data=({'F': f_list, 'X': x_list, 'Y': y_list, 'Z': z_list})).fit()
slope = list()
slope.append(results.params['X'])
slope.append(results.params['Y'])
slope.append(results.params['Z'])
min_value = list()
min_value.append(min(x_list))
min_value.append(min(y_list))
min_value.append(min(z_list))
max_value = list()
max_value.append(max(x_list))
max_value.append(max(y_list))
max_value.append(max(z_list))
intercept = results.params['Intercept']
r_squared = results.rsquared
return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared,
min=min_value, max=max_value)
@staticmethod
def generate_1d_multiline_plot(fits, x_ranges, x_label, f_label, caption, filename_base):
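        """Draw each kernel's fitted line, sampled at 0, x_max / 2, and x_max, on a single figure
        and write the matching LaTeX figure file."""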
markers = ['v', '^', 's', 'D', 'x', '*', 'h']
GenericArtifacts.set_figure_params()
filename_base = filename_base.replace('.', '_')
plot_filename = os.path.join(FLAGS.root_dir, "{}.eps".format(filename_base))
print "\tGenerating {}".format(plot_filename)
pylab.figure(1)
pylab.clf()
marker_count = 0
for kernel in fits.keys():
x_list = [0]
x_max = max(x_ranges[kernel])
x_list.append(x_max / 2)
x_list.append(x_max)
f_fit = lambda x: x * fits[kernel][0][0] + fits[kernel][1]
y_list = [f_fit(entry) for entry in x_list]
pylab.plot(x_list, y_list, marker=markers[marker_count], linestyle='-', color='k', label=kernel[0])
marker_count += 1
pylab.autoscale()
pylab.xlabel(x_label)
pylab.ylabel(f_label)
pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4, mode="expand", borderaxespad=0.)
pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait')
tex_filename = os.path.join(FLAGS.root_dir, "{}.tex".format(filename_base))
print "\tGenerating {}".format(tex_filename)
tex_figure_path = os.path.join("figures", "auto", filename_base)
output_latex = r"""\begin{figure}
\centering
"""
output_latex += "\\includegraphics{%s}\n" % tex_figure_path
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{fig:%s}\n" % filename_base
output_latex += r"""\end{figure}"""
with open(tex_filename, 'w') as f:
f.write(output_latex)
@staticmethod
def generate_1d_fit_table(key_labels, fits, caption, filename_base):
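        """Write a LaTeX table of slope, intercept, and R^2 for each key's one-variable fit."""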
filename_base = filename_base.replace('.', '_')
table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base))
print "\tGenerating {}".format(table_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += "\\begin{tabular}{|"
for _ in key_labels:
output_latex += "l|"
output_latex += "|c|c|c|}\n"
output_latex += "\\hline\n"
for label in key_labels:
output_latex += "{} & ".format(label)
output_latex += "Slope & Intercept & $R^2$ \\\\\n\\hline\n"
for key in fits.keys():
for entry in key:
output_latex += "%s & " % entry
output_latex += " %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][1], fits[key][2])
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{tab:%s}\n" % filename_base
output_latex += r"""\end{table}"""
with open(table_filename, 'w') as f:
f.write(output_latex)
@staticmethod
def generate_2d_fit_table(key_labels, fits, x_label, y_label, caption, filename_base):
filename_base = filename_base.replace('.', '_')
table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base))
print "\tGenerating {}".format(table_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += "\\begin{tabular}{|"
for _ in key_labels:
output_latex += "l|"
output_latex += "|c|c|c|c|}\n"
output_latex += "\\hline\n"
for label in key_labels:
output_latex += "{} & ".format(label)
output_latex += "{} Slope & {} Slope & Intercept & $R^2$ \\\\\n\\hline\n".format(x_label, y_label)
for key in fits.keys():
for entry in key:
output_latex += "%s & " % entry
output_latex += "%.4g & %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][0][1], fits[key][1],
fits[key][2])
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{tab:%s}\n" % filename_base
output_latex += r"""\end{table}"""
with open(table_filename, 'w') as f:
f.write(output_latex)
@staticmethod
def generate_3d_fit_table(key_labels, fits, x_label, y_label, z_label, caption, filename_base):
filename_base = filename_base.replace('.', '_')
table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base))
print "\tGenerating {}".format(table_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += "\\begin{tabular}{|"
for _ in key_labels:
output_latex += "l|"
output_latex += "|c|c|c|c|c|}\n"
output_latex += "\\hline\n"
for label in key_labels:
output_latex += "{} & ".format(label)
output_latex += "{} Slope & {} Slope & {} Slope & Intercept & $R^2$ \\\\\n\\hline\n".format(x_label, y_label,
z_label)
for key in fits.keys():
for entry in key:
output_latex += "%s & " % entry
output_latex += "%.4g & %.4g & %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][0][1],
fits[key][0][2], fits[key][1], fits[key][2])
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{tab:%s}\n" % filename_base
output_latex += r"""\end{table}"""
with open(table_filename, 'w') as f:
f.write(output_latex)
@staticmethod
def generate_score_table(score_table, caption, filename_base):
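        """Write a LaTeX table of per-kernel ordinal-rank counts and total score; zero counts are
        rendered as a dash."""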
ordinal_ranks = ["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th"]
filename_base = filename_base.replace('.', '_')
table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base))
print "\tGenerating {}".format(table_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += "\\begin{tabular}{|l|"
for _ in score_table.get_table().keys():
output_latex += "|c"
output_latex += "||c"
output_latex += "|}\\hline\n"
output_latex += "Kernel "
for i in range(0, len(score_table.get_table().keys())):
output_latex += "& %s " % ordinal_ranks[i]
output_latex += "& Total Score "
output_latex += "\\\\\n"
output_latex += r"""\hline
"""
total_count = score_table.get_total_count()
assert(total_count > 0)
for kernel in score_table.get_table().keys():
output_latex += "%s " % kernel
for entry in score_table.get_table()[kernel]:
if entry > 0:
output_latex += "& %d " % entry
else:
output_latex += "& \\textemdash "
output_latex += "\\\\\n"
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{tab:%s}\n" % filename_base
output_latex += r"""\end{table}"""
with open(table_filename, 'w') as f:
f.write(output_latex)
@staticmethod
def generate_score_percentage_table(score_table, caption, filename_base):
ordinal_ranks = ["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th"]
filename_base = filename_base.replace('.', '_')
table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base))
print "\tGenerating {}".format(table_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += "\\begin{tabular}{|l|"
for _ in score_table.get_table().keys():
output_latex += "|c"
output_latex += "|}\\hline\n"
output_latex += "Kernel "
for i in range(0, len(score_table.get_table().keys())):
output_latex += "& %s " % ordinal_ranks[i]
output_latex += "\\\\\n"
output_latex += r"""\hline
"""
total_count = float(score_table.get_total_count())
assert(total_count > 0.0)
for kernel in score_table.get_table().keys():
output_latex += "%s " % kernel
for i in range(0, len(score_table.get_table().keys())):
entry = score_table.get_table()[kernel][i]
if entry > 0:
output_latex += "& %5.4f " % (float(entry) / total_count)
else:
output_latex += "& \\textemdash "
output_latex += "\\\\\n"
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{%s}\n" % caption
output_latex += "\\label{tab:%s}\n" % filename_base
output_latex += r"""\end{table}"""
with open(table_filename, 'w') as f:
f.write(output_latex)
class KernelArtifacts(GenericArtifacts):
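    """Artifacts aggregated per kernel only (keys are (Kernel,) tuples)."""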
def __init__(self, results_table):
super(KernelArtifacts, self).__init__(results_table, ("Kernel",))
def generate_multiline_plots(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
GenericArtifacts.generate_1d_multiline_plot(self.event_count_vs_cpu_fits, self.event_count_ranges,
"Event Count", "CPU Time (mS)",
"Trend lines for Event Count vs CPU Time for per {} fits".format(
key_label_caption),
"event_count_vs_cpu_per_{}_multiline_plot".format(
key_label_filename))
GenericArtifacts.generate_1d_multiline_plot(self.event_count_vs_maxmem_fits, self.event_count_ranges,
"Event Count", "Max Memory (kB)",
"Trend lines for Event Count vs Max Memory for per {} fits".format(
key_label_caption),
"event_count_vs_maxmem_per_{}_multiline_plot".format(
key_label_filename))
GenericArtifacts.generate_1d_multiline_plot(self.agents_vs_cpu_fits, self.agents_ranges,
"Agents", "CPU Time (mS)",
"Trend lines for Agents vs CPU Time for per {} fits".format(
key_label_caption),
"agents_vs_cpu_per_{}_multiline_plot".format(key_label_filename))
GenericArtifacts.generate_1d_multiline_plot(self.agents_vs_maxmem_fits, self.agents_ranges,
"Agents", "Max Memory (kB)",
"Trend lines for Agents vs Max Memory for per {} fits".format(
key_label_caption),
"agents_vs_maxmem_per_{}_multiline_plot".format(key_label_filename))
GenericArtifacts.generate_1d_multiline_plot(self.connections_vs_cpu_fits, self.connections_ranges,
"Connections", "CPU Time (mS)",
"Trend lines for Connections vs CPU Time for per {} fits".format(
key_label_caption),
"connections_vs_cpu_per_{}_multiline_plot".format(
key_label_filename))
GenericArtifacts.generate_1d_multiline_plot(self.connections_vs_maxmem_fits, self.connections_ranges,
"Connections", "Max Memory (kB)",
"Trend lines for Connections vs Max Memory for per {} fits".format(
key_label_caption),
"connections_vs_maxmem_per_{}_multiline_plot".format(
key_label_filename))
def generate_fit_tables(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
GenericArtifacts.generate_1d_fit_table(self.key_label_tuple,
self.event_count_vs_cpu_fits,
"Event Count vs CPU Time (mS) for per {} fits".format(key_label_caption),
"event_count_vs_cpu_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_1d_fit_table(self.key_label_tuple,
self.event_count_vs_maxmem_fits,
"Event Count vs Max Memory (kB) for per {} fits".format(
key_label_caption),
"event_count_vs_maxmem_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_1d_fit_table(self.key_label_tuple,
self.agents_vs_cpu_fits,
"Agents vs CPU Time (mS) for per {} fits".format(key_label_caption),
"agents_vs_cpu_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_1d_fit_table(self.key_label_tuple,
self.agents_vs_maxmem_fits,
"Agents vs Max Memory (kB) for per {} fits".format(key_label_caption),
"agents_vs_maxmem_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_1d_fit_table(self.key_label_tuple,
self.connections_vs_cpu_fits,
"Connections vs CPU Time (mS) for per {} fits".format(key_label_caption),
"connections_vs_cpu_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_1d_fit_table(self.key_label_tuple,
self.connections_vs_maxmem_fits,
"Connections vs Max Memory (kB) for per {} fits".format(
key_label_caption),
"connections_vs_maxmem_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_2d_fit_table(self.key_label_tuple,
self.event_count_and_agents_vs_cpu_fits, "Event Count", "Agents",
"Event Count and Agents vs CPU Time (mS) for per {} fits".format(
key_label_caption),
"event_count_and_agents_vs_cpu_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_2d_fit_table(self.key_label_tuple,
self.event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents",
"Event Count and Agents vs Max Memory (kB) for per {} fits".format(
key_label_caption),
"event_count_and_agents_vs_maxmem_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_2d_fit_table(self.key_label_tuple,
self.event_count_and_connections_vs_cpu_fits,
"Event Count", "Connections",
"Event Count and Connections vs CPU Time (mS) for per {} fits".format(
key_label_caption),
"event_count_and_connections_vs_cpu_per_{}_fit".format(
key_label_filename))
GenericArtifacts.generate_2d_fit_table(self.key_label_tuple,
self.event_count_and_connections_vs_maxmem_fits,
"Event Count", "Connections",
"Event Count and Connections vs Max Memory (kB) for per {} fits".format(
key_label_caption),
"event_count_and_connections_vs_maxmem_per_{}_fit".format(
key_label_filename))
GenericArtifacts.generate_2d_fit_table(self.key_label_tuple,
self.agents_and_connections_vs_cpu_fits, "Agents", "Connections",
"Agents and Connections vs CPU Time (mS) for per {} fits".format(
key_label_caption),
"agents_and_connections_vs_cpu_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_2d_fit_table(self.key_label_tuple,
self.agents_and_connections_vs_maxmem_fits, "Agents", "Connections",
"Agents and Connections vs Max Memory (kB) for per {} fits".format(
key_label_caption),
"agents_and_connections_vs_maxmem_per_{}_fit".format(key_label_filename))
GenericArtifacts.generate_3d_fit_table(self.key_label_tuple,
self.event_count_and_agents_and_connections_vs_cpu_fits,
"Event Count", "Agents", "Connections",
"Event Count and Agents and Connections vs CPU Time (mS) for per {} "
"fits".format(key_label_caption),
"event_count_and_agents_and_connections_vs_cpu_per_{}_fit".format(
key_label_filename))
GenericArtifacts.generate_3d_fit_table(self.key_label_tuple,
self.event_count_and_agents_and_connections_vs_maxmem_fits,
"Event Count", "Agents", "Connections",
"Event Count and Agents and Connections vs Max Memory (kB) for per "
"{} fits".format(key_label_caption),
"event_count_and_agents_and_connections_vs_maxmem_per_{}_fit".format(
key_label_filename))
def generate_score_tables(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
score_tables = dict()
selection = (("Kernel", ), ("Agents", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_1d_fit_score(self.agents_vs_cpu_fits)
selection = (("Kernel", ), ("Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_1d_fit_score(self.connections_vs_cpu_fits)
selection = (("Kernel", ), ("Events", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_1d_fit_score(self.event_count_vs_cpu_fits)
selection = (("Kernel", ), ("Agents", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_2d_fit_score(self.agents_and_connections_vs_cpu_fits)
selection = (("Kernel", ), ("Events", "Agents", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_2d_fit_score(self.event_count_and_agents_vs_cpu_fits)
selection = (("Kernel", ), ("Events", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_2d_fit_score(self.event_count_and_connections_vs_cpu_fits)
selection = (("Kernel", ), ("Events", "Agents", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_3d_fit_score(self.event_count_and_agents_and_connections_vs_cpu_fits)
selection = (("Kernel", ), ("Agents", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_1d_fit_score(self.agents_vs_maxmem_fits)
selection = (("Kernel", ), ("Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_1d_fit_score(self.connections_vs_maxmem_fits)
selection = (("Kernel", ), ("Events", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_1d_fit_score(self.event_count_vs_maxmem_fits)
selection = (("Kernel", ), ("Agents", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_2d_fit_score(self.agents_and_connections_vs_maxmem_fits)
selection = (("Kernel", ), ("Events", "Agents", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_2d_fit_score(self.event_count_and_agents_vs_maxmem_fits)
selection = (("Kernel", ), ("Events", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_2d_fit_score(self.event_count_and_connections_vs_maxmem_fits)
selection = (("Kernel", ), ("Events", "Agents", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
score_tables[selection].add_3d_fit_score(self.event_count_and_agents_and_connections_vs_maxmem_fits)
for selection, table in score_tables.iteritems():
independent_vars = selection[1]
independent_caption = ""
independent_filename = ""
for var in independent_vars:
independent_caption += "{} and ".format(var)
independent_filename += "{}_".format(str(var).lower())
independent_caption = independent_caption[:-5]
independent_filename = independent_filename[:-1]
dependent_caption = selection[2]
if dependent_caption == "CPU":
dependent_filename = "cpu"
else:
dependent_filename = "maxmem"
GenericArtifacts.generate_score_table(table,
"Scores based on {} vs {} for {} fits".format(independent_caption,
dependent_caption,
key_label_caption),
"{}_vs_{}_per_{}_fits_scores".format(independent_filename,
dependent_filename,
key_label_filename))
GenericArtifacts.generate_score_percentage_table(table,
"Score percentages based on {} vs {} for {} fits".format(
independent_caption,
dependent_caption,
key_label_caption),
"{}_vs_{}_per_{}_fits_score_percentage".format(
independent_filename,
dependent_filename,
key_label_filename))
def generate_machine_comparison_tables(self):
pass
class KernelMachineArtifacts(GenericArtifacts):
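    """Artifacts broken down per (Kernel, Machine) pair; each machine sub-key gets its own plots and tables."""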
def __init__(self, results_table):
super(KernelMachineArtifacts, self).__init__(results_table, ("Kernel", "Machine"))
def generate_multiline_plots(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
for sub_key in iter(self.sub_keys):
sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key)
sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key)
event_count_ranges = self.filter_dict_for_sub_key(self.event_count_ranges, sub_key)
agents_ranges = self.filter_dict_for_sub_key(self.agents_ranges, sub_key)
connections_ranges = self.filter_dict_for_sub_key(self.connections_ranges, sub_key)
event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key)
event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key)
agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key)
agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key)
connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key)
connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key)
GenericArtifacts.generate_1d_multiline_plot(event_count_vs_cpu_fits, event_count_ranges,
"Event Count", "CPU Time (mS)",
"Trend lines for Event Count vs CPU Time for per {} fits for {}"
.format(key_label_caption, sub_key_caption),
"event_count_vs_cpu_per_{}_multiline_plot_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_1d_multiline_plot(event_count_vs_maxmem_fits, event_count_ranges,
"Event Count", "Max Memory (kB)",
"Trend lines for Event Count vs Max Memory for per {} fits "
"for {}".format(key_label_caption, sub_key_caption),
"event_count_vs_maxmem_per_{}_multiline_plot_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_1d_multiline_plot(agents_vs_cpu_fits, agents_ranges,
"Agents", "CPU Time (mS)",
"Trend lines for Agents vs CPU Time for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"agents_vs_cpu_per_{}_multiline_plot_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_1d_multiline_plot(agents_vs_maxmem_fits, agents_ranges,
"Agents", "Max Memory (kB)",
"Trend lines for Agents vs Max Memory for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"agents_vs_maxmem_per_{}_multiline_plot_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_1d_multiline_plot(connections_vs_cpu_fits, connections_ranges,
"Connections", "CPU Time (mS)",
"Trend lines for Connections vs CPU Time for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"connections_vs_cpu_per_{}_multiline_plot_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_1d_multiline_plot(connections_vs_maxmem_fits, connections_ranges,
"Connections", "Max Memory (kB)",
"Trend lines for Connections vs Max Memory for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"connections_vs_maxmem_per_{}_multiline_plot_for_{}".format(
key_label_filename, sub_key_filename))
def generate_fit_tables(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
for sub_key in iter(self.sub_keys):
sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key)
sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key)
event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key)
event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key)
agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key)
agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key)
connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key)
connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key)
event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_cpu_fits, sub_key)
event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_maxmem_fits, sub_key)
event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_cpu_fits, sub_key)
event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_maxmem_fits, sub_key)
agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_cpu_fits, sub_key)
agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_maxmem_fits, sub_key)
event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key)
event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key)
GenericArtifacts.generate_1d_fit_table(("Kernel",),
event_count_vs_cpu_fits,
"Event Count vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_vs_cpu_per_{}_fit_for_"
"{}".format(key_label_filename, sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
event_count_vs_maxmem_fits,
"Event Count vs Max Memory (kB) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"event_count_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
agents_vs_cpu_fits,
"Agents vs CPU Time (mS) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"agents_vs_cpu_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
agents_vs_maxmem_fits,
"Agents vs Max Memory (kB) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"agents_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
connections_vs_cpu_fits,
"Connections vs CPU Time (mS) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"connections_vs_cpu_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
connections_vs_maxmem_fits,
"Connections vs Max Memory (kB) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"connections_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_agents_vs_cpu_fits, "Event Count", "Agents",
"Event Count and Agents vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_agents_vs_cpu_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents",
"Event Count and Agents vs Max Memory (kB) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_agents_vs_maxmem_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_connections_vs_cpu_fits,
"Event Count", "Connections",
"Event Count and Connections vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_connections_vs_cpu_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_connections_vs_maxmem_fits,
"Event Count", "Connections",
"Event Count and Connections vs Max Memory (kB) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_connections_vs_maxmem_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
agents_and_connections_vs_cpu_fits, "Agents", "Connections",
"Agents and Connections vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"agents_and_connections_vs_cpu_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
agents_and_connections_vs_maxmem_fits, "Agents", "Connections",
"Agents and Connections vs Max Memory (kB) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"agents_and_connections_vs_maxmem_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_3d_fit_table(("Kernel",),
event_count_and_agents_and_connections_vs_cpu_fits,
"Event Count", "Agents", "Connections",
"Event Count and Agents and Connections vs CPU Time (mS) for per {} "
"fits".format(key_label_caption, sub_key_caption),
"event_count_and_agents_and_connections_vs_cpu_per_{}_fit_for_"
"{}".format(key_label_filename, sub_key_filename))
GenericArtifacts.generate_3d_fit_table(("Kernel",),
event_count_and_agents_and_connections_vs_maxmem_fits,
"Event Count", "Agents", "Connections",
"Event Count and Agents and Connections vs Max Memory (kB) for per "
"{} fits for {}".format(key_label_caption, sub_key_caption),
"event_count_and_agents_and_connections_vs_maxmem_per_{}_fit_for_"
"{}".format(key_label_filename, sub_key_filename))
def generate_score_tables(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
score_tables = dict()
selection = (("Kernel", "Machine", ), ("Agents", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Agents", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", "Agents", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", "Agents", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Agents", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Agents", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", "Agents", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", ), ("Events", "Agents", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
for sub_key in iter(self.sub_keys):
event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key)
event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key)
agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key)
agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key)
connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key)
connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key)
event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_cpu_fits, sub_key)
event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_maxmem_fits, sub_key)
event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_cpu_fits, sub_key)
event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_maxmem_fits, sub_key)
agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_cpu_fits, sub_key)
agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_maxmem_fits, sub_key)
event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key)
event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key)
score_tables[(("Kernel", "Machine", ), ("Agents", ), "CPU")].add_1d_fit_score(agents_vs_cpu_fits)
score_tables[(("Kernel", "Machine", ), ("Agents", ), "Max Memory")].add_1d_fit_score(agents_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", ), ("Connections", ), "CPU")].add_1d_fit_score(connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", ), ("Connections", ), "Max Memory")].add_1d_fit_score(
connections_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", ), ("Events", ), "CPU")].add_1d_fit_score(event_count_vs_cpu_fits)
score_tables[(("Kernel", "Machine", ), ("Events", ), "Max Memory")].add_1d_fit_score(
event_count_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", ), ("Agents", "Connections",), "CPU")].add_2d_fit_score(
agents_and_connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", ), ("Agents", "Connections",), "Max Memory")].add_2d_fit_score(
agents_and_connections_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", ), ("Events", "Agents",), "CPU")].add_2d_fit_score(
event_count_and_agents_vs_cpu_fits)
score_tables[(("Kernel", "Machine", ), ("Events", "Agents",), "Max Memory")].add_2d_fit_score(
event_count_and_agents_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", ), ("Events", "Connections",), "CPU")].add_2d_fit_score(
event_count_and_connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", ), ("Events", "Connections",), "Max Memory")].add_2d_fit_score(
event_count_and_connections_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", ), ("Events", "Agents", "Connections",), "CPU")].add_2d_fit_score(
event_count_and_agents_and_connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", ), ("Events", "Agents", "Connections",), "Max Memory")].\
add_3d_fit_score(event_count_and_agents_and_connections_vs_maxmem_fits)
for selection, table in score_tables.iteritems():
independent_vars = selection[1]
independent_caption = ""
independent_filename = ""
for var in independent_vars:
independent_caption += "{} and ".format(var)
independent_filename += "{}_".format(str(var).lower())
independent_caption = independent_caption[:-5]
independent_filename = independent_filename[:-1]
dependent_caption = selection[2]
if dependent_caption == "CPU":
dependent_filename = "cpu"
else:
dependent_filename = "maxmem"
GenericArtifacts.generate_score_table(table,
"Scores based on {} vs {} for {} fits".format(independent_caption,
dependent_caption,
key_label_caption),
"{}_vs_{}_per_{}_fits_scores".format(independent_filename,
dependent_filename,
key_label_filename))
GenericArtifacts.generate_score_percentage_table(table,
"Score percentages based on {} vs {} for {} fits".format(
independent_caption,
dependent_caption,
key_label_caption),
"{}_vs_{}_per_{}_fits_score_percentage".format(
independent_filename,
dependent_filename,
key_label_filename))
def generate_machine_comparison_tables(self):
pass
class KernelMachineTypeArtifacts(GenericArtifacts):
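    """Artifacts broken down per (Kernel, Machine, Type) triple; multi-line plots are omitted at this granularity."""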
def __init__(self, results_table):
super(KernelMachineTypeArtifacts, self).__init__(results_table, ("Kernel", "Machine", "Type"))
def generate_multiline_plots(self):
pass
def generate_fit_tables(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
for sub_key in iter(self.sub_keys):
sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key)
sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key)
event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key)
event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key)
agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key)
agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key)
connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key)
connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key)
event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_cpu_fits, sub_key)
event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_maxmem_fits, sub_key)
event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_cpu_fits, sub_key)
event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_maxmem_fits, sub_key)
agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_cpu_fits, sub_key)
agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_maxmem_fits, sub_key)
event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key)
event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key)
GenericArtifacts.generate_1d_fit_table(("Kernel",),
event_count_vs_cpu_fits,
"Event Count vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_vs_cpu_per_{}_fit_for_"
"{}".format(key_label_filename, sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
event_count_vs_maxmem_fits,
"Event Count vs Max Memory (kB) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"event_count_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
agents_vs_cpu_fits,
"Agents vs CPU Time (mS) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"agents_vs_cpu_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
agents_vs_maxmem_fits,
"Agents vs Max Memory (kB) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"agents_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
connections_vs_cpu_fits,
"Connections vs CPU Time (mS) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"connections_vs_cpu_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_1d_fit_table(("Kernel",),
connections_vs_maxmem_fits,
"Connections vs Max Memory (kB) for per {} fits for {}".format(
key_label_caption, sub_key_caption),
"connections_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename,
sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_agents_vs_cpu_fits, "Event Count", "Agents",
"Event Count and Agents vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_agents_vs_cpu_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents",
"Event Count and Agents vs Max Memory (kB) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_agents_vs_maxmem_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_connections_vs_cpu_fits,
"Event Count", "Connections",
"Event Count and Connections vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_connections_vs_cpu_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
event_count_and_connections_vs_maxmem_fits,
"Event Count", "Connections",
"Event Count and Connections vs Max Memory (kB) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"event_count_and_connections_vs_maxmem_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
agents_and_connections_vs_cpu_fits, "Agents", "Connections",
"Agents and Connections vs CPU Time (mS) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"agents_and_connections_vs_cpu_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_2d_fit_table(("Kernel",),
agents_and_connections_vs_maxmem_fits, "Agents", "Connections",
"Agents and Connections vs Max Memory (kB) for per {} fits for "
"{}".format(key_label_caption, sub_key_caption),
"agents_and_connections_vs_maxmem_per_{}_fit_for_{}".format(
key_label_filename, sub_key_filename))
GenericArtifacts.generate_3d_fit_table(("Kernel",),
event_count_and_agents_and_connections_vs_cpu_fits,
"Event Count", "Agents", "Connections",
"Event Count and Agents and Connections vs CPU Time (mS) for per {} "
"fits".format(key_label_caption, sub_key_caption),
"event_count_and_agents_and_connections_vs_cpu_per_{}_fit_for_"
"{}".format(key_label_filename, sub_key_filename))
GenericArtifacts.generate_3d_fit_table(("Kernel",),
event_count_and_agents_and_connections_vs_maxmem_fits,
"Event Count", "Agents", "Connections",
"Event Count and Agents and Connections vs Max Memory (kB) for per "
"{} fits for {}".format(key_label_caption, sub_key_caption),
"event_count_and_agents_and_connections_vs_maxmem_per_{}_fit_for_"
"{}".format(key_label_filename, sub_key_filename))
def generate_score_tables(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
score_tables = dict()
selection = (("Kernel", "Machine", "Type", ), ("Agents", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Agents", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections", ), "CPU")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Agents", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Agents", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections", ), "Max Memory")
score_tables[selection] = ScoreTable(self.kernels, selection)
for sub_key in iter(self.sub_keys):
event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key)
event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key)
agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key)
agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key)
connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key)
connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key)
event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_cpu_fits, sub_key)
event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_vs_maxmem_fits, sub_key)
event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_cpu_fits, sub_key)
event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_maxmem_fits, sub_key)
agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_cpu_fits, sub_key)
agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.agents_and_connections_vs_maxmem_fits, sub_key)
event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key)
event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key)
score_tables[(("Kernel", "Machine", "Type", ), ("Agents", ), "CPU")].add_1d_fit_score(agents_vs_cpu_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Agents", ), "Max Memory")].add_1d_fit_score(
agents_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Connections", ), "CPU")].add_1d_fit_score(
connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Connections", ), "Max Memory")].add_1d_fit_score(
connections_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", ), "CPU")].add_1d_fit_score(
event_count_vs_cpu_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", ), "Max Memory")].add_1d_fit_score(
event_count_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Agents", "Connections",), "CPU")].add_2d_fit_score(
agents_and_connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Agents", "Connections",), "Max Memory")].add_2d_fit_score(
agents_and_connections_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents",), "CPU")].add_2d_fit_score(
event_count_and_agents_vs_cpu_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents",), "Max Memory")].add_2d_fit_score(
event_count_and_agents_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Connections",), "CPU")].add_2d_fit_score(
event_count_and_connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Connections",), "Max Memory")].add_2d_fit_score(
event_count_and_connections_vs_maxmem_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections",), "CPU")].\
                add_3d_fit_score(event_count_and_agents_and_connections_vs_cpu_fits)
score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections",), "Max Memory")].\
add_3d_fit_score(event_count_and_agents_and_connections_vs_maxmem_fits)
for selection, table in score_tables.iteritems():
independent_vars = selection[1]
independent_caption = ""
independent_filename = ""
for var in independent_vars:
independent_caption += "{} and ".format(var)
independent_filename += "{}_".format(str(var).lower())
independent_caption = independent_caption[:-5]
independent_filename = independent_filename[:-1]
dependent_caption = selection[2]
if dependent_caption == "CPU":
dependent_filename = "cpu"
else:
dependent_filename = "maxmem"
GenericArtifacts.generate_score_table(table,
"Scores based on {} vs {} for {} fits".format(independent_caption,
dependent_caption,
key_label_caption),
"{}_vs_{}_per_{}_fits_scores".format(independent_filename,
dependent_filename,
key_label_filename))
GenericArtifacts.generate_score_percentage_table(table,
"Score percentages based on {} vs {} for {} fits".format(
independent_caption,
dependent_caption,
key_label_caption),
"{}_vs_{}_per_{}_fits_score_percentage".format(
independent_filename,
dependent_filename,
key_label_filename))
def generate_machine_comparison_tables(self):
key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple)
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True)
machine_comparison_tables = dict()
machine_comparison_tables[(("Events", "Connections", ), "CPU")] = MachineComparisonTable(self.kernels)
machine_comparison_tables[(("Events", "Agents", "Connections", ), "Max Memory")] = MachineComparisonTable(
self.kernels)
for sub_key in self.sub_keys:
machine = sub_key[0]
event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key(
self.event_count_and_connections_vs_cpu_fits, sub_key)
event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key(
self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key)
machine_comparison_tables[(("Events", "Connections",), "CPU")].add_2d_fit_score(
event_count_and_connections_vs_cpu_fits, machine)
machine_comparison_tables[(("Events", "Agents", "Connections",), "Max Memory")].add_3d_fit_score(
event_count_and_agents_and_connections_vs_maxmem_fits, machine)
selection = (("Events", "Connections",), "CPU")
independent_caption = "Events \& Connections"
independent_filename = "events_connections"
dependent_caption = "CPU"
dependent_filename = "cpu"
print "Printing results for {}".format(selection)
machine_comparison_tables[selection].generate_artifacts(key_label_caption, key_label_filename,
independent_caption, independent_filename,
dependent_caption, dependent_filename)
selection = (("Events", "Agents", "Connections",), "Max Memory")
independent_caption = "Events \& Agents \& Connections"
independent_filename = "events_agents_connections"
dependent_caption = "Maximum Memory"
dependent_filename = "maxmem"
print "Printing results for {}".format(selection)
machine_comparison_tables[selection].generate_artifacts(key_label_caption, key_label_filename,
independent_caption, independent_filename,
dependent_caption, dependent_filename)
def read_raw_inputs():
print "Reading in raw results"
create_str = "CREATE TABLE IF NOT EXISTS raw_results (machine text, kernel text, type text, model text, " \
"iteration long, event_count long, final_time long, cpu long, maxmem long, agents long, " \
"connections long, bucket long)"
experiment_db.execute(create_str)
for input_file in os.listdir(FLAGS.root_dir):
if re.search(r'run_result.*\.db', input_file):
result_file = os.path.join(FLAGS.root_dir, input_file)
print 'Reading results from {}'.format(result_file)
input_db = DBWrapper(result_file)
read_results(input_db)
input_db.cleanup()
def get_correct_type(row):
row = list(row)
model = row[3]
if re.match("CompleteBi.*", model):
row[2] = "complete-bipartite"
elif re.match("SmallModel.*", model):
row[2] = "Watts-Strogatz"
elif re.match("Cycle.*", model):
row[2] = "cycle"
elif re.match("Hyper.*", model):
row[2] = "hypercube"
elif re.match("Star.*", model):
row[2] = "star"
elif re.match("Complete.*", model):
row[2] = "complete"
elif re.match("Erdos.*", model):
row[2] = "erdos-reyni"
elif re.match("Wheel.*", model):
row[2] = "wheel"
elif re.match("Circular.*", model):
row[2] = "circular-ladder"
elif re.match("Periodic.*", model):
row[2] = "periodic-2grid"
elif re.match("NonPeriodic.*", model):
row[2] = "nonperiodic-2grid"
else:
print "Unknown model {}".format(model)
assert False
return row
def get_bucket_event_count(event_count):
global event_count_buckets
global bucketing_factor
for bucket in event_count_buckets:
if (1.0 + bucketing_factor) * bucket >= event_count >= (1.0 - bucketing_factor) * bucket:
return bucket
return None
def read_results(input_db):
global experiment_db
cmd_str = "SELECT machine, kernel, type, model, iteration, event_count, final_time, cpu, maxmem, agents, " \
"connections FROM 'raw_results'"
for row in input_db.select(cmd_str):
if row[2] == "None":
row = get_correct_type(row)
bucket = get_bucket_event_count(row[5])
if bucket is None:
continue
cmd_str = "INSERT INTO raw_results " \
"(machine, kernel, type, model, iteration, event_count, final_time, cpu, maxmem, agents, " \
"connections, bucket) " \
"VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')" \
.format(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8],
row[9], row[10], bucket)
experiment_db.execute(cmd_str)
experiment_db.commit()
def generate_per_kernel_results_table():
global experiment_db
global kernel_results_table
global event_count_buckets
kernel_results_table = ResultsTable()
select_cmd = "SELECT kernel, bucket, model, event_count, agents, connections, cpu, maxmem FROM raw_results"
for row in experiment_db.select(select_cmd):
kernel_results_table.add_entry((row[0],), row[1], row[2], row[3], row[4], row[5], row[6], row[7])
kernel_results_table.create_filtered_table()
def generate_per_kernel_results_artifacts():
print "Generating per kernel artifacts"
global kernel_results_table
kernel_artifacts = KernelArtifacts(kernel_results_table)
kernel_artifacts.generate_multiline_plots()
kernel_artifacts.generate_fit_tables()
kernel_artifacts.generate_score_tables()
kernel_artifacts.generate_machine_comparison_tables()
print "Finished per kernel artifacts"
def generate_per_kernel_and_machine_results_table():
global experiment_db
global kernel_machine_results_table
global event_count_buckets
kernel_machine_results_table = ResultsTable()
select_cmd = "SELECT kernel, machine, bucket, model, event_count, agents, connections, cpu, maxmem FROM raw_results"
for row in experiment_db.select(select_cmd):
kernel_machine_results_table.add_entry((row[0], row[1]), row[2], row[3], row[4], row[5], row[6], row[7], row[8])
kernel_machine_results_table.create_filtered_table()
def generate_per_kernel_and_machine_results_artifacts():
print "Generating per kernel and machine artifacts"
global kernel_machine_results_table
kernel_and_machine_artifacts = KernelMachineArtifacts(kernel_machine_results_table)
kernel_and_machine_artifacts.generate_multiline_plots()
kernel_and_machine_artifacts.generate_fit_tables()
kernel_and_machine_artifacts.generate_score_tables()
kernel_and_machine_artifacts.generate_machine_comparison_tables()
print "Finished per kernel and machine artifacts"
def generate_per_kernel_and_machine_and_type_results_table():
global experiment_db
global kernel_machine_type_results_table
global event_count_buckets
kernel_machine_type_results_table = ResultsTable()
select_cmd = "SELECT kernel, machine, type, bucket, model, event_count, agents, connections, cpu, maxmem FROM " \
"raw_results"
for row in experiment_db.select(select_cmd):
kernel_machine_type_results_table.add_entry((row[0], row[1], row[2]), row[3], row[4], row[5], row[6], row[7],
row[8], row[9])
kernel_machine_type_results_table.create_filtered_table()
def generate_per_kernel_and_machine_and_type_results_artifacts():
print "Generating per kernel and machine and type artifacts"
global kernel_machine_type_results_table
kernel_and_machine_and_type_artifacts = KernelMachineTypeArtifacts(kernel_machine_type_results_table)
kernel_and_machine_and_type_artifacts.generate_fit_tables()
kernel_and_machine_and_type_artifacts.generate_score_tables()
kernel_and_machine_and_type_artifacts.generate_machine_comparison_tables()
print "Finished per kernel and machine and type artifacts"
def generate_fit_comparison_artifacts():
global fit_comparison_table
cpu_fit_list = list()
memory_fit_list = list()
for key, value in fit_comparison_table.iteritems():
if key[2] == "CPU":
cpu_fit_list.append((value, key,))
else:
memory_fit_list.append((value, key,))
cpu_fit_list.sort()
cpu_fit_list.reverse()
cpu_comparison_filename = os.path.join(FLAGS.root_dir, "cpu_fit_comparison_table.tex")
print "\tGenerating {}".format(cpu_comparison_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += r"""\begin{tabular}{|l|l|c|}
\hline
"""
output_latex += r"""Selection Keys & Independent Variables & $R^2$ \\
\hline
"""
for cpu_fit in cpu_fit_list:
score = cpu_fit[0]
entry = cpu_fit[1]
for key in entry[0]:
output_latex += "{} \& ".format(key)
output_latex = output_latex[:-4]
output_latex += " & "
for var in entry[1]:
output_latex += "{} \& ".format(var)
output_latex = output_latex[:-4]
output_latex += " & %5.4f" % float(score)
output_latex += r""" \\
"""
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{Comparisons for CPU fits}\n"
output_latex += "\\label{tab:cpu_fit_comparison}\n"
output_latex += r"""\end{table}"""
with open(cpu_comparison_filename, 'w') as f:
f.write(output_latex)
memory_fit_list.sort()
memory_fit_list.reverse()
memory_comparison_filename = os.path.join(FLAGS.root_dir, "memory_fit_comparison_table.tex")
print "\tGenerating {}".format(memory_comparison_filename)
output_latex = r"""\begin{table}[h]
\centering
"""
output_latex += r"""\begin{tabular}{|l|l|c|}
\hline
"""
output_latex += r"""Selection Keys & Independent Variables & $R^2$ \\
\hline
"""
for memory_fit in memory_fit_list:
score = memory_fit[0]
entry = memory_fit[1]
for key in entry[0]:
output_latex += "{} \& ".format(key)
output_latex = output_latex[:-4]
output_latex += " & "
for var in entry[1]:
output_latex += "{} \& ".format(var)
output_latex = output_latex[:-4]
output_latex += " & %5.4f" % float(score)
output_latex += r""" \\
"""
output_latex += r"""\hline
\end{tabular}
"""
output_latex += "\\caption{Comparisons for Memory fits}\n"
output_latex += "\\label{tab:memory_fit_comparison}\n"
output_latex += r"""\end{table}"""
with open(memory_comparison_filename, 'w') as f:
f.write(output_latex)
def process_raw_results():
generate_per_kernel_results_table()
generate_per_kernel_results_artifacts()
generate_per_kernel_and_machine_results_table()
generate_per_kernel_and_machine_results_artifacts()
generate_per_kernel_and_machine_and_type_results_table()
generate_per_kernel_and_machine_and_type_results_artifacts()
generate_fit_comparison_artifacts()
def main(argv):
global experiment_db
try:
FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
full_path = os.path.join(FLAGS.root_dir, FLAGS.output_db)
if FLAGS.read_inputs:
print "Unlinking {}".format(full_path)
try:
os.unlink(full_path)
except OSError, e:
print "Unable able to unlink {} due to {}".format(full_path, e)
else:
print "Reusing {}".format(full_path)
experiment_db = DBWrapper(full_path)
if FLAGS.read_inputs:
read_raw_inputs()
process_raw_results()
experiment_db.cleanup()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
mmottahedi/neuralnilm_prototype | scripts/e349.py | 2 | 6140 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
output_central_value=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
eg-zhang/scikit-learn | benchmarks/bench_mnist.py | 76 | 6136 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier                   train-time   test-time   error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
rssenar/PyToolkit | JoinDatasets.py | 1 | 2552 |
#!/usr/bin/env python3.4
# ---------------------------------------------------------------------------- #
import os, csv, glob, re
import pandas as pd
from Constants import ConvPercentage
from tqdm import tqdm
# ---------------------------------------------------------------------------- #
os.chdir('../../../../Desktop/')
# ---------------------------------------------------------------------------- #
File1 = 'a.csv'
File2 = 'b.csv'
ziproute = 0
Description = 1
Records = 2
total = 3
dfo = 4
Percentage = 5
RTotal = 6
AdjRec = 7
AdjRecPerc = 8
RecRTotal = 9
OutputHeaderRow = [
'ziproute',
'Description',
'Records',
'Total_Sat',
'Dist(m)',
'Sat%',
'R-TOTAL',
'ADJ_Rec',
'ADJ_Sat%',
'ADJ_R-TOTAL'
]
def Join():
ds1 = pd.read_csv(File1)
ds2 = pd.read_csv(File2)
merged = ds1.merge(ds2, how = 'inner')
merged['Percentage'] = ''
merged['RTotal'] = ''
merged['AdjRec'] = ''
merged['AdjRecPerc'] = ''
merged['AdjRecRTotal'] = ''
merged.to_csv('temp.csv', encoding = 'utf-8', index=False)
def ReformatOutputReport():
CSVFiles = glob.glob('temp.csv')
for file in tqdm(CSVFiles):
with open(file,'rU') as InputFile,\
open('DATA.csv','at') as OutputFile:
Input = csv.reader(InputFile)
Output = csv.writer(OutputFile)
Output.writerow(OutputHeaderRow)
RunningTotal = 0
AdjRecRTotal = 0
RowCounter = 2
next(InputFile)
for Row in tqdm(Input):
if int(Row[Records]) >= 135:
Row[dfo] = round(float(Row[dfo]),1)
Row[Percentage] = round(ConvPercentage(Row[Records],Row[total]),0)
Row[RTotal] = '=SUM($C$2:$C{})'.format(RowCounter)
if int(Row[Percentage]) >= 74:
Row[AdjRec] = round(float(Row[total]) * 0.73,0)
else:
Row[AdjRec] = Row[Records]
Row[AdjRecPerc] = round(ConvPercentage(Row[AdjRec],Row[total]),0)
Row[RecRTotal] = '=SUM($H$2:$H{})'.format(RowCounter)
Output.writerow(Row)
RowCounter += 1
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
print('=======================================')
print(' JOIN DATASETS ')
print('=======================================')
Join()
ReformatOutputReport()
Files = glob.glob('*.csv')
for Record in Files:
if bool(re.match(r'\btemp\b', Record, flags = re.I)):
os.remove(Record)
print('=======================================')
print(' COMPLETED ')
print()
| bsd-2-clause |
low-sky/pyspeckit | pyspeckit/spectrum/models/n2hp.py | 4 | 11414 | """
===========
N2H+ fitter
===========
Reference for line params:
Dore (Private Communication), improving on the determinations from
L. Pagani, F. Daniel, and M. L. Dubernet A&A 494, 719-727 (2009)
DOI: 10.1051/0004-6361:200810570
http://www.strw.leidenuniv.nl/~moldata/N2H+.html
http://adsabs.harvard.edu/abs/2005MNRAS.363.1083D
"""
from __future__ import print_function
import numpy as np
import matplotlib.cbook as mpcb
import copy
try:
from astropy.io import fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
from . import hyperfine
import astropy.units as u
freq_dict_cen ={
'J1-0': 93173.7637e6,
'J2-1': 186344.8420e6,
'J3-2': 279511.8325e6,
}
voff_lines_dict={
####### J 1-0
'J1-0_01': -7.9930,
'J1-0_02': -7.9930,
'J1-0_03': -7.9930,
'J1-0_04': -0.6112,
'J1-0_05': -0.6112,
'J1-0_06': -0.6112,
'J1-0_07': 0.0000,
'J1-0_08': 0.9533,
'J1-0_09': 0.9533,
'J1-0_10': 5.5371,
'J1-0_11': 5.5371,
'J1-0_12': 5.5371,
'J1-0_13': 5.9704,
'J1-0_14': 5.9704,
'J1-0_15': 6.9238,
####### J 2-1
'J2-1_01': -4.6258,
'J2-1_02': -4.5741,
'J2-1_03': -4.4376,
'J2-1_04': -4.2209,
'J2-1_05': -4.0976,
'J2-1_06': -3.8808,
'J2-1_07': -3.1619,
'J2-1_08': -2.9453,
'J2-1_09': -2.3469,
'J2-1_10': -1.9290,
'J2-1_11': -1.5888,
'J2-1_12': -1.5516,
'J2-1_13': -1.4523,
'J2-1_14': -1.1465,
'J2-1_15': -0.8065,
'J2-1_16': -0.6532,
'J2-1_17': -0.4694,
'J2-1_18': -0.1767,
'J2-1_19': 0.0000,
'J2-1_20': 0.0071,
'J2-1_21': 0.1137,
'J2-1_22': 0.1291,
'J2-1_23': 0.1617,
'J2-1_24': 0.2239,
'J2-1_25': 0.5237,
'J2-1_26': 0.6384,
'J2-1_27': 0.7405,
'J2-1_28': 2.1394,
'J2-1_29': 2.5158,
'J2-1_30': 2.5444,
'J2-1_31': 2.6225,
'J2-1_32': 2.8844,
'J2-1_33': 3.0325,
'J2-1_34': 3.0990,
'J2-1_35': 3.2981,
'J2-1_36': 3.5091,
'J2-1_37': 3.8148,
'J2-1_38': 3.8201,
'J2-1_39': 6.9891,
'J2-1_40': 7.5057,
####### J 3-2
'J3-2_01': -3.0666,
'J3-2_02': -2.9296,
'J3-2_03': -2.7221,
'J3-2_04': -2.6563,
'J3-2_05': -2.5270,
'J3-2_06': -2.4010,
'J3-2_07': -2.2535,
'J3-2_08': -2.1825,
'J3-2_09': -2.1277,
'J3-2_10': -1.5862,
'J3-2_11': -1.0158,
'J3-2_12': -0.6131,
'J3-2_13': -0.6093,
'J3-2_14': -0.5902,
'J3-2_15': -0.4872,
'J3-2_16': -0.4725,
'J3-2_17': -0.2757,
'J3-2_18': -0.0697,
'J3-2_19': -0.0616,
'J3-2_20': -0.0022,
'J3-2_21': 0.0000,
'J3-2_22': 0.0143,
'J3-2_23': 0.0542,
'J3-2_24': 0.0561,
'J3-2_25': 0.0575,
'J3-2_26': 0.0687,
'J3-2_27': 0.1887,
'J3-2_28': 0.2411,
'J3-2_29': 0.3781,
'J3-2_30': 0.4620,
'J3-2_31': 0.4798,
'J3-2_32': 0.5110,
'J3-2_33': 0.5540,
'J3-2_34': 0.7808,
'J3-2_35': 0.9066,
'J3-2_36': 1.6382,
'J3-2_37': 1.6980,
'J3-2_38': 2.1025,
'J3-2_39': 2.1236,
'J3-2_40': 2.1815,
'J3-2_41': 2.5281,
'J3-2_42': 2.6458,
'J3-2_43': 2.8052,
'J3-2_44': 3.0320,
'J3-2_45': 3.4963,
}
line_strength_dict = {
####### J 1-0
'J1-0_01': 0.025957,
'J1-0_02': 0.065372,
'J1-0_03': 0.019779,
'J1-0_04': 0.004376,
'J1-0_05': 0.034890,
'J1-0_06': 0.071844,
'J1-0_07': 0.259259,
'J1-0_08': 0.156480,
'J1-0_09': 0.028705,
'J1-0_10': 0.041361,
'J1-0_11': 0.013309,
'J1-0_12': 0.056442,
'J1-0_13': 0.156482,
'J1-0_14': 0.028705,
'J1-0_15': 0.037038,
####### J 2-1
'J2-1_01': 0.008272,
'J2-1_02': 0.005898,
'J2-1_03': 0.031247,
'J2-1_04': 0.013863,
'J2-1_05': 0.013357,
'J2-1_06': 0.010419,
'J2-1_07': 0.000218,
'J2-1_08': 0.000682,
'J2-1_09': 0.000152,
'J2-1_10': 0.001229,
'J2-1_11': 0.000950,
'J2-1_12': 0.000875,
'J2-1_13': 0.002527,
'J2-1_14': 0.000365,
'J2-1_15': 0.000164,
'J2-1_16': 0.021264,
'J2-1_17': 0.031139,
'J2-1_18': 0.000576,
'J2-1_19': 0.200000,
'J2-1_20': 0.001013,
'J2-1_21': 0.111589,
'J2-1_22': 0.088126,
'J2-1_23': 0.142604,
'J2-1_24': 0.011520,
'J2-1_25': 0.027608,
'J2-1_26': 0.012800,
'J2-1_27': 0.066354,
'J2-1_28': 0.013075,
'J2-1_29': 0.003198,
'J2-1_30': 0.061880,
'J2-1_31': 0.004914,
'J2-1_32': 0.035879,
'J2-1_33': 0.011026,
'J2-1_34': 0.039052,
'J2-1_35': 0.019767,
'J2-1_36': 0.004305,
'J2-1_37': 0.001814,
'J2-1_38': 0.000245,
'J2-1_39': 0.000029,
'J2-1_40': 0.000004,
####### J 3-2
'J3-2_01': 0.001845,
'J3-2_02': 0.001818,
'J3-2_03': 0.003539,
'J3-2_04': 0.014062,
'J3-2_05': 0.011432,
'J3-2_06': 0.000089,
'J3-2_07': 0.002204,
'J3-2_08': 0.002161,
'J3-2_09': 0.000061,
'J3-2_10': 0.000059,
'J3-2_11': 0.000212,
'J3-2_12': 0.000255,
'J3-2_13': 0.000247,
'J3-2_14': 0.000436,
'J3-2_15': 0.010208,
'J3-2_16': 0.000073,
'J3-2_17': 0.007447,
'J3-2_18': 0.000000,
'J3-2_19': 0.000155,
'J3-2_20': 0.000274,
'J3-2_21': 0.174603,
'J3-2_22': 0.018683,
'J3-2_23': 0.135607,
'J3-2_24': 0.100527,
'J3-2_25': 0.124866,
'J3-2_26': 0.060966,
'J3-2_27': 0.088480,
'J3-2_28': 0.001083,
'J3-2_29': 0.094510,
'J3-2_30': 0.014029,
'J3-2_31': 0.007191,
'J3-2_32': 0.022222,
'J3-2_33': 0.047915,
'J3-2_34': 0.015398,
'J3-2_35': 0.000071,
'J3-2_36': 0.000794,
'J3-2_37': 0.001372,
'J3-2_38': 0.007107,
'J3-2_39': 0.016618,
'J3-2_40': 0.009776,
'J3-2_41': 0.000997,
'J3-2_42': 0.000487,
'J3-2_43': 0.000069,
'J3-2_44': 0.000039,
'J3-2_45': 0.000010,
}
# Get frequency dictionary in Hz based on the offset velocity and rest frequency
conv_J10=u.doppler_radio(freq_dict_cen['J1-0']*u.Hz)
conv_J21=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)
conv_J32=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)
freq_dict = {
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J10).value) for name in voff_lines_dict.keys() if "J1-0" in name
}
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J21).value) for name in voff_lines_dict.keys() if "J2-1" in name
})
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J32).value) for name in voff_lines_dict.keys() if "J3-2" in name
})
# relative_strength_total_degeneracy is not used in the CLASS implementation
# of the hfs fit. It is the sum of the degeneracy values for all hyperfines
# for a given line; it gives the relative weights between lines.
# Hyperfine weights are treated as normalized within one rotational transition.
w10 = sum(val for name,val in line_strength_dict.items() if 'J1-0' in name)
w21 = sum(val for name,val in line_strength_dict.items() if 'J2-1' in name)
w32 = sum(val for name,val in line_strength_dict.items() if 'J3-2' in name)
relative_strength_total_degeneracy = {
name : w10 for name in line_strength_dict.keys() if "J1-0" in name
}
relative_strength_total_degeneracy.update({
name : w21 for name in line_strength_dict.keys() if "J2-1" in name
})
relative_strength_total_degeneracy.update({
name : w32 for name in line_strength_dict.keys() if "J3-2" in name
})
# Get the list of line names from the previous lists
line_names = [name for name in voff_lines_dict.keys()]
n2hp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
line_strength_dict,
relative_strength_total_degeneracy)
n2hp_vtau_fitter = n2hp_vtau.fitter
n2hp_vtau_vheight_fitter = n2hp_vtau.vheight_fitter
n2hp_vtau_tbg_fitter = n2hp_vtau.background_fitter
# RADEX part from old file
def n2hp_radex(xarr,
density=4,
column=13,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
grid_vwidth_scale=False,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
temperature_gridnumber=3,
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
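    Example (illustrative only; the grid file names below are placeholders for
    locally computed RADEX grids and are not files shipped with pyspeckit):
        spec = n2hp_radex(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
                          path_to_texgrid='n2hp_tex_grid.fits',
                          path_to_taugrid='n2hp_tau_grid.fits')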
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
# Convert X-units to frequency in GHz
xarr = copy.copy(xarr)
xarr.convert_to_unit('Hz', quiet=True)
tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
tau = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
print("density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau, tex))
if debug:
import pdb; pdb.set_trace()
return n2hp_vtau(xarr,Tex=tex,tau=tau,xoff_v=xoff_v,width=width,**kwargs)
| mit |
agoose77/hivesystem | manual/movingpanda/panda-7.py | 1 | 4435 | import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d, canvasargs
from bee.drone import dummydrone
from libcontext.pluginclasses import plugin_single_required
class parameters: pass
class myscene(bee.frame):
pandaclassname_ = bee.get_parameter("pandaclassname")
pandaname_ = bee.get_parameter("pandaname")
pandaicon_ = bee.get_parameter("pandaicon")
c1 = bee.configure("scene")
c1.import_mesh_EGG("models/environment")
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
c1.add_model_SPYDER(axissystem=a)
c2 = bee.configure("scene")
c2.import_mesh_EGG("models/panda-model")
a = Spyder.AxisSystem()
a *= 0.005
c2.add_actor_SPYDER(axissystem=a, entityname=pandaname_)
c2.import_mesh_EGG("models/panda-walk4")
c2.add_animation("walk")
c3 = bee.configure("scene")
c3.import_mesh_EGG("models/panda-model")
a = Spyder.AxisSystem()
a *= 0.005
c3.add_actorclass_SPYDER(axissystem=a, actorclassname=pandaclassname_)
c3.import_mesh_EGG("models/panda-walk4")
c3.add_animation("walk")
box = box2d(50, 470, 96, 96)
params = parameters()
params.transparency = True
args = canvasargs("pandaicon.png", pandaicon_, box, params)
plugin = plugin_single_required(args)
pattern = ("canvas", "draw", "init", ("object", "image"))
d1 = dummydrone(plugindict={pattern: plugin})
i1 = bee.init("mousearea")
i1.register(pandaicon_, box)
del a, box, params, args, plugin, pattern
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
pandaicon = "pandaicon"
pandaicon_ = bee.attribute("pandaicon")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
animation = dragonfly.scene.unbound.animation()
pandaid = dragonfly.std.variable("id")(pandaname_)
walk = dragonfly.std.variable("str")("walk")
connect(pandaid, animation.actor)
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
pandaicon_click = dragonfly.io.mouseareasensor(pandaicon_)
connect(pandaicon_click, do_spawn)
myscene = myscene(
scene="scene",
pandaname=pandaname_,
pandaclassname=pandaclassname_,
canvas=canvas,
mousearea=mousearea,
pandaicon=pandaicon_
)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
angleDegrees = task.time * 30.0
angleRadians = angleDegrees * (math.pi / 180.0)
camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
camera.setHpr(angleDegrees, 0, 0)
return Task.cont
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
| bsd-2-clause |
open2c/bioframe | bioframe/io/fileops.py | 1 | 21340 | from collections import OrderedDict
from contextlib import closing
import tempfile
import json
import io
import numpy as np
import pandas as pd
try:
import bbi
except ImportError:
bbi = None
try:
import pyBigWig
except ImportError:
pyBigWig = None
from ..core.stringops import parse_region
from ..core.arrops import argnatsort
from .schemas import SCHEMAS, BAM_FIELDS, GAP_FIELDS, UCSC_MRNA_FIELDS
__all__ = [
"read_table",
"read_chromsizes",
"read_tabix",
"read_pairix",
"read_bam",
"load_fasta",
"read_bigwig",
"to_bigwig",
"read_bigbed",
"to_bigbed",
"read_parquet",
"to_parquet",
]
def read_table(filepath_or, schema=None, **kwargs):
"""
Read a tab-delimited file into a data frame.
Equivalent to :func:`pandas.read_table` but supports an additional
`schema` argument to populate column names for common genomic formats.
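    Examples
    --------
    A minimal sketch; the file name is a placeholder and 'bed3' is assumed to
    be one of the keys defined in SCHEMAS:
    >>> df = read_table("intervals.bed.gz", schema="bed3")  # doctest: +SKIP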
"""
kwargs.setdefault("sep", "\t")
kwargs.setdefault("header", None)
if isinstance(filepath_or, str) and filepath_or.endswith(".gz"):
kwargs.setdefault("compression", "gzip")
if schema is not None:
try:
kwargs.setdefault("names", SCHEMAS[schema])
except (KeyError, TypeError):
if isinstance(schema, str):
raise ValueError("TSV schema not found: '{}'".format(schema))
kwargs.setdefault("names", schema)
return pd.read_csv(filepath_or, **kwargs)
def parse_gtf_attributes(attrs, kv_sep="=", item_sep=";", quotechar='"', **kwargs):
item_lists = attrs.str.split(item_sep)
item_lists = item_lists.apply(
lambda items: [item.strip().split(kv_sep) for item in items]
)
stripchars = quotechar + " "
item_lists = item_lists.apply(
lambda items: [
map(lambda x: x.strip(stripchars), item) for item in items if len(item) == 2
]
)
kv_records = item_lists.apply(dict)
return pd.DataFrame.from_records(kv_records, **kwargs)
def read_chromsizes(
filepath_or,
filter_chroms=True,
chrom_patterns=(r"^chr[0-9]+$", r"^chr[XY]$", r"^chrM$"),
natsort=True,
as_bed=False,
**kwargs
):
"""
Parse a ``<db>.chrom.sizes`` or ``<db>.chromInfo.txt`` file from the UCSC
database, where ``db`` is a genome assembly name.
Parameters
----------
filepath_or : str or file-like
Path or url to text file, or buffer.
filter_chroms : bool, optional
Filter for chromosome names given in ``chrom_patterns``.
chrom_patterns : sequence, optional
Sequence of regular expressions to capture desired sequence names.
natsort : bool, optional
Sort each captured group of names in natural order. Default is True.
as_bed : bool, optional
If True, return chromsizes as an interval dataframe (chrom, start, end).
**kwargs :
Passed to :func:`pandas.read_csv`
Returns
-------
Series of integer bp lengths indexed by sequence name or an interval dataframe.
Notes
-----
    The default ``chrom_patterns`` keep the primary UCSC-style sequence names
    (``chr1`` ... ``chrN``, ``chrX``, ``chrY``, ``chrM``) and drop scaffolds,
    alternate haplotypes and other extra sequences.
See also
--------
* UCSC assembly terminology: <http://genome.ucsc.edu/FAQ/FAQdownloads.html#download9>
* NCBI assembly terminology: <https://www.ncbi.nlm.nih.gov/grc/help/definitions>
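    Examples
    --------
    A short sketch; the file name is a placeholder for a UCSC-style
    two-column ``<db>.chrom.sizes`` file:
    >>> chromsizes = read_chromsizes("hg38.chrom.sizes")  # doctest: +SKIP
    >>> chromsizes["chr1"]  # doctest: +SKIP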
"""
if isinstance(filepath_or, str) and filepath_or.endswith(".gz"):
kwargs.setdefault("compression", "gzip")
chromtable = pd.read_csv(
filepath_or,
sep="\t",
usecols=[0, 1],
names=["name", "length"],
dtype={"name": str},
**kwargs
)
if filter_chroms:
parts = []
for pattern in chrom_patterns:
if not len(pattern):
continue
part = chromtable[chromtable["name"].str.contains(pattern)]
if natsort:
part = part.iloc[argnatsort(part["name"])]
parts.append(part)
chromtable = pd.concat(parts, axis=0)
if as_bed:
chromtable["start"] = 0
chromtable = (
chromtable[["name", "start", "length"]]
.rename({"name": "chrom", "length": "end"}, axis="columns")
.reset_index(drop=True)
)
else:
chromtable.index = chromtable["name"].values
chromtable = chromtable["length"]
return chromtable
def read_gapfile(filepath_or_fp, chroms=None, **kwargs):
gap = pd.read_csv(
filepath_or_fp,
sep="\t",
names=GAP_FIELDS,
usecols=["chrom", "start", "end", "length", "type", "bridge"],
**kwargs
)
if chroms is not None:
gap = gap[gap.chrom.isin(chroms)]
return gap
def read_ucsc_mrnafile(filepath_or_fp, chroms=None, **kwargs):
mrna = pd.read_csv(
filepath_or_fp,
sep="\t",
names=UCSC_MRNA_FIELDS,
# usecols=['chrom', 'start', 'end', 'length', 'type', 'bridge'],
**kwargs
)
if chroms is not None:
mrna = mrna[mrna.chrom.isin(chroms)]
return mrna
def read_tabix(fp, chrom=None, start=None, end=None):
import pysam
with closing(pysam.TabixFile(fp)) as f:
names = list(f.header) or None
df = pd.read_csv(
io.StringIO("\n".join(f.fetch(chrom, start, end))),
sep="\t",
header=None,
names=names,
)
return df
def read_pairix(
fp,
region1,
region2=None,
chromsizes=None,
columns=None,
usecols=None,
dtypes=None,
**kwargs
):
import pypairix
import cytoolz as toolz
if dtypes is None:
dtypes = {}
f = pypairix.open(fp, "r")
header = f.get_header()
if len(header):
header_groups = toolz.groupby(lambda x: x.split(":")[0], header)
if "#chromsize" in header_groups and chromsizes is None:
items = [line.split()[1:] for line in header_groups["#chromsize"]]
if len(items) and chromsizes is None:
names, lengths = zip(*((item[0], int(item[1])) for item in items))
chromsizes = pd.Series(index=names, data=lengths)
if "#columns" in header_groups and columns is None:
columns = header_groups["#columns"][0].split()[1:]
chrom1, start1, end1 = parse_region(region1, chromsizes)
if region2 is not None:
chrom2, start2, end2 = parse_region(region2, chromsizes)
else:
chrom2, start2, end2 = chrom1, start1, end1
it = f.query2D(chrom1, start1, end1, chrom2, start2, end2)
if usecols is not None:
argusecols = [columns.index(col) for col in usecols]
records = [(record[i] for i in argusecols) for record in it]
columns = usecols
else:
records = it
df = pd.DataFrame.from_records(records, columns=columns)
if columns is not None:
for col in columns:
if col in dtypes:
df[col] = df[col].astype(dtypes[col])
else:
df[col] = pd.to_numeric(df[col], "ignore")
return df
def read_bam(fp, chrom=None, start=None, end=None):
import pysam
with closing(pysam.AlignmentFile(fp, "rb")) as f:
bam_iter = f.fetch(chrom, start, end)
records = [
(
s.qname,
s.flag,
s.rname,
s.pos,
s.mapq,
s.cigarstring if s.mapq != 0 else np.nan,
s.rnext,
s.pnext,
s.tlen,
s.seq,
s.qual,
json.dumps(OrderedDict(s.tags)),
)
for s in bam_iter
]
df = pd.DataFrame(records, columns=BAM_FIELDS)
return df
def extract_centromeres(df, schema=None, merge=True):
if schema == "centromeres":
cens = df
elif schema == "cytoband":
cens = df[df["gieStain"] == "acen"]
elif schema == "gap":
cens = df[df["type"] == "centromere"]
else:
raise ValueError('`schema` must be one of {"centromeres", "cytoband", "gap"}.')
if merge:
cens = cens.groupby("chrom").agg({"start": np.min, "end": np.max}).reset_index()
cens["mid"] = (cens["start"] + cens["end"]) // 2
cens = (
cens[["chrom", "start", "end", "mid"]]
.sort_values("chrom")
.reset_index(drop=True)
)
return cens
class PysamFastaRecord(object):
def __init__(self, ff, ref):
self.ff = ff
if ref not in ff.references:
raise KeyError("Reference name '{}' not found in '{}'".format(ref, ff))
self.ref = ref
def __getitem__(self, key):
if isinstance(key, slice):
start, stop = key.start, key.stop
else:
start = key
stop = key + 1
return self.ff.fetch(self.ref, start, stop)
def load_fasta(filepath_or, engine="pysam", **kwargs):
"""
Load lazy fasta sequences from an indexed fasta file (optionally compressed)
or from a collection of uncompressed fasta files.
Parameters
----------
filepath_or : str or iterable
If a string, a filepath to a single `.fa` or `.fa.gz` file. Assumed to
be accompanied by a `.fai` index file. Depending on the engine, the
index may be created on the fly, and some compression formats may not
be supported. If not a string, an iterable of fasta file paths each
assumed to contain a single sequence.
engine : {'pysam', 'pyfaidx'}, optional
Module to use for loading sequences.
kwargs : optional
Options to pass to ``pysam.FastaFile`` or ``pyfaidx.Fasta``.
Returns
-------
OrderedDict of (lazy) fasta records.
Notes
-----
* pysam/samtools can read .fai and .gzi indexed files, I think.
* pyfaidx can handle uncompressed and bgzf compressed files.
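    Examples
    --------
    Illustrative usage; the FASTA path is a placeholder and is assumed to be
    accompanied by a .fai index:
    >>> records = load_fasta("hg38.fa")  # doctest: +SKIP
    >>> seq = records["chr1"][10000:10100]  # doctest: +SKIP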
"""
is_multifile = not isinstance(filepath_or, str)
records = OrderedDict()
engine = engine.lower()
if engine == "pysam":
try:
import pysam
except ImportError:
raise ImportError("pysam is required to use engine='pysam'")
if is_multifile:
for onefile in filepath_or:
ff = pysam.FastaFile(onefile, **kwargs)
name = ff.references[0]
records[name] = PysamFastaRecord(ff, name)
else:
ff = pysam.FastaFile(filepath_or, **kwargs)
for name in ff.references:
records[name] = PysamFastaRecord(ff, name)
elif engine == "pyfaidx":
try:
import pyfaidx
except ImportError:
raise ImportError("pyfaidx is required to use engine='pyfaidx'")
if is_multifile:
for onefile in filepath_or:
ff = pyfaidx.Fasta(onefile, **kwargs)
name = next(iter(ff.keys()))
records[name] = ff[name]
else:
ff = pyfaidx.Fasta(filepath_or, **kwargs)
for name in ff.keys():
records[name] = ff[name]
else:
raise ValueError("engine must be 'pysam' or 'pyfaidx'")
return records
def read_bigwig(path, chrom, start=None, end=None, engine="auto"):
"""
Read intervals from a bigWig file.
Parameters
----------
path : str
Path or URL to a bigWig file
chrom : str
start, end : int, optional
Start and end coordinates. Defaults to 0 and chromosome length.
engine : {"auto", "pybbi", "pybigwig"}
Library to use for querying the bigWig file.
Returns
-------
DataFrame
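    Examples
    --------
    A minimal sketch; the bigWig path is a placeholder:
    >>> df = read_bigwig("signal.bw", "chr1", start=0, end=1_000_000)  # doctest: +SKIP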
"""
engine = engine.lower()
if engine == "auto":
if bbi is None and pyBigWig is None:
raise ImportError(
"read_bigwig requires either the pybbi or pyBigWig package"
)
elif bbi is not None:
engine = "pybbi"
else:
engine = "pybigwig"
if engine in ("pybbi", "bbi"):
if start is None:
start = 0
if end is None:
end = -1
with bbi.open(path) as f:
df = f.fetch_intervals(chrom, start=start, end=end)
elif engine == "pybigwig":
f = pyBigWig.open(path)
if start is None:
start = 0
if end is None:
end = f.chroms()[chrom]
ivals = f.intervals(chrom, start, end)
df = pd.DataFrame(ivals, columns=["start", "end", "value"])
df.insert(0, "chrom", chrom)
else:
raise ValueError(
"engine must be 'auto', 'pybbi' or 'pybigwig'; got {}".format(engine)
)
return df
def read_bigbed(path, chrom, start=None, end=None, engine="auto"):
"""
Read intervals from a bigBed file.
Parameters
----------
path : str
Path or URL to a bigBed file
chrom : str
start, end : int, optional
Start and end coordinates. Defaults to 0 and chromosome length.
engine : {"auto", "pybbi", "pybigwig"}
Library to use for querying the bigBed file.
Returns
-------
DataFrame
"""
engine = engine.lower()
if engine == "auto":
if bbi is None and pyBigWig is None:
raise ImportError(
"read_bigbed requires either the pybbi or pyBigWig package"
)
elif bbi is not None:
engine = "pybbi"
else:
engine = "pybigwig"
if engine in ("pybbi", "bbi"):
if start is None:
start = 0
if end is None:
end = -1
with bbi.open(path) as f:
df = f.fetch_intervals(chrom, start=start, end=end)
elif engine == "pybigwig":
f = pyBigWig.open(path)
if start is None:
start = 0
if end is None:
end = f.chroms()[chrom]
ivals = f.entries(chrom, start, end)
df = pd.DataFrame(ivals, columns=["start", "end", "rest"])
df.insert(0, "chrom", chrom)
else:
raise ValueError(
"engine must be 'auto', 'pybbi' or 'pybigwig'; got {}".format(engine)
)
return df
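# Companion sketch for read_bigbed (same assumptions as above; "peaks.bb" is a
# hypothetical bigBed file). Note that with the pyBigWig engine the
# non-coordinate BED fields come back packed into a single 'rest' column.
def _example_read_bigbed():
    df = read_bigbed("peaks.bb", "chr1")
    return len(df)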
def to_bigwig(df, chromsizes, outpath, value_field=None):
"""
Save a bedGraph-like dataframe as a binary BigWig track.
Parameters
----------
df : pandas.DataFrame
Data frame with columns 'chrom', 'start', 'end' and one or more value
columns
chromsizes : pandas.Series
Series indexed by chromosome name mapping to their lengths in bp
outpath : str
The output BigWig file path
value_field : str, optional
Select the column label of the data frame to generate the track. Default
is to use the fourth column.
"""
is_bedgraph = True
for col in ["chrom", "start", "end"]:
if col not in df.columns:
is_bedgraph = False
if len(df.columns) < 4:
is_bedgraph = False
if not is_bedgraph:
raise ValueError(
"A bedGraph-like DataFrame is required, got {}".format(df.columns)
)
if value_field is None:
value_field = df.columns[3]
columns = ["chrom", "start", "end", value_field]
bg = df[columns].copy()
bg["chrom"] = bg["chrom"].astype(str)
bg = bg.sort_values(["chrom", "start", "end"])
with tempfile.NamedTemporaryFile(suffix=".bg") as f, tempfile.NamedTemporaryFile(
"wt", suffix=".chrom.sizes"
) as cs:
chromsizes.to_csv(cs, sep="\t", header=False)
cs.flush()
bg.to_csv(
f.name, sep="\t", columns=columns, index=False, header=False, na_rep="nan"
)
p = subprocess.run(
["bedGraphToBigWig", f.name, cs.name, outpath],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return p
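# Illustrative usage sketch (an addition, not part of the original module):
# to_bigwig shells out to the UCSC 'bedGraphToBigWig' binary, so this assumes
# that tool is on PATH; the toy DataFrame, chromsizes and output path are
# made up.
def _example_to_bigwig():
    df = pd.DataFrame({
        "chrom": ["chr1", "chr1"],
        "start": [0, 100],
        "end": [100, 200],
        "score": [1.0, 2.5],
    })
    chromsizes = pd.Series({"chr1": 1000})
    return to_bigwig(df, chromsizes, "toy.bw", value_field="score")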
def to_bigbed(df, chromsizes, outpath, schema="bed6"):
"""
    Save a BED-like dataframe as a binary BigBed track.
    Parameters
    ----------
    df : pandas.DataFrame
        Data frame with columns 'chrom', 'start', 'end', 'name', 'score' and
        'strand'
    chromsizes : pandas.Series
        Series indexed by chromosome name mapping to their lengths in bp
    outpath : str
        The output BigBed file path
    schema : str, optional
        BED schema passed to ``bedToBigBed`` via its ``-type`` option.
        Default is 'bed6'.
"""
import tempfile
import subprocess
is_bed6 = True
for col in ["chrom", "start", "end", "name", "score", "strand"]:
if col not in df.columns:
is_bed6 = False
if len(df.columns) < 6:
is_bed6 = False
if not is_bed6:
raise ValueError("A bed6-like DataFrame is required, got {}".format(df.columns))
columns = ["chrom", "start", "end", "name", "score", "strand"]
bed = df[columns].copy()
bed["chrom"] = bed["chrom"].astype(str)
bed = bed.sort_values(["chrom", "start", "end"])
with tempfile.NamedTemporaryFile(suffix=".bed") as f, tempfile.NamedTemporaryFile(
"wt", suffix=".chrom.sizes"
) as cs:
chromsizes.to_csv(cs, sep="\t", header=False)
cs.flush()
bed.to_csv(
f.name, sep="\t", columns=columns, index=False, header=False, na_rep="nan"
)
p = subprocess.run(
["bedToBigBed", "-type={}".format(schema), f.name, cs.name, outpath],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return p
def to_parquet(
pieces,
outpath,
row_group_size=None,
compression="snappy",
use_dictionary=True,
version=2.0,
**kwargs
):
"""
Save an iterable of dataframe chunks to a single Apache Parquet file. For
more info about Parquet, see https://arrow.apache.org/docs/python/parquet.html.
Parameters
----------
pieces : DataFrame or iterable of DataFrame
Chunks to write
outpath : str
Path to output file
row_group_size : int
Number of rows per row group
compression : {'snappy', 'gzip', 'brotli', 'none'}, optional
Compression algorithm. Can be set on a per-column basis with a
dictionary of column names to compression lib.
use_dictionary : bool, optional
Use dictionary encoding. Can be set on a per-column basis with a list
of column names.
See also
--------
pyarrow.parquet.write_table
pyarrow.parquet.ParquetFile
fastparquet
"""
try:
import pyarrow.parquet
import pyarrow as pa
except ImportError:
raise ImportError("Saving to parquet requires the `pyarrow` package")
if isinstance(pieces, pd.DataFrame):
pieces = (pieces,)
    writer = None
    try:
        for i, piece in enumerate(pieces):
            table = pa.Table.from_pandas(piece, preserve_index=False)
            if i == 0:
                writer = pa.parquet.ParquetWriter(
                    outpath,
                    table.schema,
                    compression=compression,
                    use_dictionary=use_dictionary,
                    version=version,
                    **kwargs
                )
            writer.write_table(table, row_group_size=row_group_size)
    finally:
        # Close only if a writer was created; an empty iterable or an early
        # failure would otherwise raise NameError here.
        if writer is not None:
            writer.close()
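# Illustrative usage sketch (an addition, not part of the original module):
# the chunk generator and output path are hypothetical, and pyarrow must be
# installed. Writing chunk-by-chunk keeps memory usage flat because each
# DataFrame is appended as it arrives.
def _example_to_parquet():
    chunks = (pd.DataFrame({"x": range(i, i + 10)}) for i in range(0, 100, 10))
    to_parquet(chunks, "chunks.parquet", row_group_size=10)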
def read_parquet(filepath, columns=None, iterator=False, **kwargs):
"""
Load DataFrames from Parquet files, optionally in pieces.
Parameters
----------
filepath : str, pathlib.Path, pyarrow.NativeFile, or file-like object
Readable source. For passing bytes or buffer-like file containing a
        Parquet file, use pyarrow.BufferReader
columns: list
If not None, only these columns will be read from the row groups. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'
iterator : boolean, default False
Return an iterator object that yields row group DataFrames and
provides the ParquetFile interface.
use_threads : boolean, default True
Perform multi-threaded column reads
memory_map : boolean, default True
If the source is a file path, use a memory map to read file, which can
improve performance in some environments
Returns
-------
DataFrame or ParquetFileIterator
"""
use_threads = kwargs.pop("use_threads", True)
if not iterator:
return pd.read_parquet(
filepath, columns=columns, use_threads=use_threads, **kwargs
)
else:
try:
from pyarrow.parquet import ParquetFile
except ImportError:
raise ImportError(
"Iterating over Parquet data requires the `pyarrow` package."
)
class ParquetFileIterator(ParquetFile):
def __iter__(self):
return self
def __next__(self):
if not hasattr(self, "_rgid"):
self._rgid = 0
if self._rgid < self.num_row_groups:
rg = self.read_row_group(
self._rgid,
columns=columns,
use_threads=use_threads,
use_pandas_metadata=True,
)
self._rgid += 1
else:
raise StopIteration
return rg.to_pandas()
return ParquetFileIterator(filepath, **kwargs)
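# Illustrative usage sketch (an addition, not part of the original module):
# with iterator=True the row groups written above can be streamed back one
# DataFrame at a time; the file name is hypothetical.
def _example_read_parquet_iter():
    total = 0
    for chunk in read_parquet("chunks.parquet", iterator=True):
        total += len(chunk)
    return total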
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
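# Added aside (not part of the original example): a fitted classifier can
# score new measurements directly. The sepal values below are made up; the
# assignment keeps the prediction around without affecting the plots.
example_prediction = logreg.predict([[5.1, 3.5]])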
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
clemkoa/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 39 | 7489 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with high probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
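# Added aside (not part of the original example): a quick numeric check of the
# bound quoted in the docstring. For 500 samples and eps = 0.1 the lemma
# already demands several thousand dimensions, consistent with the Remarks.
_bound_example = johnson_lindenstrauss_min_dim(n_samples=500, eps=0.1)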
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.), edgecolor='k')
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
teoliphant/numpy-refactor | numpy/lib/twodim_base.py | 5 | 22944 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
    Equivalent to ``A[:, ::-1]``. Requires the array to be at least
    two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must be >= 2-d."
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError, "Input must be >= 1-d."
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must >= 2-d."
k = k % 4
if k == 0: return m
elif k == 1: return fliplr(m).swapaxes(0,1)
elif k == 2: return fliplr(flipud(m))
else: return fliplr(m.swapaxes(0,1)) # k==3
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 refers to the main diagonal, a positive value
refers to an upper diagonal, and a negative value to a lower diagonal.
dtype : dtype, optional
Data-type of the returned array.
Returns
-------
I : ndarray (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
diag : Return a diagonal 2-D array using a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triange of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
if k >= s[1]:
return empty(0, dtype=v.dtype)
if v.flags.f_contiguous:
# faster slicing
v, k, s = v.T, -k, s[::-1]
if k >= 0:
i = k
else:
i = (-k) * s[1]
return v[:s[1]-k].flat[i::s[1]+1]
else:
raise ValueError, "Input must be 1- or 2-d."
def diagflat(v,k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set. The default is 0.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : Matlab workalike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
if (k>=0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
Construct an array filled with ones at and below the given diagonal.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
T : (N,M) ndarray
        Array with a lower triangle filled with ones, in other words
        ``T[i,j] == 1`` for ``j <= i + k``.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None: M = N
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int
Diagonal above which to zero elements.
`k = 0` is the main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
L : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Construct a copy of a matrix with elements below the k-th diagonal zeroed.
Please refer to the documentation for `tril`.
See Also
--------
tril
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1-tri(m.shape[0], m.shape[1], k-1, int)),m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
Generate a Van der Monde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the i-th output column is the input vector to
the power of ``N - i - 1``. Such a matrix with a geometric progression
in each row is named Van Der Monde, or Vandermonde matrix, from
Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
References
----------
.. [1] Wikipedia, "Vandermonde matrix",
http://en.wikipedia.org/wiki/Vandermonde_matrix
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None: N=len(x)
X = ones( (len(x),N), x.dtype)
for i in range(N-1):
X[:,i] = x**(N-i-1)
return X
def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
the area of bin `{i,j}`.
Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5, 8), (6,), (9,))
We can now use the Matplotlib to visualize this 2-dimensional histogram:
>>> extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent)
<matplotlib.image.AxesImage object at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n,mask_func,k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n),int)
a = mask_func(m,k)
return where(a != 0)
def tril_indices(n,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n,tril,k)
def tril_indices_from(arr,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
See `tril_indices` for full details.
Parameters
----------
    arr : ndarray, shape(N, N)
        The indices will be valid for square arrays whose dimensions are
        the same as those of `arr`.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
    if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
        raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0],k)
def triu_indices(n,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n,triu,k)
def triu_indices_from(arr,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
See `triu_indices` for full details.
Parameters
----------
    arr : ndarray, shape(N, N)
        The indices will be valid for square arrays whose dimensions are
        the same as those of `arr`.
k : int, optional
Diagonal offset (see `triu` for details).
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
    if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
        raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0],k)
| bsd-3-clause |
boddmg/dsp-playground | experiment.py | 1 | 2717 | from matplotlib import pyplot as plt
import numpy as np
import math
import pickle
from scipy import signal
from numpy.fft import rfft, irfft
from numpy import argmax, sqrt, mean, absolute, arange, log10
from scipy.signal import blackmanharris
import thdn
def single_frequency_filter(input_signal):
    # Zero the DC component, then locate the strongest bin in the
    # positive-frequency half of the spectrum.
    y_f_all = np.fft.fft(input_signal)
    y_f_all[:1] = np.array([0] * 1)
    y_f_half = y_f_all[:len(y_f_all) // 2]
    y_f_abs = np.abs(y_f_half)
    y_f_max = max(y_f_abs)
    y_f_max_index = np.where(y_f_abs == y_f_max)[0][0]
    print(y_f_max_index)
    # Suppress every other bin and transform back to the time domain.
    y_f_all[:y_f_max_index] = [0] * y_f_max_index
    y_f_all[y_f_max_index + 1:] = [0] * (len(y_f_all) - y_f_max_index - 1)
    y_filtered = np.fft.ifft(y_f_all)
    return y_filtered
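# Added self-check sketch (not part of the original script): feeding a pure
# 50 Hz tone plus noise through the filter should leave a signal dominated by
# that single frequency. The sampling setup below is made up; note the result
# is complex-valued because only one positive-frequency bin survives.
def _example_single_frequency_filter():
    t = np.arange(0, 1.0, 1.0 / 1024)
    noisy = np.sin(2 * np.pi * 50 * t) + 0.2 * np.random.randn(len(t))
    return single_frequency_filter(noisy)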
FS = 204.8
BASE_FREQUENCY = 50.0
FILTER_SAMPLE_PURE = int(2*1/BASE_FREQUENCY * FS) # 2T
FILTER_SAMPLE_ALL = 2048
DF = FS/FILTER_SAMPLE_ALL
print(DF)
filter_buffer = []
def single_filter(x):
return x
def main():
import json
# get data
data = json.load(open("data.txt", "r"))
# y = np.concatenate((np.load("data.pkl")[:256], np.array([0] * (600 - 256))))
# y = np.concatenate((np.load("data.pkl")[:], [0]*6000))
y = np.array(data["Y"])
# y = signal.resample(y, 5000)
# fs = 5000.0 * (10000/200)
fs = 1 / data["dt"]
print("fs:\t", fs)
time = len(y)/fs # in seconds
x = np.arange(0, time, 1/fs)
# x = x[:-1]
# for i in meta_data:
# print(i, meta_data[i])
print("time",time)
end = 40
f = x[:end]
f = f * fs / time
# Add the noise
# y = y.clip(-10, 7)
# y += (np.sin(x * 5) * 2).clip (-0.3, 0.8)
# y += np.random.uniform(size=len(y))
plt.subplot(231)
plt.plot(x, y, 'r')
plt.subplot(232)
y_filtered = y.tolist()
y_list = y.tolist()
for i in range(len(y_list)):
y_filtered[i] = single_filter(y_list[i])
y_filtered = np.array(y_filtered)
# y_filtered = single_frequency_filter(y)*10
# filter_function = np.array(([0] * 6 + [1] + [0] * (600 - 7)))
# filter_function = np.fft.ifft(filter_function)
# y_filtered = np.fft.ifft(y_f * filter_function)
# y_filtered = np.convolve(y_filtered, filter_function, mode='same')
# y_filtered = np.sin(x*np.pi*2* )*10
plt.plot(x, y_filtered, "b")
plt.subplot(233)
plt.plot(x[:end], y[:end], "r", x[:end], y_filtered[:end], "b")
plt.subplot(234)
y = np.abs(np.fft.fft(y))
y = y[:end]
plt.plot(f, y)
plt.subplot(235)
y_filtered = np.abs(np.fft.fft(y_filtered))
y_filtered = y_filtered[:end]
plt.plot(f, y_filtered)
plt.subplot(236)
plt.plot(f, y, 'r', f, y_filtered, 'b')
plt.show()
if __name__ == '__main__':
main()
| mit |
pratapvardhan/pandas | pandas/tests/test_base.py | 2 | 46174 | # -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.compat as compat
from pandas.core.dtypes.common import (
is_object_dtype, is_datetimetz, is_datetime64_dtype,
needs_i8_conversion)
import pandas.util.testing as tm
from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex,
PeriodIndex, Timedelta, IntervalIndex, Interval,
CategoricalIndex, Timestamp)
from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.accessor import PandasDelegate
from pandas.core.base import PandasObject, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas._libs.tslib import iNaT
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container) # noqa
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
pytest.skip('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container) # noqa
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to assert_raises_regex
# (after the Exception kind).
tm.assert_raises_regex(
TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate(object):
class Delegator(object):
_properties = ['foo']
_methods = ['bar']
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ='property'
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._methods,
typ='method'
)
delegate = self.Delegate(self.Delegator())
def f():
delegate.foo
pytest.raises(TypeError, f)
def f():
delegate.foo = 5
pytest.raises(TypeError, f)
def f():
delegate.foo()
pytest.raises(TypeError, f)
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops(object):
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and
(obj.is_boolean() or not obj._can_hold_na)):
# don't test boolean / int64 index
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name='a')
self.int_index = tm.makeIntIndex(10, name='a')
self.float_index = tm.makeFloatIndex(10, name='a')
self.dt_index = tm.makeDateIndex(10, name='a')
self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(
tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10, name='a')
self.string_index = tm.makeStringIndex(10, name='a')
self.unicode_index = tm.makeUnicodeIndex(10, name='a')
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index, name='a')
self.float_series = Series(arr, index=self.float_index, name='a')
self.dt_series = Series(arr, index=self.dt_index, name='a')
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name='a')
self.string_series = Series(arr, index=self.string_index, name='a')
types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',
'unicode']
fmts = ["{0}_{1}".format(t, f)
for t in types for f in ['index', 'series']]
self.objs = [getattr(self, f)
for f in fmts if getattr(self, f, None) is not None]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(
getattr(o.index, op), index=o.index, name='a')
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
                # these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(expected,
np.ndarray):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
pytest.raises(TypeError, lambda: getattr(o, op))
else:
pytest.raises(AttributeError,
lambda: getattr(o, op))
def test_binary_ops_docs(self):
from pandas import DataFrame, Panel
op_map = {'add': '+',
'sub': '-',
'mul': '*',
'mod': '%',
'pow': '**',
'truediv': '/',
'floordiv': '//'}
for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',
'floordiv']:
for klass in [Series, DataFrame, Panel]:
operand1 = klass.__name__.lower()
operand2 = 'other'
op = op_map[op_name]
expected_str = ' '.join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = ' '.join([operand2, op, operand1])
assert expected_str in getattr(klass, 'r' + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super(TestIndexOps, self).setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
# this fails for numpy < 1.9
# and oddly for *some* platforms
# result = None != o # noqa
# assert result.iat[0]
# assert result.iat[1]
if (is_datetime64_dtype(o) or is_datetimetz(o)):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ['shape', 'dtype', 'T', 'nbytes']:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ['flags', 'strides', 'itemsize']:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, 'base')
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_ops(self):
for op in ['max', 'min']:
for o in self.objs:
result = getattr(o, op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(
ordinal=getattr(o._ndarray_values, op)(),
freq=o.freq)
try:
assert result == expected
except TypeError:
# comparing tz-aware series with np.array results in
# TypeError
expected = expected.astype('M8[ns]').astype('int64')
assert result.value == expected
def test_nanops(self):
# GH 7261
for op in ['max', 'min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
assert getattr(obj, op)() == 2.0
obj = klass([np.nan])
assert pd.isna(getattr(obj, op)())
obj = klass([])
assert pd.isna(getattr(obj, op)())
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
assert getattr(obj, op)() == datetime(2011, 11, 1)
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
                assert getattr(obj, op)() == datetime(2011, 11, 1)
# argmin/max
obj = Index(np.arange(5, dtype='int64'))
assert obj.argmin() == 0
assert obj.argmax() == 4
obj = Index([np.nan, 1, np.nan, 2])
assert obj.argmin() == 1
assert obj.argmax() == 3
obj = Index([np.nan])
assert obj.argmin() == -1
assert obj.argmax() == -1
obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),
pd.NaT])
assert obj.argmin() == 1
assert obj.argmax() == 2
obj = Index([pd.NaT])
assert obj.argmin() == -1
assert obj.argmax() == -1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
rep = np.repeat(values, range(1, len(o) + 1))
o = klass(rep, index=idx, name='a')
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(range(10, 0, -1), index=expected_index,
dtype='int64', name='a')
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == 'a'
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
elif is_datetimetz(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(result,
orig._values.astype(object).values)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert o.nunique() == len(np.unique(o.values))
def test_value_counts_unique_nunique_null(self):
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetimetz(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = iNaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = 'a'
else:
if is_datetimetz(o):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name='a')
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype='int64', name='a')
expected_s = Series(list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype='int64', name='a')
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == 'a'
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == 'a'
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result,
Index(values[1:], name='a'))
elif is_datetimetz(o):
# unable to compare NaT / nan
vals = values[2:].astype(object).values
tm.assert_numpy_array_equal(result[1:], vals)
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
# bins
pytest.raises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0],
index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan,
'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(['a', 'b', np.nan, 'd'])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(['a', 'b', np.nan, 'd'], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected,
check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]),
check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize('klass', [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',
'xxyyzz20100101EGG', 'xxyyww20090101EGG',
'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3],
names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
s.name = None
idx = pd.to_datetime(['2010-01-01 00:00:00Z',
'2008-09-09 00:00:00Z',
'2009-01-01 00:00:00Z'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z',
'2009-01-01 00:00:00Z',
'2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == 'datetime64[ns]'
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == 'datetime64[ns]'
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name='dt')
result = td.value_counts()
expected_s = Series([6], index=[Timedelta('1day')], name='dt')
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(['1 days'], name='dt')
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name='dt')
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
                exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
labels, uniques = o.factorize()
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig),
check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques,
check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
dtype=np.intp)
labels, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig).sort_values(),
check_names=False)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4],
np.intp)
labels, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name='a')
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True],
dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep='last')
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with tm.assert_raises_regex(
TypeError, r"drop_duplicates\(\) got an unexpected "
"keyword argument"):
idx.drop_duplicates(inplace=True)
else:
expected = Series([False] * len(original),
index=original.index, name='a')
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name='a')
expected = Series([False] * len(original) + [True, True],
index=idx, name='a')
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep='last'), expected)
tm.assert_series_equal(s.drop_duplicates(keep='last'),
s[~np.array(base)])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(s.drop_duplicates(keep=False),
s[~np.array(base)])
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'],
'b': [2, 2, np.nan, np.nan, np.nan],
'c': [3, 3, np.nan, np.nan, 'three'],
'd': [1, 2, 3, 4, 4],
'e': [datetime(2015, 1, 1), datetime(2015, 1, 1),
datetime(2015, 2, 1), pd.NaT, pd.NaT]
})
for column in df.columns:
for keep in ['first', 'last', False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if (is_object_dtype(o) or (isinstance(o, Series) and
is_object_dtype(o.index))):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert ((o.memory_usage(index=False) +
o.index.memory_usage()) ==
o.memory_usage(index=True))
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(obj.transpose(), obj)
else:
tm.assert_series_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, 1)
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(np.transpose(obj), obj)
else:
tm.assert_series_equal(np.transpose(obj), obj)
tm.assert_raises_regex(ValueError, self.errmsg,
np.transpose, obj, axes=1)
class TestNoNewAttributesMixin(object):
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
def f():
t.b = "test"
pytest.raises(AttributeError, f)
assert not hasattr(t, "b")
class TestToIterable(object):
# test that we convert an iterable to python types
dtypes = [
('int8', (int, long)),
('int16', (int, long)),
('int32', (int, long)),
('int64', (int, long)),
('uint8', (int, long)),
('uint16', (int, long)),
('uint32', (int, long)),
('uint64', (int, long)),
('float16', float),
('float32', float),
('float64', float),
('datetime64[ns]', Timestamp),
('datetime64[ns, US/Eastern]', Timestamp),
('timedelta64[ns]', Timedelta)]
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype, obj',
[
('object', object, 'a'),
('object', (int, long), 1),
('category', object, 'a'),
('category', (int, long), 1)])
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_object_and_category(self, typ, method,
dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test items / iteritems yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.iteritems())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype',
dtypes + [
('object', (int, long)),
('category', (int, long))])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp('1999-12-31'),
Timestamp('2000-12-31')])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp('2011-01-01'), Timestamp('2011-01-02')]
s = Series(vals)
assert s.dtype == 'datetime64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [Timestamp('2011-01-01', tz='US/Eastern'),
Timestamp('2011-01-02', tz='US/Eastern')]
s = Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta('1 days'), Timedelta('2 days')]
s = Series(vals)
assert s.dtype == 'timedelta64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = Series(vals)
assert s.dtype == 'object'
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == 'M'
assert res == exp
@pytest.mark.parametrize('array, expected_type, dtype', [
(np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'),
(np.array(['a', 'b']), np.ndarray, 'object'),
(pd.Categorical(['a', 'b']), pd.Categorical, 'category'),
(pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'),
(pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex,
'datetime64[ns, US/Central]'),
(pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'),
(pd.PeriodIndex([2018, 2019], freq='A'), np.ndarray, 'object'),
(pd.IntervalIndex.from_breaks([0, 1, 2]), np.ndarray, 'object'),
])
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
if isinstance(l_values, np.ndarray):
tm.assert_numpy_array_equal(l_values, r_values)
elif isinstance(l_values, pd.Index):
tm.assert_index_equal(l_values, r_values)
elif pd.api.types.is_categorical(l_values):
tm.assert_categorical_equal(l_values, r_values)
else:
raise TypeError("Unexpected type {}".format(type(l_values)))
assert l_values.dtype == dtype
assert r_values.dtype == dtype
@pytest.mark.parametrize('array, expected', [
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(['0', '1']), np.array(['0', '1'], dtype=object)),
(pd.Categorical(['a', 'a']), np.array([0, 0], dtype='int8')),
(pd.DatetimeIndex(['2017-01-01T00:00:00']),
np.array(['2017-01-01T00:00:00'], dtype='M8[ns]')),
(pd.DatetimeIndex(['2017-01-01T00:00:00'], tz="US/Eastern"),
np.array(['2017-01-01T05:00:00'], dtype='M8[ns]')),
(pd.TimedeltaIndex([10**10]), np.array([10**10], dtype='m8[ns]')),
pytest.param(
pd.PeriodIndex(['2017', '2018'], freq='D'),
np.array([17167, 17532]),
marks=pytest.mark.xfail(reason="PeriodArray Not implemented")
),
])
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
| bsd-3-clause |
tensorflow/models | research/delf/delf/python/examples/extract_boxes.py | 1 | 7510 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from delf import box_io
from delf import utils
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
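# Editor's note: the loop in _FilterBoxesByScore is equivalent to a single
# boolean mask over `scores`. The sketch below is illustrative only and is not
# called anywhere in this script; it assumes the same [N, 4]/[N] array shapes.
def _FilterBoxesByScoreVectorized(boxes, scores, class_indices, score_threshold):
  """Vectorized variant of _FilterBoxesByScore (illustrative editor sketch)."""
  keep = scores >= score_threshold
  return boxes[keep], scores[keep], class_indices[keep]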
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Create output directories if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.io.gfile.exists(
cmd_args.output_viz_dir):
tf.io.gfile.makedirs(cmd_args.output_viz_dir)
detector_fn = detector.MakeDetector(cmd_args.detector_path)
start = time.time()
for i, image_path in enumerate(image_paths):
# Report progress once in a while.
if i == 0:
print('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
# If descriptor already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir, out_boxes_filename)
if tf.io.gfile.exists(out_boxes_fullpath):
print(f'Skipping {image_path}')
continue
im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0)
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0], scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
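# Example invocation (illustrative; the paths are placeholders and the flags
# are the ones defined by the argparse setup above):
#   python extract_boxes.py \
#     --detector_path /tmp/d2r_frcnn_20190411/ \
#     --list_images_path list_images.txt \
#     --detector_thresh 0.5 \
#     --output_dir test_boxes \
#     --output_viz_dir test_viz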
| apache-2.0 |
jurajmajor/ltl3tela | Experiments/ltlcross_runner.py | 1 | 23078 | # -*- coding: utf-8 -*-
import subprocess
import sys
import os.path
import re
import math
import spot
from IPython.display import SVG
from datetime import datetime
import pandas as pd
from experiments_lib import hoa_to_spot, dot_to_svg, pretty_print
def bogus_to_lcr(form):
"""Converts a formula as it is printed in ``_bogus.ltl`` file
(uses ``--relabel=abc``) to use ``pnn`` AP names.
"""
args = ['-r0','--relabel=pnn','-f',form]
return subprocess.check_output(["ltlfilt"] + args, universal_newlines=True).strip()
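# Illustrative use (editor note): bogus_to_lcr relabels the atomic propositions
# of a formula to p0, p1, ... via `ltlfilt --relabel=pnn`, so formulas from a
# ``_bogus.ltl`` file can be matched against entries produced by ltlcross.
# Requires Spot's `ltlfilt` binary on PATH:
#   relabelled = bogus_to_lcr('G(a -> Fb)')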
def parse_check_log(log_f):
"""Parses a given log file and locates cases where
sanity checks found some error.
Returns:
bugs: a dict: ``form_id``->``list of error lines``
bogus_forms: a dict: ``form_id``->``form``
tools: a dict: ``tool_id``->``command``
"""
log = open(log_f,'r')
bugs = {}
bogus_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
problem = re.compile('error: .* nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = []
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
bugs[form_id] = f_bugs
bogus_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
f_bugs.append(m_prob.group(0))
log.close()
tools = parse_log_tools(log_f)
return bugs, bogus_forms, tools
def find_log_for(tool_code, form_id, log_f):
"""Returns an array of lines from log for
given tool code (P1,N3,...) and form_id. The
form_id is taken from runner - thus we search for
formula number ``form_id+1``
"""
log = open(log_f,'r')
current_f = -1
formula = re.compile('.*ltl:(\d+): (.*)$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
gather = re.compile('Performing sanity checks and gathering statistics')
output = []
for line in log:
m_form = formula.match(line)
if m_form:
current_f = int(m_form.group(1))
curr_tool = ''
if current_f < form_id+1:
continue
if current_f > form_id+1:
break
m_tool = tool.match(line)
if m_tool:
curr_tool = m_tool.group(1)
if gather.match(line):
curr_tool = 'end'
if curr_tool == tool_code:
output.append(line.strip())
log.close()
return output
def hunt_error_types(log_f):
log = open(log_f,'r')
errors = {}
err_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
problem = re.compile('error: .*')
nonempty = re.compile('error: (.*) is nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = {}
m_tool = tool.match(line)
if m_tool:
tid = m_tool.group(1)
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
errors[form_id] = f_bugs
err_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
prob = m_prob.group(0)
m_bug = nonempty.match(line)
if m_bug:
prob = 'nonempty'
tid = m_bug.group(1)
if prob not in f_bugs:
f_bugs[prob] = []
f_bugs[prob].append(tid)
log.close()
tools = parse_log_tools(log_f)
return errors, err_forms, tools
def parse_log_tools(log_f):
log = open(log_f,'r')
tools = {}
tool = re.compile('.*\[(P\d+)\]: (.*)$')
empty_line = re.compile('^\s$')
for line in log:
m_tool = tool.match(line)
m_empty = empty_line.match(line)
if m_empty:
break
if m_tool:
tid = m_tool.group(1)
tcmd = m_tool.group(2)
tools[tid] = tcmd
log.close()
return tools
class LtlcrossRunner(object):
"""A class for running Spot's `ltlcross` and storing and manipulating
its results. For LTL3HOA it can also draw very weak alternating automata
(VWAA).
Parameters
----------
tools : a dict (String -> String)
The records in the dict of the form ``name : ltlcross_cmd``
>>> tools = {"LTL3HOA" : "ltl3hoa -d -x -i -p 2 -f %f > %O",
>>> "SPOT": : "ltl2tgba"
>>> }
formula_files : a list of strings
paths to files with formulas to be fed to `ltlcross`
res_filename : String
filename to store the ltlcross`s results
cols : list of Strings, default ``['states','edges','transitions']``
names of ltlcross's statistics columns to be recorded
"""
def __init__(self, tools,
formula_files=['formulae/classic.ltl'],
res_filename='na_comp.csv',
cols=['states', 'edges', 'transitions'],
log_file=None,
):
self.tools = tools
self.mins = []
self.f_files = formula_files
self.cols = cols.copy()
self.automata = None
self.values = None
self.form = None
if res_filename == '' or res_filename is None:
self.res_file = '_'.join(tools.keys()) + '.csv'
else:
self.res_file = res_filename
if log_file is None:
self.log_file = self.res_file[:-3] + 'log'
else:
self.log_file = log_file
def create_args(self, automata=True, check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms = True, escape_tools=False):
"""Creates args that are passed to run_ltlcross
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
### Prepare ltlcross command ###
tools_strs = ["{"+name+"}" + cmd for (name, cmd) in self.tools.items() if name in tool_subset]
if escape_tools:
tools_strs = ["'{}'".format(t_str) for t_str in tools_strs]
args = tools_strs
if forms:
args += ' '.join(['-F '+F for F in self.f_files]).split()
if timeout:
args.append('--timeout='+timeout)
if automata:
args.append('--automata')
if save_bogus:
args.append('--save-bogus={}_bogus.ltl'.format(res_file[:-4]))
if not check:
args.append('--no-checks')
#else:
# args.append('--reference={ref_Spot}ltl2tgba -H %f')
args.append('--products=0')
args.append('--csv='+res_file)
return args
def ltlcross_cmd(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms=True, lcr='ltlcross'):
"""Returns ltlcross command for the parameters.
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset, forms,
escape_tools=True)
return ' '.join([lcr] + args)
def run_ltlcross(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
lcr='ltlcross'):
"""Removes any older version of ``self.res_file`` and runs `ltlcross`
on all tools.
Parameters
----------
args : a list of ltlcross arguments that can be used for subprocess
tool_subset : a list of names from self.tools
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset)
        # Delete ltlcross result and log files
subprocess.call(["rm", "-f", res_file, log_file])
## Run ltlcross ##
log = open(log_file,'w')
cmd = self.ltlcross_cmd(args,lcr=lcr)
print(cmd, file=log)
print(datetime.now().strftime('[%d.%m.%Y %T]'), file=log)
print('=====================', file=log,flush=True)
self.returncode = subprocess.call([lcr] + args, stderr=subprocess.STDOUT, stdout=log)
log.writelines([str(self.returncode)+'\n'])
log.close()
def parse_results(self, res_file=None):
"""Parses the ``self.res_file`` and sets the values, automata, and
form. If there are no results yet, it runs ltlcross before.
"""
if res_file is None:
res_file = self.res_file
if not os.path.isfile(res_file):
raise FileNotFoundError(res_file)
res = pd.read_csv(res_file)
# Add incorrect columns to track flawed automata
if not 'incorrect' in res.columns:
res['incorrect'] = False
# Removes unnecessary parenthesis from formulas
res.formula = res['formula'].map(pretty_print)
form = pd.DataFrame(res.formula.drop_duplicates())
form['form_id'] = range(len(form))
form.index = form.form_id
res = form.merge(res)
# Shape the table
table = res.set_index(['form_id', 'formula', 'tool'])
table = table.unstack(2)
table.axes[1].set_names(['column','tool'],inplace=True)
# Create separate tables for automata
automata = None
if 'automaton' in table.columns.levels[0]:
automata = table[['automaton']]
# Removes formula column from the index
automata.index = automata.index.levels[0]
# Removes `automata` from column names -- flatten the index
automata.columns = automata.columns.levels[1]
form = form.set_index(['form_id', 'formula'])
# Store incorrect and exit_status information separately
self.incorrect = table[['incorrect']]
self.incorrect.columns = self.incorrect.columns.droplevel()
self.exit_status = table[['exit_status']]
self.exit_status.columns = self.exit_status.columns.droplevel()
# stores the followed columns only
values = table[self.cols]
self.form = form
self.values = values.sort_index(axis=1,level=['column','tool'])
# self.compute_best("Minimum")
if automata is not None:
self.automata = automata
def compute_sbacc(self,col='states'):
def get_sbacc(aut):
if isinstance(aut, float) and math.isnan(aut):
return None
a = spot.automata(aut+'\n')
aut = next(a)
aut = spot.sbacc(aut)
if col == 'states':
return aut.num_states()
if col == 'acc':
return aut.num_sets()
df = self.automata.copy()
# Recreate the same index as for other cols
n_i = [(l, self.form_of_id(l,False)) for l in df.index]
df.index = pd.MultiIndex.from_tuples(n_i)
df.index.names=['form_id','formula']
# Recreate the same columns hierarchy
df = df.T
df['column'] = 'sb_{}'.format(col)
self.cols.append('sb_{}'.format(col))
df = df.set_index(['column'],append=True)
df = df.T.swaplevel(axis=1)
# Compute the requested values and add them to others
df = df.applymap(get_sbacc)
self.values = self.values.join(df)
def compute_best(self, tools=None, colname="Minimum"):
"""Computes minimum values over tools in ``tools`` for all
formulas and stores them in column ``colname``.
Parameters
----------
tools : list of Strings
column names that are used to compute the min over
colname : String
name of column used to store the computed values
"""
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if t in self.tools.keys()
or t in self.mins]
self.mins.append(colname)
for col in self.cols:
self.values[col, colname] = self.values[col][tools].min(axis=1)
self.values.sort_index(axis=1, level=0, inplace=True)
def aut_for_id(self, form_id, tool):
"""For given formula id and tool it returns the corresponding
non-deterministic automaton as a Spot's object.
Parameters
----------
form_id : int
id of formula to use
tool : String
name of the tool to use to produce the automaton
"""
if self.automata is None:
raise AssertionError("No results parsed yet")
if tool not in self.tools.keys():
raise ValueError(tool)
return hoa_to_spot(self.automata.loc[form_id, tool])
def cummulative(self, col="states"):
"""Returns table with cummulative numbers of given ``col``.
Parameters
        ----------
        col : String
            One of the recorded columns to sum over (default ``states``)
"""
return self.values[col].dropna().sum()
def smaller_than(self, t1, t2, reverse=False,
restrict=True,
col='states', restrict_cols=True):
"""Returns a dataframe with results where ``col`` for ``tool1``
has strictly smaller value than ``col`` for ``tool2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
reverse : Boolean, default ``False``
if ``True``, it switches ``tool1`` and ``tool2``
restrict : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
col : String, default ``'states'``
name of column use for comparison.
restrict_cols : Boolean, default ``True``
if ``True``, show only the compared column
"""
return self.better_than(t1,t2,reverse=reverse,
props=[col],include_fails=False,
restrict_cols=restrict_cols,
restrict_tools=restrict)
def better_than(self, t1, t2, props=['states','acc'],
reverse=False, include_fails=True,
restrict_cols=True,restrict_tools=True
):
"""Compares ``t1`` against ``t2`` lexicographicaly
on cols from ``props`` and returns DataFrame with
results where ``t1`` is better than ``t2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
props : list of Strings, default (['states','acc'])
list of columns on which we want the comparison (in order)
reverse : Boolean, default ``False``
if ``True``, it switches ``t1`` and ``t2``
include_fails : Boolean, default ``True``
if ``True``, include formulae where t2 fails and t1 does not
fail
restrict_cols : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
property columns
restrict_tools : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
"""
if t1 not in list(self.tools.keys())+self.mins:
raise ValueError(t1)
if t2 not in list(self.tools.keys())+self.mins:
raise ValueError(t2)
if reverse:
t1, t2 = t2, t1
v = self.values
t1_ok = self.exit_status[t1] == 'ok'
if include_fails:
t2_ok = self.exit_status[t2] == 'ok'
# non-fail beats fail
c = v[t1_ok & ~t2_ok]
# We work on non-failures only from now on
eq = t1_ok & t2_ok
else:
c = pd.DataFrame()
eq = t1_ok
for prop in props:
# For each prop we add t1 < t2
better = v[prop][t1] < v[prop][t2]
# but only from those which were equivalent so far
equiv_and_better = v.loc[better & eq]
c = c.append(equiv_and_better)
# And now choose those equivalent also on prop to eq
eq = eq & (v[prop][t1] == v[prop][t2])
# format the output
idx = pd.IndexSlice
tools = [t1,t2] if restrict_tools else slice(None)
props = props if restrict_cols else slice(None)
return c.loc[:,idx[props,tools]]
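    # Illustrative call (editor note; tool names follow the class docstring and
    # 'acc' must be among the recorded columns):
    #   r.better_than('LTL3HOA', 'SPOT', props=['states', 'acc'])
    # returns the rows where LTL3HOA produced fewer states than SPOT, or the
    # same number of states and fewer acceptance sets.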
def form_of_id(self, form_id, spot_obj=True):
"""For given form_id returns the formula
Parameters
----------
form_id : int
id of formula to return
spot_obj : Bool
If ``True``, returns Spot formula object (uses Latex to
print the formula in Jupyter notebooks)
"""
f = self.values.index[form_id][1]
if spot_obj:
return spot.formula(f)
return f
def id_of_form(self, f, convert=False):
"""Returns id of a given formula. If ``convert`` is ``True``
it also calls ``bogus_to_lcr`` first.
"""
if convert:
f = bogus_to_lcr(f)
ni = self.values.index.droplevel(0)
return ni.get_loc(f)
def mark_incorrect(self, form_id, tool,output_file=None,input_file=None):
"""Marks automaton given by the formula id and tool as flawed
and writes it into the .csv file
"""
if tool not in self.tools.keys():
raise ValueError(tool)
# Put changes into the .csv file
if output_file is None:
output_file = self.res_file
if input_file is None:
input_file = self.res_file
csv = pd.read_csv(input_file)
if not 'incorrect' in csv.columns:
csv['incorrect'] = False
cond = (csv['formula'].map(pretty_print) ==
pretty_print(self.form_of_id(form_id,False))) &\
(csv.tool == tool)
csv.loc[cond,'incorrect'] = True
csv.to_csv(output_file,index=False)
# Mark the information into self.incorrect
self.incorrect.loc[self.index_for(form_id)][tool] = True
def na_incorrect(self):
"""Marks values for flawed automata as N/A. This causes
that the touched formulae will be removed from cummulative
etc. if computed again. To reverse this information you
have to parse the results again.
It also sets ``exit_status`` to ``incorrect``
"""
self.values = self.values[~self.incorrect]
self.exit_status[self.incorrect] = 'incorrect'
def index_for(self, form_id):
return (form_id,self.form_of_id(form_id,False))
def get_error_count(self,err_type='timeout',drop_zeros=True):
"""Returns a Series with total number of er_type errors for
each tool.
Parameters
----------
err_type : String one of `timeout`, `parse error`,
`incorrect`, `crash`, or
'no output'
Type of error we seek
drop_zeros: Boolean (default True)
If true, rows with zeros are removed
"""
if err_type not in ['timeout', 'parse error',
'incorrect', 'crash',
'no output']:
raise ValueError(err_type)
if err_type == 'crash':
c1 = self.exit_status == 'exit code'
c2 = self.exit_status == 'signal'
res = (c1 | c2).sum()
else:
res = (self.exit_status == err_type).sum()
if drop_zeros:
return res.iloc[res.to_numpy().nonzero()]
return res
def cross_compare(self,tools=None,props=['states','acc'],
include_fails=True, total=True,
include_other=True):
def count_better(tool1,tool2):
if tool1 == tool2:
return float('nan')
try:
return len(self.better_than(tool1,tool2,props,
include_fails=include_fails))
except ValueError as e:
if include_other:
return float('nan')
else:
raise e
if tools is None:
tools = self.tools.keys()
c = pd.DataFrame(index=tools, columns=tools).fillna(0)
for tool in tools:
c[tool] = pd.DataFrame(c[tool]).apply(lambda x:
count_better(x.name,tool), 1)
if total:
c['V'] = c.sum(axis=1)
return c
def min_counts(self, tools=None, restrict_tools=False, unique_only=False, col='states',min_name='min(count)'):
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if
t in self.tools.keys() or
t in self.mins]
min_tools = tools if restrict_tools else list(self.tools.keys())
self.compute_best(tools=min_tools, colname=min_name)
s = self.values.loc(axis=1)[col]
df = s.loc(axis=1)[tools+[min_name]]
is_min = lambda x: x[x == x[min_name]]
best_t_count = df.apply(is_min, axis=1).count(axis=1)
choose = (df[best_t_count == 2]) if unique_only else df
choose = choose.index
min_counts = df.loc[choose].apply(is_min,axis=1).count()
return pd.DataFrame(min_counts[min_counts.index != min_name])
def param_runner(name, tools, data_dir='data_param'):
cols=["states","transitions","acc","time","nondet_states"]
r = LtlcrossRunner(tools,\
res_filename='{}/{}.csv'.format(data_dir,name),\
formula_files=['formulae/{}.ltl'.format(name)],\
cols=cols)
return r
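# Illustrative usage of param_runner (editor sketch; the tool command follows
# the class docstring, `ltlcross` and the listed tools must be installed, and
# the 'data_param' directory and 'formulae/classic.ltl' file must exist):
#   tools = {'SPOT': 'ltl2tgba'}
#   runner = param_runner('classic', tools)
#   runner.run_ltlcross()
#   runner.parse_results()
#   print(runner.cummulative(col='states'))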
| gpl-3.0 |
wcalvert/LPC11U_LPC13U_CodeBase | src/drivers/sensors/testscripts/plot_xyz_plus_mag_sma.py | 2 | 3774 | #-------------------------------------------------------------------------------
# Name: plot_sensors_event.py
# Purpose: Plots logged sensors_event_t data from logger.c CSV files
#
# Author: K. Townsend
#
# Created: 09/06/2013
# Copyright: (c) K. Townsend 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import math
import numpy as np
import matplotlib.pyplot as plt
import Tkinter, tkFileDialog
from collections import deque
# This program will plot X/Y/Z data logged via drivers/storage/logger.c, and
# assumes we are getting vector data in CSV format generated using the
# 'sensorsLogSensorsEvent' helper function in drivers/sensors/sensors.c
#
# Data should look similar to the this:
#
# 0,1,5714,6.001670,-6.629296,-4.785645,0.000000
# 0,1,5729,6.001670,-6.629296,-4.785645,0.000000
# 0,1,5734,5.883990,-6.590069,-4.746419,0.000000
#
# In addition to the raw X/Y/Z data, vector magnitude is also calculated in
# a fourth data column
class RingBuffer(deque):
def __init__(self, size_max):
deque.__init__(self)
self.size_max = size_max
def append(self, datum):
deque.append(self, datum)
if len(self) > self.size_max:
self.popleft( )
def tolist(self):
return list(self)
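# Illustrative behaviour of RingBuffer (editor note): the deque keeps at most
# `size_max` of the most recent samples, which is the window needed for the
# simple moving average below.
#   rb = RingBuffer(size_max=3)
#   for v in (1, 2, 3, 4):
#       rb.append(v)
#   rb.tolist()   # -> [2, 3, 4]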
def main():
# Variables for our moving average filter
current = 0
avg = 0
total = 0
mavals = []
# Get window size (how many 'samples' are averaged together)
    windowsize = int(input("Window size (0..65535): "))
if (windowsize > 65535):
print ('Setting window size to 65535')
windowsize = 65535
if (windowsize < 1):
print ('Setting window size to 1')
windowsize = 1
# Request the data file to process
root = Tkinter.Tk()
root.withdraw()
filename = tkFileDialog.askopenfilename()
# Load the CSV file in 'data'
data = np.genfromtxt(filename,
delimiter=',',
dtype="i32,i32,i32,f32,f32,f32,f32",
names=['id','type','timestamp','x','y','z','a'])
# Create a circular buffer for our moving average filter
window = RingBuffer(size_max=windowsize)
# Calculate magnitude in column a
for x in np.nditer(data, op_flags=['readwrite']):
x['a'] = math.sqrt(
math.pow(x['x'], 2) +
math.pow(x['y'], 2) +
math.pow(x['z'], 2))
# Perform the moving average filter operations
current+=1
# Add magnitude into the ringbuffer
window.append(x['a'])
        # Make sure we've reached 'windowsize' samples in the buffer
if (current <= windowsize):
mavals.append(0)
else:
# Get the current average based on the window content
li = window.tolist()
total = 0
for i in li:
total += i
avg = (float)(total/windowsize)
# Append ma output for plotting below
mavals.append(avg);
# Display the results
plt.title("SMA Filtered sensors_event_t Data (X/Y/Z + Magnitude)\nSMA Window Size = %d Samples"
% (windowsize))
plt.xlabel('Timestamp (ms)')
plt.ylabel('Value')
plt.xlim(data['timestamp'].min(), data['timestamp'].max()*1.1)
plt.grid(True)
plt.plot(data['timestamp'], data['x'], color='r', alpha = 0.25, label='x')
plt.plot(data['timestamp'], data['y'], color='g', alpha = 0.25, label='y')
plt.plot(data['timestamp'], data['z'], color='b', alpha = 0.25, label='z')
plt.plot(data['timestamp'], data['a'], color='m', alpha = 0.25, label='mag')
plt.plot(data['timestamp'], mavals, color="black", label="mag filtered")
plt.legend()
plt.show()
pass
if __name__ == '__main__':
main()
| bsd-3-clause |
transientskp/aartfaac-arthur | scripts/arthur-plot.py | 1 | 1440 | #!/usr/bin/env python3
import sys
import numpy as np
from arthur.imaging import full_calculation, calculate_lag
from arthur.io import read_full
from arthur.plot import plot_image, plot_lag, plot_chan_power, plot_corr_mat, plot_diff
from arthur.constants import NUM_CHAN
from matplotlib import pyplot
FRQ = 58398437.5 # Central observation frequency in Hz
def main():
if len(sys.argv) < 2:
print("Image the first set of visibilites from a visibilities file")
print()
print("usage: {} <file>".format(sys.argv[0]))
sys.exit(1)
else:
path = sys.argv[1]
# define them here so we can access them out of for loop scope
lags = []
prev_data = date = img_data = corr_data = diff_data = None
chan_data = np.zeros((NUM_CHAN, 60), dtype=np.float32)
for date, body in read_full(path):
img_data, corr_data, chan_row = full_calculation(body, FRQ)
lags += [calculate_lag(date).seconds]
if prev_data is None:
prev_data = img_data
chan_data = np.roll(chan_data, 1)
chan_data[:, 0] = chan_row
diff_data = img_data - prev_data
prev_data = img_data
fig_img = plot_image(date, img_data, FRQ)
fig_lag = plot_lag(lags)
fig_chan = plot_chan_power(chan_data)
fig_cm = plot_corr_mat(corr_data, FRQ, date)
fig_diff = plot_diff(diff_data, FRQ, date)
pyplot.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_01_09_2015_01.py | 1 | 25014 |
# coding: utf-8
# In[1]:
# this part imports libs and loads the MNIST data used below
import sys
sys.path.append('../../../libs/')
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pickle
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
import cPickle
import gzip
import os
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb, PIL
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[2]:
# set settings for this script
settings = {}
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['SVM_RBF'] = 1
settings['SVM_POLY'] = 1
settings['DL'] = 1
settings['Log'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_POLY'] = 1
settings['DL_S'] = 1
settings['SAE_S_SVM'] = 1
settings['SAE_S_SVM_RBF'] = 1
settings['SAE_S_SVM_POLY'] = 1
settings['number_iterations'] = 10
settings['finetune_lr'] = 0.1
settings['batch_size'] = 30
settings['pretraining_interations'] = 50000#10000
settings['pretrain_lr'] = 0.001
#settings['training_epochs'] = 300 #300
settings['training_interations'] = 50000 #300
settings['hidden_layers_sizes'] = [200, 200, 200, 200, 200]
settings['corruption_levels'] = [0.5, 0.5, 0.5, 0.5, 0.5 ]
settings['number_of_training'] = [10000]#[1000, 2500, 5000, 7500, 10000]
settings['test_set_from_test'] = True
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_handwritten_digits' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[3]:
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
X_train,y_train = train_set
X_valid,y_valid = valid_set
X_total=np.vstack((X_train, X_valid))
X_total = np.array(X_total, dtype= theano.config.floatX)
print 'sample size', X_total.shape
y_total = np.concatenate([y_train, y_valid])
# In[5]:
################## generate data from training set###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole
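# Editor's note (illustrative): each row of X_whole is a concatenated pair of
# 784-D digit images [image_A | image_B]; the pair is labelled 1 when
# digit(A) - digit(B) == 1 and 0 otherwise, so the classifiers trained below
# learn the "A is the successor digit of B" relation.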
# In[7]:
#pylab.imshow(imageB.reshape(28, 28), cmap="Greys")
# In[8]:
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
writer.writerow(['no.', 'number_of_training', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
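# Illustrative call of saveAsCsv (editor sketch; the score key and the row
# values are placeholders mirroring what run_models() appends to analysis_scr):
#   saveAsCsv(False, 'handwritten_digits',
#             {'accuracy': 0.0},
#             [(1, 10000, 'SVM', True, 0.95)])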
def run_models(settings = None):
analysis_scr = []
with_auc_score = settings['with_auc_score']
for subset_no in xrange(1,settings['number_iterations']+1):
print("Subset:", subset_no)
################## generate data ###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole.shape
x_train_pre_validation, x_test, y_train_pre_validation, y_test = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211)
for number_of_training in settings['number_of_training']:
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:number_of_training],
y_train_pre_validation[:number_of_training],\
test_size=0.2, random_state=21)
print x_train.shape, y_train.shape, x_validation.shape, y_validation.shape, x_test.shape, y_test.shape
x_train_minmax, x_validation_minmax, x_test_minmax = x_train, x_validation, x_test
train_X_reduced = x_train
train_y_reduced = y_train
test_X = x_test
test_y = y_test
###original data###
################ end of data ####################
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, y_train)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, y_train)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['Log']:
print "Log"
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(scaled_train_X, train_y_reduced)
predicted_test_y = log_clf_l2.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes = settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train,
x_validation_minmax, y_validation,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda.predict(x_test_minmax)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
training_predicted = sda.predict(x_train_minmax)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
####transformed original data####
x = train_X_reduced
a_MAE_original = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_original.transform(train_X_reduced)
new_x_test_minmax_A = a_MAE_original.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
# SAE_SVM
print 'SAE followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_POLY']:
# SAE_SVM
print 'SAE followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
#### separated transformed data ####
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_whole)
new_x_train_minmax_whole_scaled = standard_scaler.transform(new_x_train_minmax_whole)
new_x_test_minmax_whole_scaled = standard_scaler.transform(new_x_test_minmax_whole)
if settings['DL_S']:
# deep learning using split network
sda_transformed = trainSda(new_x_train_minmax_whole, y_train,
new_x_validationt_minmax_whole, y_validation ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
predicted_test_y = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'DL_S', isTest) + tuple(performance_score(y_test, predicted_test_y, with_auc_score).values()))
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
if settings['SAE_S_SVM']:
print 'SAE_S followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_RBF']:
print 'SAE S followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_POLY']:
            # SAE_S_SVM_POLY
print 'SAE S followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + '_' + str(settings['pretraining_interations']) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
return sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
# In[9]:
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr = run_models(settings)
# In[48]:
# save objects
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date +'sda.pickle', 'wb') as handle:
pickle.dump(sda, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_original.pickle', 'wb') as handle:
pickle.dump(a_MAE_original, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_A.pickle', 'wb') as handle:
pickle.dump(a_MAE_A, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_B.pickle', 'wb') as handle:
pickle.dump(a_MAE_B, handle)
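# Round-trip sketch: reload one of the pickles written above to confirm the dump
# is readable; the filename is rebuilt with the same expression used for writing.
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date +'sda.pickle', 'rb') as handle:
    sda_reloaded = pickle.load(handle)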
x = logging._handlers.copy()
for i in x:
log.removeHandler(i)
i.flush()
i.close()
# In[ ]:
# In[31]:
'''
weights_map_to_input_space = []
StackedNNobject = sda
image_dimension_x = 28*2
image_dimension_y = 28
if isinstance(StackedNNobject, SdA) or isinstance(StackedNNobject, MultipleAEs):
weights_product = StackedNNobject.dA_layers[0].W.get_value(borrow=True)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_0_layer_weights.png'
image.save(sample_image_path)
weights_map_to_input_space.append(weights_product)
for i_layer in range(1, len(StackedNNobject.dA_layers)):
i_weigths = StackedNNobject.dA_layers[i_layer].W.get_value(borrow=True)
weights_product = np.dot(weights_product, i_weigths)
weights_map_to_input_space.append(weights_product)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_'+ str(i_layer)+ '_layer_weights.png'
image.save(sample_image_path)
'''
# In[18]:
| gpl-2.0 |
rvraghav93/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 8 | 35969 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_false(hasattr(t2, "idf_"))
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the unigram representation is retained as the best
    # estimator: the grid search already reaches a 100% accuracy model without
    # needing bigrams
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the unigram representation is retained as the best
    # estimator: the grid search already reaches a 100% accuracy model without
    # needing bigrams
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = rng.choice(vocab_words, size=5, replace=False)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
allinpaybusiness/ACS | allinpay projects/creditscoreMLP/classMLP.py | 1 | 9585 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys
import os
sys.path.append("allinpay projects")
from creditscore.creditscore import CreditScore
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
class CreditScoreMLP(CreditScore):
def MLP_trainandtest(self, testsize, cv, feature_sel, varthreshold, activation,solver, alpha, max_iter =1000,nclusters=10, cmethod=None, *hidden_layer_sizes):
        # Split the dataset into a training set and a test set
data_feature = self.data.ix[:, self.data.columns != 'default']
data_target = self.data['default']
X_train, X_test, y_train, y_test = train_test_split(data_feature, data_target, test_size=testsize, random_state=0)
        # Coarse-bin the training-set variables and apply the WOE transformation, then apply the same binning/WOE to the test set
X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)
        # Feature selection on the training set, using methods from sklearn.feature_selection
if feature_sel == "VarianceThreshold":
selector = VarianceThreshold(threshold = varthreshold)
X_train1 = pd.DataFrame(selector.fit_transform(X_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "RFECV":
estimator = LogisticRegression()
selector = RFECV(estimator, step=1, cv=cv)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectFromModel":
estimator = LogisticRegression()
selector = SelectFromModel(estimator)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectKBest":
selector = SelectKBest()
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
else:
X_train1, X_test1 = X_train, X_test
        # Train the model and generate predictions
        classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, activation=activation, solver=solver, alpha=alpha, max_iter=max_iter)  # other parameters are left at their defaults
        # To guard against a single training run failing to converge, train the network 10 times and average the predicted probabilities
probability = 0
for i in range(10):
            # Train the model
classifier.fit(X_train1, y_train)
            # Predict probabilities
probability += classifier.predict_proba(X_test1)[:,1]
probability = probability / 10
predresult = pd.DataFrame({'target' : y_test, 'probability' : probability})
return predresult
def MLP_trainandtest_kfold(self, nsplit, cv, feature_sel, varthreshold, activation,solver, alpha, max_iter =1000,nclusters=10, cmethod=None, *hidden_layer_sizes):
data_feature = self.data.ix[:, self.data.columns != 'default']
data_target = self.data['default']
        # Split the data into k folds; each fold in turn serves as the test set while the remaining data form the training set
kf = KFold(n_splits=nsplit, shuffle=True)
predresult = pd.DataFrame()
for train_index, test_index in kf.split(data_feature):
X_train, X_test = data_feature.iloc[train_index, ], data_feature.iloc[test_index, ]
y_train, y_test = data_target.iloc[train_index, ], data_target.iloc[test_index, ]
            # If random sampling leaves only one class in train or test, skip this fold
if (len(y_train.unique()) == 1) or (len(y_test.unique()) == 1):
continue
            # Coarse-bin the training-set variables and apply the WOE transformation, then apply the same binning/WOE to the test set
X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)
            # Feature selection on the training set, using methods from sklearn.feature_selection
if feature_sel == "VarianceThreshold":
selector = VarianceThreshold(threshold = varthreshold)
X_train1 = pd.DataFrame(selector.fit_transform(X_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "RFECV":
estimator = LogisticRegression()
selector = RFECV(estimator, step=1, cv=cv)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectFromModel":
estimator = LogisticRegression()
selector = SelectFromModel(estimator)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectKBest":
selector = SelectKBest()
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
else:
X_train1, X_test1 = X_train, X_test
            # Train the model and generate predictions
            classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, activation=activation, solver=solver, alpha=alpha, max_iter=max_iter)  # other parameters are left at their defaults
            # To guard against a single training run failing to converge, train the network 10 times and average the predicted probabilities
probability = 0
for i in range(10):
                # Train the model
classifier.fit(X_train1, y_train)
                # Predict probabilities
probability += classifier.predict_proba(X_test1)[:,1]
probability = probability / 10
temp = pd.DataFrame({'target' : y_test, 'probability' : probability})
predresult = pd.concat([predresult, temp], ignore_index = True)
return predresult
def loopMLP_trainandtest(self, testsize, cv, feature_sel, varthreshold, activation, solver,alpha, max_iter =1000, nclusters=10, cmethod=None):
df = pd.DataFrame()
        for i in range(3, 101, 3):  # loop over the number of hidden-layer neurons
hidden_layer_sizes = (i,)
            # Split into train/test and evaluate
predresult = self.MLP_trainandtest(testsize, cv, feature_sel, varthreshold, activation,solver ,alpha, max_iter,nclusters, cmethod, *hidden_layer_sizes)
            # Evaluate and store the test results
auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
temp = pd.DataFrame({'hidden_first_layer' : i, 'auc_value' : auc ,'ks_value' :ks ,'p0=0.5' :metrics_p['accuracy'][5]} ,index=[0])
df = pd.concat([df, temp], ignore_index = False)
print('num %s complete' %i)
time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
exist = os.path.exists('d:/ACS_CSVS')
if exist:
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
else:
os.makedirs('d:/ACS_CSVS/')
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
def loopMLP_trainandtest_kfold(self, testsize, cv, feature_sel, varthreshold, activation, solver,alpha, max_iter =1000, nclusters=10, cmethod=None):
df = pd.DataFrame()
        for i in range(3, 101, 3):  # loop over the number of hidden-layer neurons
hidden_layer_sizes = (i,)
            # Split into train/test and evaluate
predresult = self.MLP_trainandtest_kfold(testsize, cv, feature_sel, varthreshold, activation,solver ,alpha, max_iter,nclusters, cmethod, *hidden_layer_sizes)
            # Evaluate and store the test results
auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
temp = pd.DataFrame({'hidden_first_layer' : i, 'auc_value' : auc ,'ks_value' :ks ,'p0=0.5' :metrics_p['accuracy'][5]} ,index=[0])
df = pd.concat([df, temp], ignore_index = False)
print('num %s complete' %i)
time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
exist = os.path.exists('d:/ACS_CSVS')
if exist:
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
else:
os.makedirs('d:/ACS_CSVS/')
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
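# Standalone sketch of the repeated-training trick used in the methods above: fit
# the same MLP several times and average predict_proba to smooth over runs that
# fail to converge. The synthetic data and layer sizes below are illustrative only.
def example_averaged_mlp_probability(n_repeats=10):
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    classifier = MLPClassifier(hidden_layer_sizes=(12,), activation='relu',
                               solver='adam', alpha=1e-4, max_iter=1000)
    probability = 0
    for _ in range(n_repeats):
        classifier.fit(X, y)
        probability += classifier.predict_proba(X)[:, 1]
    return probability / n_repeats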
| apache-2.0 |
surchs/brainbox | visu/base.py | 1 | 8414 | __author__ = 'surchs'
import sys
import numpy as np
from matplotlib import gridspec
from nilearn import plotting as nlp
from matplotlib import pyplot as plt
from matplotlib import colors as mpc
def add_subplot_axes(ax, rect, axisbg='w'):
fig = plt.gcf()
box = ax.get_position()
width = box.width
height = box.height
inax_position = ax.transAxes.transform(rect[0:2])
trans_figure = fig.transFigure.inverted()
infig_position = trans_figure.transform(inax_position)
x = infig_position[0]
y = infig_position[1]
width *= rect[2]
height *= rect[3]
subax = fig.add_axes([x, y, width, height], axisbg=axisbg)
return subax
def add_four_grid(ax, dist=0.05, ticks=False, border=False, titles=None):
"""
Function that creates a symmetric four grid inside a subplot
:param ax: Axis handle of parent subplot
    :param dist: Distance between neighbouring fields of the grid
:param ticks: True if ticks shall be visible
:param border: True if border shall be visible
:param titles: Iterable with length 4 in this order:
0) top left
1) bottom left
2) top right
3) bottom right
                   If set, the fields will be made narrower to
                   accommodate the titles
:return: Axis handles for the four subfields in this order:
0) top left
1) bottom left
2) top right
3) bottom right
"""
# See if titles are provided for all subplots
if titles and len(titles) == 4:
title = True
else:
title = False
# Make left top plot
lt = add_subplot_axes(ax, [0, 0.5+dist/2,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
lt.set_title(titles[0])
if not ticks:
lt.set_xticks([])
lt.set_yticks([])
if not border:
lt.spines["top"].set_visible(False)
lt.spines["right"].set_visible(False)
lt.spines["left"].set_visible(False)
lt.spines["bottom"].set_visible(False)
# Make left bottom plot
lb = add_subplot_axes(ax, [0, 0,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
lb.set_title(titles[1])
if not ticks:
lb.set_xticks([])
lb.set_yticks([])
if not border:
lb.spines["top"].set_visible(False)
lb.spines["right"].set_visible(False)
lb.spines["left"].set_visible(False)
lb.spines["bottom"].set_visible(False)
# Make right top plot
rt = add_subplot_axes(ax, [0.5+dist/2, 0.5+dist/2,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
rt.set_title(titles[2])
    if not ticks:
rt.set_xticks([])
rt.set_yticks([])
if not border:
rt.spines["top"].set_visible(False)
rt.spines["right"].set_visible(False)
rt.spines["left"].set_visible(False)
rt.spines["bottom"].set_visible(False)
# Make right bottom plot
rb = add_subplot_axes(ax, [0.5+dist/2, 0,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
rb.set_title(titles[3])
if not ticks:
rb.set_xticks([])
rb.set_yticks([])
if not border:
rb.spines["top"].set_visible(False)
rb.spines["right"].set_visible(False)
rb.spines["left"].set_visible(False)
rb.spines["bottom"].set_visible(False)
return lt, lb, rt, rb
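# A minimal usage sketch for add_four_grid; the figure size, titles and toy data
# below are arbitrary and purely illustrative.
def example_add_four_grid():
    fig, ax = plt.subplots(figsize=(6, 6))
    lt, lb, rt, rb = add_four_grid(ax, dist=0.05,
                                   titles=('top left', 'bottom left',
                                           'top right', 'bottom right'))
    lt.plot(np.arange(10), np.arange(10) ** 2)
    return fig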
def make_montage(vol, axis='coronal', x_step=5, y_step=6):
"""
Makes a montage of a 3D volume
"""
n_steps = x_step * y_step
if axis == 'coronal':
it_dim = vol.shape[1]
x_dim = vol.shape[0]
y_dim = vol.shape[2]
elif axis == 'axial':
it_dim = vol.shape[0]
x_dim = vol.shape[1]
y_dim = vol.shape[2]
vis_mat = np.zeros((x_step*x_dim, y_step*y_dim))
it_slc = np.linspace(0, it_dim-1, n_steps)
itc = 0
for y in np.arange(y_step):
for x in np.arange(x_step):
slc_ind = it_slc[itc]
            get_slc = int(np.floor(slc_ind))
if axis == 'coronal':
slc = vol[:, get_slc, :]
elif axis == 'axial':
slc = vol[get_slc, ...]
vis_mat[x_dim * x:x_dim * (x + 1), y_dim * y:y_dim * (y + 1)] = slc
itc += 1
out_mat = np.fliplr(np.rot90(vis_mat))
return out_mat
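# A minimal usage sketch for make_montage on a synthetic volume; the shape and
# colormap are arbitrary and purely illustrative.
def example_make_montage():
    vol = np.random.rand(30, 40, 50)
    mat = make_montage(vol, axis='axial', x_step=5, y_step=6)
    plt.imshow(mat, cmap='gray')
    return mat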
def montage(img, thr=0, mode='coronal', rows=5, columns=6, fsz=(10, 20)):
"""
Make a montage using nilearn for the background
The output figure will be 5 slices wide and 6
slices deep
:param img: nilearn image containing the data
:param thr: threshold for the image
:param mode: view mode. saggital, coronal, axial
:param rows: number of rows in the figure
    :param columns: number of columns in the figure
:param fsz: size of the figure
:return fig: figure handle for saving or whatnot
"""
# Hardwired view range
sag_rng = [-65, 65]
cor_rng = [-100, 65]
axi_rng = [-71, 85]
# Get the number of slices
    n_slices = rows * columns
if mode == 'coronal':
# Get the slice indices
view_range = np.floor(np.linspace(cor_rng[0], cor_rng[1], n_slices))
view_mode = 'y'
if mode == 'axial':
# Get the slice indices
view_range = np.floor(np.linspace(axi_rng[0], axi_rng[1], n_slices))
view_mode = 'z'
if mode == 'saggital':
# Get the slice indices
view_range = np.floor(np.linspace(sag_rng[0], sag_rng[1], n_slices))
view_mode = 'x'
# Prepare the figure
fig = plt.figure(figsize=fsz)
    gs = gridspec.GridSpec(columns, 1, hspace=0, wspace=0)
# Loop through the rows of the image
    for row_id in range(columns):
# Create the axis to show
ax = fig.add_subplot(gs[row_id, 0])
# Get the slices in the column direction
row_range = view_range[row_id*rows:(row_id+1)*rows]
# Display the thing
nlp.plot_stat_map(img, cut_coords=row_range,
display_mode=view_mode, threshold=thr,
axes=ax, black_bg=True)
return fig
def make_cmap(colors, position=None, bit=False):
"""
make_cmap takes a list of tuples which contain RGB values. The RGB
    values may either be in 8-bit [0 to 255] (in which case bit must be set to
True when called) or arithmetic [0 to 1] (default). make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
"""
bit_rgb = np.linspace(0,1,256)
    if not position:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
if bit:
for i in range(len(colors)):
colors[i] = (bit_rgb[colors[i][0]],
bit_rgb[colors[i][1]],
bit_rgb[colors[i][2]])
cdict = {'red':[], 'green':[], 'blue':[]}
for pos, color in zip(position, colors):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpc.LinearSegmentedColormap('my_colormap',cdict,256)
return cmap
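# A minimal usage sketch for make_cmap: a three-color map whose colors and
# positions are arbitrary and purely illustrative.
def example_make_cmap():
    colors = [(0, 0, 1), (1, 1, 1), (1, 0, 0)]  # blue -> white -> red
    return make_cmap(colors, position=[0.0, 0.5, 1.0])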
def hot_cold():
"""
This generates a niak-like colormap of hot cold
:return:
"""
# Define a new colormap
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0),
(0.25, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.75, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 1.0, 1.0),
(0.25, 1.0, 1.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0))
}
hotcold = mpc.LinearSegmentedColormap('hotcold', cdict)
return hotcold
| mit |
rhiever/bokeh | sphinx/source/docs/tutorials/exercises/unemployment.py | 23 | 2160 | import numpy as np
from bokeh.models import HoverTool
from bokeh.plotting import ColumnDataSource, figure, output_file, show
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
# EXERCISE: create a `ColumnDataSource` with columns: month, year, color, rate
source = ColumnDataSource(
data=dict(
month=month,
year=year,
color=color,
rate=rate,
)
)
# EXERCISE: output to static HTML file
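# One possible solution (a sketch; the output filename is arbitrary):
output_file("unemployment.html", title="US Unemployment (1948 - 2013)")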
# create a new figure
p = figure(title="US Unemployment (1948 - 2013)", tools="resize,hover",
x_range=years, y_range=list(reversed(months)),
plot_width=900, plot_height=400, x_axis_location="above")
# EXERCISE: use the `rect renderer with the following attributes:
# - x_range is years, y_range is months (reversed)
# - fill color for the rectangles is the 'color' field
# - line_color for the rectangles is None
# - tools are resize and hover tools
# - add a nice title, and set the plot_width and plot_height
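# One possible solution (a sketch): the ranges, tools, title and sizing listed
# above are already set in the figure() call, so only the rect glyphs remain.
p.rect("year", "month", width=1, height=1, source=source,
       color="color", line_color=None)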
# EXERCISE: use p.grid, p.axis, etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
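# One possible styling pass (a sketch; the exact values are a matter of taste):
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi / 3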
# EXERCISE: configure the hover tool to display the month, year and rate
hover = p.select(dict(type=HoverTool))
hover.tooltips = [
# fill me in
]
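# For example (a sketch using the column names defined in the source above):
hover.tooltips = [
    ("date", "@month @year"),
    ("rate", "@rate"),
]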
show(p)
| bsd-3-clause |
yousrabk/mne-python | mne/viz/tests/test_misc.py | 17 | 4858 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import (io, read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate)
from mne.datasets import testing
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate)
from mne.utils import requires_nibabel, run_tests_if_main, slow_test
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
inv_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
return io.Raw(raw_fname, preload=True)
def _get_events():
return read_events(event_fname)
def test_plot_cov():
"""Test plotting of covariances
"""
raw = _get_raw()
cov = read_cov(cov_fname)
fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
"""Test plotting of BEM contours
"""
assert_raises(IOError, plot_bem, subject='bad-subject',
subjects_dir=subjects_dir)
assert_raises(ValueError, plot_bem, subject='sample',
subjects_dir=subjects_dir, orientation='bad-ori')
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='sagittal', slices=[25, 50])
def test_plot_events():
"""Test plotting events
"""
event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
raw = _get_raw()
events = _get_events()
plot_events(events, raw.info['sfreq'], raw.first_samp)
plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
# Test plotting events without sfreq
plot_events(events, first_samp=raw.first_samp)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels)
plot_events(events, raw.info['sfreq'], raw.first_samp,
color=color)
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 1}, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 111}, color=color)
@testing.requires_testing_data
def test_plot_source_spectrogram():
"""Test plotting of source spectrogram
"""
sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
'bem', 'sample-oct-6-src.fif'))
# dense version
vertices = [s['vertno'] for s in sample_src]
n_times = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts, n_times))
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
assert_raises(ValueError, plot_source_spectrogram, [], [])
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmin=0)
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmax=7)
@slow_test
@testing.requires_testing_data
def test_plot_snr():
"""Test plotting SNR estimate
"""
inv = read_inverse_operator(inv_fname)
evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
plot_snr_estimate(evoked, inv)
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
"""Test plotting dipole amplitudes
"""
dipoles = read_dipole(dip_fname)
dipoles.plot_amplitudes(show=False)
run_tests_if_main()
| bsd-3-clause |
CVML/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
tipsybear/actors-simulation | tests/test_viz.py | 1 | 1179 | # test_viz
# Visualization tests
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sun Dec 06 20:45:32 2015 -0500
#
# Copyright (C) 2015 University of Maryland
# For license information, see LICENSE.txt
#
# ID: test_viz.py [] [email protected] $
"""
Visualization tests
"""
##########################################################################
## Imports
##########################################################################
import unittest
import gvas.viz
from peak.util.imports import lazyModule
##########################################################################
## Visualization and Configuration Tests
##########################################################################
class VizTests(unittest.TestCase):
def test_lazyimport(self):
"""
Test that the viz module is lazily imported.
"""
self.assertEqual(type(gvas.viz.sns), type(lazyModule('seaborn')))
self.assertEqual(type(gvas.viz.plt), type(lazyModule('matplotlib.pyplot')))
self.assertEqual(type(gvas.viz.np), type(lazyModule('numpy')))
self.assertEqual(type(gvas.viz.pd), type(lazyModule('pandas')))
| mit |
ky822/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
DSLituiev/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 8 | 7473 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
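For instance, this bound can be queried directly with the scikit-learn helper
used later in this example (a minimal sketch of the formula above)::
    from sklearn.random_projection import johnson_lindenstrauss_min_dim
    # minimum number of components for one million samples at 10% distortion
    johnson_lindenstrauss_min_dim(n_samples=int(1e6), eps=0.1)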
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/datasets/tests/test_lfw.py | 50 | 6849 | """These tests for LFW require medium-size data downloading and processing.
If the data has not already been downloaded by running the examples,
the tests won't run (they are skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes), but as the dataset loader leverages
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)
@raises(IOError)
def test_load_empty_lfw_pairs():
load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
DESHRAJ/crowdsource-platform | crowdsourcing/models.py | 4 | 22804 | from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from oauth2client.django_orm import FlowField, CredentialsField
from crowdsourcing.utils import get_delimiter
import pandas as pd
import os
class RegistrationModel(models.Model):
user = models.OneToOneField(User)
activation_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class PasswordResetModel(models.Model):
user = models.OneToOneField(User)
reset_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Region(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the region!', })
code = models.CharField(max_length=16, error_messages={'required': 'Please specify the region code!', })
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Country(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the country!', })
code = models.CharField(max_length=8, error_messages={'required': 'Please specify the country code!', })
region = models.ForeignKey(Region)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class City(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the city!', })
country = models.ForeignKey(Country)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class Address(models.Model):
street = models.CharField(max_length=128, error_messages={'required': 'Please specify the street name!', })
country = models.ForeignKey(Country)
city = models.ForeignKey(City)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s, %s, %s' % (self.street, self.city, self.country)
class Role(models.Model):
name = models.CharField(max_length=32, unique=True, error_messages={'required': 'Please specify the role name!',
'unique': 'The role %(value)r already exists. Please provide another name!'})
is_active = models.BooleanField(default=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Language(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the language!'})
iso_code = models.CharField(max_length=8)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserProfile(models.Model):
user = models.OneToOneField(User)
gender_choices = (('M', 'Male'), ('F', 'Female'))
gender = models.CharField(max_length=1, choices=gender_choices)
address = models.ForeignKey(Address, null=True)
birthday = models.DateField(null=True, error_messages={'invalid': "Please enter a correct date format"})
nationality = models.ManyToManyField(Country, through='UserCountry')
verified = models.BooleanField(default=False)
picture = models.BinaryField(null=True)
friends = models.ManyToManyField('self', through='Friendship',
symmetrical=False)
roles = models.ManyToManyField(Role, through='UserRole')
deleted = models.BooleanField(default=False)
languages = models.ManyToManyField(Language, through='UserLanguage')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserCountry(models.Model):
country = models.ForeignKey(Country)
user = models.ForeignKey(UserProfile)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Skill(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the skill name!"})
description = models.CharField(max_length=512, error_messages={'required': "Please enter the skill description!"})
verified = models.BooleanField(default=False)
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Worker(models.Model):
profile = models.OneToOneField(UserProfile)
skills = models.ManyToManyField(Skill, through='WorkerSkill')
deleted = models.BooleanField(default=False)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class WorkerSkill(models.Model):
worker = models.ForeignKey(Worker)
skill = models.ForeignKey(Skill)
level = models.IntegerField(null=True)
verified = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'skill')
class Requester(models.Model):
profile = models.OneToOneField(UserProfile)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class UserRole(models.Model):
user_profile = models.ForeignKey(UserProfile)
role = models.ForeignKey(Role)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Friendship(models.Model):
user_source = models.ForeignKey(UserProfile, related_name='user_source')
user_target = models.ForeignKey(UserProfile, related_name='user_target')
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Category(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Project(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the project name!"})
start_date = models.DateTimeField(auto_now_add=True, auto_now=False)
end_date = models.DateTimeField(auto_now_add=True, auto_now=False)
owner = models.ForeignKey(Requester, related_name='project_owner')
description = models.CharField(max_length=1024, default='')
collaborators = models.ManyToManyField(Requester, through='ProjectRequester')
keywords = models.TextField(null=True)
save_to_drive = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
categories = models.ManyToManyField(Category, through='ProjectCategory')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ProjectRequester(models.Model):
"""
Tracks the list of requesters that collaborate on a specific project
"""
requester = models.ForeignKey(Requester)
project = models.ForeignKey(Project)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('requester', 'project')
class Template(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the template name!"})
owner = models.ForeignKey(UserProfile)
source_html = models.TextField(default=None, null=True)
price = models.FloatField(default=0)
share_with_others = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Module(models.Model):
"""
aka Milestone
    A group of tasks of the same kind.
    Fields:
        repetition: number of times a task needs to be performed
"""
name = models.CharField(max_length=128, error_messages={'required': "Please enter the module name!"})
description = models.TextField(error_messages={'required': "Please enter the module description!"})
owner = models.ForeignKey(Requester)
project = models.ForeignKey(Project, related_name='modules')
categories = models.ManyToManyField(Category, through='ModuleCategory')
keywords = models.TextField(null=True)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'In Review'),
(3, 'In Progress'),
(4, 'Completed')
)
permission_types = ((1, "Others:Read+Write::Workers:Read+Write"),
(2, 'Others:Read::Workers:Read+Write'),
(3, 'Others:Read::Workers:Read'),
(4, 'Others:None::Workers:Read')
)
status = models.IntegerField(choices=statuses, default=1)
price = models.FloatField()
repetition = models.IntegerField(default=1)
module_timeout = models.IntegerField(default=0)
has_data_set = models.BooleanField(default=False)
data_set_location = models.CharField(max_length=256, default='No data set', null=True)
task_time = models.FloatField(default=0) # in minutes
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
template = models.ManyToManyField(Template, through='ModuleTemplate')
is_micro = models.BooleanField(default=True)
is_prototype = models.BooleanField(default=False)
min_rating = models.FloatField(default=0)
allow_feedback = models.BooleanField(default=True)
feedback_permissions = models.IntegerField(choices=permission_types, default=1)
class ModuleCategory(models.Model):
module = models.ForeignKey(Module)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('category', 'module')
class ProjectCategory(models.Model):
project = models.ForeignKey(Project)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('project', 'category')
class TemplateItem(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the name of the template item!"})
template = models.ForeignKey(Template, related_name='template_items')
id_string = models.CharField(max_length=128)
role = models.CharField(max_length=16)
icon = models.CharField(max_length=256, null=True)
data_source = models.CharField(max_length=256, null=True)
layout = models.CharField(max_length=16, default='column')
type = models.CharField(max_length=16)
sub_type = models.CharField(max_length=16)
values = models.TextField(null=True)
position = models.IntegerField()
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
ordering = ['position']
class ModuleTemplate(models.Model):
module = models.ForeignKey(Module)
template = models.ForeignKey(Template)
class TemplateItemProperties(models.Model):
template_item = models.ForeignKey(TemplateItem)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Task(models.Model):
module = models.ForeignKey(Module, related_name='module_tasks')
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Assigned'),
(4, 'Finished')
)
status = models.IntegerField(choices=statuses, default=1)
data = models.TextField(null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
price = models.FloatField(default=0)
class TaskWorker(models.Model):
task = models.ForeignKey(Task, related_name='task_workers')
worker = models.ForeignKey(Worker)
statuses = ((1, 'In Progress'),
(2, 'Submitted'),
(3, 'Accepted'),
(4, 'Rejected'),
(5, 'Returned'),
(6, 'Skipped')
)
task_status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
is_paid = models.BooleanField(default=False)
class TaskWorkerResult(models.Model):
task_worker = models.ForeignKey(TaskWorker, related_name='task_worker_results')
result = models.TextField(null=True)
template_item = models.ForeignKey(TemplateItem)
# TODO: To be refined
statuses = ((1, 'Created'),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class WorkerModuleApplication(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ActivityLog(models.Model):
"""
    Track all users' activities: Create, Update and Delete
"""
activity = models.CharField(max_length=512)
author = models.ForeignKey(User)
created_timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
class Qualification(models.Model):
module = models.ForeignKey(Module)
# TODO: To be refined
types = ((1, "Strict"),
(2, 'Flexible'))
type = models.IntegerField(choices=types, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class QualificationItem(models.Model):
qualification = models.ForeignKey(Qualification)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserLanguage(models.Model):
language = models.ForeignKey(Language)
user = models.ForeignKey(UserProfile)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Currency(models.Model):
name = models.CharField(max_length=32)
iso_code = models.CharField(max_length=8)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserPreferences(models.Model):
user = models.OneToOneField(User)
language = models.ForeignKey(Language)
currency = models.ForeignKey(Currency)
login_alerts = models.SmallIntegerField(default=0)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class RequesterRanking(models.Model):
requester_name = models.CharField(max_length=128)
requester_payRank = models.FloatField()
requester_fairRank = models.FloatField()
requester_speedRank = models.FloatField()
requester_communicationRank = models.FloatField()
requester_numberofReviews = models.IntegerField(default=0)
class ModuleRating(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
value = models.IntegerField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class ModuleReview(models.Model):
worker = models.ForeignKey(Worker)
anonymous = models.BooleanField(default=False)
module = models.ForeignKey(Module)
comments = models.TextField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class FlowModel(models.Model):
id = models.OneToOneField(User, primary_key=True)
flow = FlowField()
class AccountModel(models.Model):
name = models.CharField(max_length=128)
type = models.CharField(max_length=16)
email = models.EmailField()
access_token = models.TextField(max_length=2048)
root = models.CharField(max_length=256)
is_active = models.IntegerField()
quota = models.BigIntegerField()
used_space = models.BigIntegerField()
assigned_space = models.BigIntegerField()
status = models.IntegerField(default=quota)
owner = models.ForeignKey(User)
class CredentialsModel(models.Model):
account = models.ForeignKey(AccountModel)
credential = CredentialsField()
class TemporaryFlowModel(models.Model):
user = models.ForeignKey(User)
type = models.CharField(max_length=16)
email = models.EmailField()
class BookmarkedProjects(models.Model):
profile = models.ForeignKey(UserProfile)
project = models.ForeignKey(Project)
class Conversation(models.Model):
subject = models.CharField(max_length=64)
sender = models.ForeignKey(User, related_name='sender')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
deleted = models.BooleanField(default=False)
recipients = models.ManyToManyField(User, through='ConversationRecipient')
class Message(models.Model):
conversation = models.ForeignKey(Conversation, related_name='messages')
sender = models.ForeignKey(User)
body = models.TextField(max_length=8192)
deleted = models.BooleanField(default=False)
status = models.IntegerField(default=1) # 1:Sent 2:Delivered 3:Read
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ConversationRecipient(models.Model):
recipient = models.ForeignKey(User, related_name='recipients')
conversation = models.ForeignKey(Conversation, related_name='conversation_recipient')
date_added = models.DateTimeField(auto_now_add=True, auto_now=False)
class UserMessage(models.Model):
message = models.ForeignKey(Message)
user = models.ForeignKey(User)
deleted = models.BooleanField(default=False)
class RequesterInputFile(models.Model):
    # TODO: will need to save files on a server rather than in a temporary folder
file = models.FileField(upload_to='tmp/')
deleted = models.BooleanField(default=False)
def parse_csv(self):
delimiter = get_delimiter(self.file.name)
df = pd.DataFrame(pd.read_csv(self.file, sep=delimiter))
return df.to_dict(orient='records')
def delete(self, *args, **kwargs):
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(root, self.file.url[1:])
os.remove(path)
super(RequesterInputFile, self).delete(*args, **kwargs)
class WorkerRequesterRating(models.Model):
origin = models.ForeignKey(UserProfile, related_name='rating_origin')
target = models.ForeignKey(UserProfile, related_name='rating_target')
module = models.ForeignKey(Module, related_name='rating_module')
weight = models.FloatField(default=2)
origin_type = models.CharField(max_length=16)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Comment(models.Model):
sender = models.ForeignKey(UserProfile, related_name='comment_sender')
body = models.TextField(max_length=8192)
parent = models.ForeignKey('self', related_name='reply_to', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
ordering = ['created_timestamp']
class ModuleComment(models.Model):
module = models.ForeignKey(Module, related_name='modulecomment_module')
comment = models.ForeignKey(Comment, related_name='modulecomment_comment')
deleted = models.BooleanField(default=False)
class TaskComment(models.Model):
task = models.ForeignKey(Task, related_name='taskcomment_task')
comment = models.ForeignKey(Comment, related_name='taskcomment_comment')
deleted = models.BooleanField(default=False) | mit |
elkingtonmcb/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
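
# Hedged sketch, not sklearn internals: the stability exercised above comes
# from ranking scores with a stable sort, so tied features keep their original
# column order and exactly k of them end up selected. The helper name is
# invented for illustration.
def _stable_top_k_sketch(scores, k):
    order = np.argsort(scores, kind='mergesort')  # mergesort is stable
    mask = np.zeros(len(scores), dtype=bool)
    mask[order[-k:]] = True
    return mask
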
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
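
# Hedged illustration, not an original test: a zero-variance column makes the
# ANOVA F-statistic 0/0, so f_classif yields NaN for that feature, and the test
# above checks that the selectors rank such a feature last rather than fail.
# The helper name is invented here.
def _constant_feature_scores_sketch():
    X = np.array([[0, 1, 0], [0, -1, -1], [0, .5, .5]])
    y = [1, 0, 1]
    with warnings.catch_warnings(record=True):
        # The division warning from the constant first column is silenced here.
        scores, pvalues = f_classif(X, y)
    return scores  # scores[0] is expected to be NaN
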
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all of the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
georgid/sms-tools | lectures/5-Sinusoidal-model/plots-code/sine-analysis-synthesis.py | 2 | 1538 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import blackmanharris
import sys, os
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N // 2
Ns = 512
hNs = Ns // 2
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, hN, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y = np.real(fftshift(ifft(Y))) * sum(blackmanharris(Ns))
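
# Illustrative cross-check, not part of the original sms-tools example: the
# same synthesis can be written directly in the time domain as a
# Blackman-Harris windowed sum of cosines built from the detected peak
# parameters (freqs, ipmag, ipphase). Peak magnitudes are in dB, hence the
# 10**(m/20) conversion; the result may differ from the FFT-based y above by a
# window-normalisation scale factor.
bh = blackmanharris(Ns)
n = np.arange(Ns) - hNs
y_direct = np.zeros(Ns)
for f, m, p in zip(freqs, ipmag, ipphase):
    y_direct += 10 ** (m / 20.0) * np.cos(2 * np.pi * f * n / float(fs) + p)
y_direct *= bh / sum(bh)
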
plt.figure(1, figsize=(9, 6))
plt.subplot(4,1,1)
plt.plot(np.arange(-M/2,M/2), x1, 'b', lw=1.5)
plt.axis([-M/2,M/2, min(x1), max(x1)])
plt.title("x (oboe-A4.wav), M = 601")
plt.subplot(4,1,2)
plt.plot(np.arange(hN), mX, 'r', lw=1.5)
plt.plot(iploc, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.axis([0, hN,-90,max(mX)+2])
plt.title("mX + spectral peaks; Blackman, N = 1024")
plt.subplot(4,1,3)
plt.plot(np.arange(hNs), mY, 'r', lw=1.5)
plt.axis([0, hNs,-90,max(mY)+2])
plt.title("mY; Blackman-Harris; Ns = 512")
plt.subplot(4,1,4)
plt.plot(np.arange(Ns), y, 'b', lw=1.5)
plt.axis([0, Ns,min(y),max(y)])
plt.title("y; Ns = 512")
plt.tight_layout()
plt.savefig('sine-analysis-synthesis.png')
plt.show()
| agpl-3.0 |
thaole16/Boids | boids/boids.py | 1 | 4866 | """
A refactored implementation of [Boids](http://dl.acm.org/citation.cfm?doid=37401.37406),
reworked from a deliberately bad starting implementation as a class exercise.
"""
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
class Boids(object):
def __init__(self,
boid_count=50,
x_positions=[-450, 50.0],
y_positions=[300.0, 600.0],
x_velocities=[0, 10.0],
y_velocities=[-20.0, 20.0],
move_to_middle_strength=0.01,
alert_distance=100,
formation_flying_distance=10000,
formation_flying_strength=0.125):
self.boid_count = boid_count
self.move_to_middle_strength = move_to_middle_strength
self.alert_distance = alert_distance
self.formation_flying_distance = formation_flying_distance
self.formation_flying_strength = formation_flying_strength
self.boids_x = np.random.uniform(size=boid_count, *x_positions)
self.boids_y = np.random.uniform(size=boid_count, *y_positions)
self.positions = np.stack((self.boids_x, self.boids_y))
self.boid_x_velocities = np.random.uniform(size=boid_count, *x_velocities)
self.boid_y_velocities = np.random.uniform(size=boid_count, *y_velocities)
self.velocities = np.stack((self.boid_x_velocities, self.boid_y_velocities))
self.boids = (self.positions, self.velocities)
def fly_towards_the_middle(self, boids, move_to_middle_strength=0.01):
(positions, velocities) = boids
middle = np.mean(positions, 1)
move_to_middle = (middle[:, np.newaxis] - positions) * move_to_middle_strength
velocities += move_to_middle
def separation(self, coords):
separations = np.array(coords)[:, np.newaxis, :] - np.array(coords)[:, :, np.newaxis]
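        # Broadcasting yields shape (2, n_boids, n_boids): separations[d, i, j]
        # is the d-component of the vector pointing from boid i to boid j.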
separation_distance_squared = separations[0, :, :] ** 2 + separations[1, :, :] ** 2
return separations, separation_distance_squared
def fly_away_from_nearby_boids(self, boids, alert_distance=100):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_alert = separation_distance_squared > alert_distance
close_separations = np.copy(separations)
close_separations[0, :, :][birds_outside_alert] = 0 # x positions
close_separations[1, :, :][birds_outside_alert] = 0 # y positions
velocities += np.sum(close_separations, 1)
def match_speed_with_nearby_boids(self, boids,
formation_flying_distance=10000,
formation_flying_strength=0.125):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_formation = separation_distance_squared > formation_flying_distance
velocity_difference = velocities[:, np.newaxis, :] - velocities[:, :, np.newaxis]
close_formation = np.copy(velocity_difference)
close_formation[0, :, :][birds_outside_formation] = 0
close_formation[1, :, :][birds_outside_formation] = 0
velocities += -1 * np.mean(close_formation, 1) * formation_flying_strength
def update_boids(self, boids):
(positions, velocities) = boids
# Fly towards the middle
self.fly_towards_the_middle(boids, self.move_to_middle_strength)
# Fly away from nearby boids
self.fly_away_from_nearby_boids(boids, self.alert_distance)
# Try to match speed with nearby boids
self.match_speed_with_nearby_boids(boids, self.formation_flying_distance, self.formation_flying_strength)
# Update positions
positions += velocities
def _animate(self, frame):
self.update_boids(self.boids)
(positions, velocities) = self.boids
self.scatter.set_offsets(np.transpose(positions))
def model(self, xlim=(-500, 1500), ylim=(-500, 1500), frames=50, interval=50, savefile=None):
colors = np.random.rand(self.boid_count)
boidsize = np.pi * (2 * np.random.rand(self.boid_count) + 2) ** 2
figure = plt.figure()
axes = plt.axes(xlim=xlim, ylim=ylim)
self.scatter = axes.scatter(self.boids_x, self.boids_y,
s=boidsize, c=colors, alpha=0.5, edgecolors=None)
anim = animation.FuncAnimation(figure, self._animate,
frames=frames, interval=interval)
plt.xlabel('x (arbitrary units)')
plt.ylabel('y (arbitrary units)')
plt.title("Boids a'Flocking")
        if savefile is not None:
anim.save(savefile)
plt.show()
if __name__ == "__main__":
boidsobject = Boids()
boidsobject.model()
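
    # A hedged usage sketch, not part of the original module: the parameter
    # values below are illustrative, not recommendations from the Boids paper.
    # Flip run_custom_demo to True to fly a larger, more skittish flock; saving
    # an animation would additionally need a writer such as ffmpeg.
    run_custom_demo = False
    if run_custom_demo:
        custom_flock = Boids(boid_count=100, alert_distance=200.0,
                             formation_flying_strength=0.2)
        custom_flock.model(frames=100, interval=40)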
| mit |
Ziqi-Li/bknqgis | pandas/pandas/tests/io/parser/quoting.py | 18 | 5813 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting='foo')
        # quoting must be in the range [0, 3]
msg = 'bad "quoting" value'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_ALL, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone
expected = DataFrame([[1, 2, '"foo"']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONE, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONNUMERIC,
names=cols)
tm.assert_frame_equal(result, expected)
def test_double_quote(self):
data = 'a,b\n3,"4 "" 5"'
expected = DataFrame([[3, '4 " 5']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[3, '4 " 5"']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=False)
tm.assert_frame_equal(result, expected)
def test_quotechar_unicode(self):
# See gh-14477
data = 'a\n1'
expected = DataFrame({'a': [1]})
result = self.read_csv(StringIO(data), quotechar=u('"'))
tm.assert_frame_equal(result, expected)
# Compared to Python 3.x, Python 2.x does not handle unicode well.
if PY3:
result = self.read_csv(StringIO(data), quotechar=u('\u0001'))
tm.assert_frame_equal(result, expected)
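

# A standalone, hedged sketch, not part of the original test suite: it shows
# how the csv quoting constants exercised above map onto pandas.read_csv for a
# single quoted row. The function name is invented here; it simply prints the
# parsed frames.
def _quoting_demo():
    import pandas as pd

    data = '1,2,"foo"'
    cols = ['a', 'b', 'c']
    # QUOTE_MINIMAL (the default) strips the quotes around "foo".
    print(pd.read_csv(StringIO(data), names=cols, quoting=csv.QUOTE_MINIMAL))
    # QUOTE_NONE leaves the quote characters in the parsed value: '"foo"'.
    print(pd.read_csv(StringIO(data), names=cols, quoting=csv.QUOTE_NONE))
    # QUOTE_NONNUMERIC casts the unquoted fields 1 and 2 to float.
    print(pd.read_csv(StringIO(data), names=cols, quoting=csv.QUOTE_NONNUMERIC))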
| gpl-2.0 |