repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jorisvandenbossche/numpy | numpy/lib/polynomial.py | 1 | 40755 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.core import overrides
from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
@set_module('numpy')
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def _poly_dispatcher(seq_of_zeros):
return seq_of_zeros
@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1., 0., 0., 0.])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
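# Illustrative sketch (the _example_* helper below is ours, not numpy API):
# ``poly`` maps roots to monic coefficients and ``roots`` maps them back, so
# the two functions round-trip up to ordering and floating-point error.
def _example_poly_roots_roundtrip():
    import numpy as np
    r = np.array([1.0, 2.0, 3.0])
    coeffs = np.poly(r)                  # array([ 1., -6., 11., -6.])
    recovered = np.sort(np.roots(coeffs))
    assert np.allclose(recovered, r)
    return coeffs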
def _roots_dispatcher(p):
return p
@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
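# Illustrative sketch (helper is ours, not numpy API): trailing zero
# coefficients become roots at zero, matching the construction above, which
# strips them before building the companion matrix and appends zeros after.
def _example_roots_trailing_zeros():
    import numpy as np
    # x**3 - x**2 = x**2 * (x - 1): a double root at 0 and a root at 1
    r = np.roots([1.0, -1.0, 0.0, 0.0])
    assert np.allclose(np.sort(r), [0.0, 0.0, 1.0])
    return r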
def _polyint_dispatcher(p, m=None, k=None):
return (p,)
@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
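# Illustrative sketch (helper is ours, not numpy API): differentiating the
# antiderivative recovers the original coefficients, and the integration
# constant ``k`` fixes the value of the antiderivative at zero.
def _example_polyint_roundtrip():
    import numpy as np
    p = np.array([1.0, 2.0, 3.0])        # x**2 + 2x + 3
    P = np.polyint(p, k=5.0)             # antiderivative with P(0) = 5
    assert np.allclose(np.polyder(P), p)
    assert np.polyval(P, 0.0) == 5.0
    return P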
def _polyder_dispatcher(p, m=None):
return (p,)
@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
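# Illustrative sketch (helper is ours, not numpy API): each differentiation
# drops the degree by one and multiplies coefficients by their powers,
# e.g. d/dx x**3 = 3*x**2.
def _example_polyder_powers():
    import numpy as np
    p = np.array([1.0, 0.0, 0.0, 0.0])   # x**3
    assert np.allclose(np.polyder(p), [3.0, 0.0, 0.0])      # 3*x**2
    assert np.allclose(np.polyder(p, 2), [6.0, 0.0])        # 6*x
    return np.polyder(p, 3)                                  # array([6.])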
def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
return (x, y, w)
@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error in the order `deg`, `deg-1`, ... `0`.
The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
method is recommended for new code as it is more stable numerically. See
the documentation of the method for more information.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool or str, optional
If given and not `False`, return not just the estimate but also its
covariance matrix. By default, the covariance is scaled by
chi2/sqrt(N-dof), i.e., the weights are presumed to be unreliable
except in a relative sense and everything is scaled such that the
reduced chi2 is unity. This scaling is omitted if ``cov='unscaled'``,
as is relevant for the case that the weights are 1/sigma**2, with
sigma known to be a reliable estimate of the uncertainty.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals is sum of squared residuals
of the least-squares fit, the effective rank of the scaled Vandermonde
coefficient matrix, its singular values, and the specified value of
`rcond`. For more details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
https://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> import warnings
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179 # may vary
>>> p(3.5)
-0.34732142857143039 # may vary
>>> p(10)
22.579365079365115 # may vary
High-order polynomials may oscillate wildly:
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', np.RankWarning)
... p30 = np.poly1d(np.polyfit(x, y, 30))
...
>>> p30(4)
-0.80000000000000204 # may vary
>>> p30(5)
-0.99999999999999445 # may vary
>>> p30(4.5)
-0.10547061179440398 # may vary
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=4)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
if cov == "unscaled":
fac = 1
else:
if len(x) <= order:
raise ValueError("the number of data points must exceed order "
"to scale the covariance matrix")
# note, this used to be: fac = resids / (len(x) - order - 2.0)
# it was decided that the "- 2" (originally justified by "Bayesian
# uncertainty analysis") is not what the user expects
# (see gh-11196 and gh-11197)
fac = resids / (len(x) - order)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
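# Illustrative sketch (helper is ours, not numpy API): fitting a degree-1
# polynomial to noisy samples of y = 2x + 1 recovers slope and intercept
# approximately; with ``cov=True`` the diagonal of the returned matrix gives
# per-coefficient variance estimates.
def _example_polyfit_line():
    import numpy as np
    rng = np.random.RandomState(0)
    x = np.linspace(0.0, 10.0, 50)
    y = 2.0 * x + 1.0 + 0.01 * rng.randn(50)
    coeffs, cov = np.polyfit(x, y, 1, cov=True)
    assert np.allclose(coeffs, [2.0, 1.0], atol=0.1)
    assert cov.shape == (2, 2)
    return coeffs, cov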
def _polyval_dispatcher(p, x):
return (p, x)
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
If `x` is a subtype of `ndarray` the return value will be of the same type.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asanyarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
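# Illustrative sketch (helper is ours, not numpy API): ``polyval`` is a plain
# Horner loop over the coefficients, so an explicit accumulation gives the
# same value.
def _example_polyval_horner():
    import numpy as np
    p = [3.0, 0.0, 1.0]                   # 3*x**2 + 1
    x = 5.0
    acc = 0.0
    for c in p:                           # Horner's scheme by hand
        acc = acc * x + c
    assert acc == np.polyval(p, x) == 76.0
    return acc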
def _binary_op_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
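# Illustrative sketch (helper is ours, not numpy API): polynomial
# multiplication is a full convolution of the coefficient sequences, so
# ``polymul`` and ``np.convolve`` agree on plain coefficient arrays.
def _example_polymul_is_convolution():
    import numpy as np
    a, b = [1, 2, 3], [9, 5, 1]
    assert np.array_equal(np.polymul(a, b), np.convolve(a, b))
    return np.polymul(a, b)               # array([ 9, 23, 38, 17,  3])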
def _polydiv_dispatcher(u, v):
return (u, v)
@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([1.5 , 1.75]), array([0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.astype(w.dtype)
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
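# Illustrative sketch (helper is ours, not numpy API): the quotient and
# remainder returned by ``polydiv`` satisfy the usual identity u = q*v + r,
# which can be checked with ``polymul`` and ``polyadd``.
def _example_polydiv_identity():
    import numpy as np
    u = np.array([3.0, 5.0, 2.0])         # 3*x**2 + 5*x + 2
    v = np.array([2.0, 1.0])              # 2*x + 1
    q, r = np.polydiv(u, v)
    assert np.allclose(np.polyadd(np.polymul(q, v), r), u)
    return q, r                           # (array([1.5 , 1.75]), array([0.25]))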
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
@set_module('numpy')
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1., -3., 2.])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@coeffs.setter
def coeffs(self, value):
# allowing this makes p.coeffs *= 2 legal
if value is not self._coeffs:
raise AttributeError("Cannot set attribute")
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
# our internal _coeffs property needs to be backed by __dict__['coeffs'] for
# scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
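# Illustrative sketch (helper is ours, not numpy API): poly1d supports natural
# arithmetic and evaluation, and its integ/deriv methods mirror polyint and
# polyder applied to the coefficient array.
def _example_poly1d_usage():
    import numpy as np
    p = np.poly1d([1.0, 2.0, 3.0])        # x**2 + 2*x + 3
    assert p(0.5) == 4.25
    q = p.deriv()                         # poly1d([2., 2.])
    assert np.allclose(q.integ(k=3.0).coeffs, p.coeffs)
    assert np.allclose((p * p).coeffs, np.polymul(p, p))
    return p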
| bsd-3-clause |
HIPS/optofit | optofit/neuron/channels.py | 1 | 45372 |
"""
Define the ionic channels used by a neuron
"""
import numpy as np
from numpy import exp
from optofit.models.model import *
from optofit.models.component import Component
from optofit.models.parameters import Parameter
from optofit.models.hyperparameters import hypers
from optofit.inference.distributions import GammaDistribution
from optofit.utils.utils import get_item_at_path
#
# def make_channel(compartment, model):
# """
# Make a channel with the given channel model.
# """
# if model['channel_type'].lower() == 'leak':
# return LeakChannel(compartment, model)
# elif model['channel_type'].lower() == 'na' or \
# model['channel_type'].lower() == 'sodium':
# return NaChannel(compartment, model)
#
# # Hippocampal CA3 style sodium channel
# elif model['channel_type'].lower() == 'ca3na' or \
# model['channel_type'].lower() == 'ca3_sodium':
# return Ca3NaChannel(compartment, model)
#
# # Delayed rectification is the default potassium channel
# elif model['channel_type'].lower() == 'kdr' or \
# model['channel_type'].lower() == 'k' or \
# model['channel_type'].lower() == 'potassium':
# return KdrChannel(compartment, model)
#
# elif model['channel_type'].lower() == 'ca3kdr':
# return Ca3KdrChannel(compartment, model)
#
# # Delayed rectification is the default potassium channel
# elif model['channel_type'].lower() == 'ca3ka':
# return Ca3KaChannel(compartment, model)
#
# # Hippocampal CA3 style calcium channel
# elif model['channel_type'].lower() == 'ca3ca' or \
# model['channel_type'].lower() == 'ca3_calcium' or \
# model['channel_type'].lower() == 'calcium':
# return Ca3CaChannel(compartment, model)
#
# elif model['channel_type'].lower() == "kahp" or \
# model['channel_type'].lower() == "ca3kahp":
# return Ca3KahpChannel(compartment, model)
#
# elif model['channel_type'].lower() == "ca3kc":
# return Ca3KcChannel(compartment, model)
#
# elif model['channel_type'].lower() == "chr2":
# return ChR2Channel(compartment, model)
#
# else:
# raise Exception("Unrecognized channel type: %s" % model['channel_type'])
class Channel(Component):
"""
Abstract base class for an ion channel.
"""
def __init__(self, name, compartment):
super(Channel, self).__init__()
self.parent = compartment
self.compartment = self.parent
self.name = name
# All channels (at least so far!) have a conductance and a reversal
# potential
self.g = None
self.E = None
self._latent_dtype = []
self._state_dtype = []
self._input_dtype = None
self._latent_lb = []
self._latent_ub = []
self._moves_calcium = False
self._calcium_dependent = False
@property
def moves_calcium(self):
return self._moves_calcium
@property
def calcium_dependent(self):
return self._calcium_dependent
@property
def latent_dtype(self):
return self._latent_dtype
@latent_dtype.setter
def latent_dtype(self, value):
self._latent_dtype = value
@property
def state_dtype(self):
return self._state_dtype
@state_dtype.setter
def state_dtype(self, value):
self._state_dtype = value
@property
def input_dtype(self):
return self._input_dtype
@input_dtype.setter
def input_dtype(self, value):
self._input_dtype = value
# Add properties for constraints on the latent variables
@property
def latent_lb(self):
return self._latent_lb
@latent_lb.setter
def latent_lb(self, value):
self._latent_lb = value
@property
def latent_ub(self):
return self._latent_ub
@latent_ub.setter
def latent_ub(self, value):
self._latent_ub = value
def steady_state(self, V):
# Steady state value of the latent vars as a function of voltage
return np.array([])
def kinetics(self, latent, inpt, state):
pass
def IV_plot(self, start=-200, stop=100):
comp_state_dt = np.dtype(self.compartment._state_vars)
if self.latent_dtype:
dt = np.dtype([(self.compartment.name, [('V', np.float64), ('[Ca]', np.float64), (self.name, self.latent_dtype)])])
else:
dt = np.dtype([(self.compartment.name, [('V', np.float64), ('[Ca]', np.float64)])])
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
if self.calcium_dependent:
Vs = np.linspace(start, stop, 100)
Cas = np.linspace(0, 1000, 100)
X, Y = np.meshgrid(Vs, Cas)
Z = np.zeros(X.shape)
for row in range(X.shape[0]):
for col in range(X.shape[1]):
state = np.ndarray(buffer = np.array([X[row, col], Y[row, col]]), dtype=comp_state_dt, shape = comp_state_dt.shape)
latent = np.ndarray(buffer = np.hstack((np.array([X[row, col], Y[row, col]]), self.steady_state(state))), dtype = dt, shape = dt.shape)
Z[row, col] = self.evaluate_state(np.array([latent]), ())[0][0]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
plt.title(self.name)
plt.show()
else:
ca = 0
Vs = np.linspace(start, stop, 1000)
state = np.ndarray(buffer = np.array([Vs[0], ca]), dtype=comp_state_dt, shape = comp_state_dt.shape)
latents = np.ndarray(buffer=np.hstack((np.array([Vs[0], ca]), self.steady_state(state))), dtype = dt, shape = dt.shape)
for v in Vs[1:]:
state = np.ndarray(buffer = np.array([v, ca]), dtype=comp_state_dt, shape = comp_state_dt.shape)
latents = np.append(latents, np.ndarray(buffer=np.hstack((np.array([v, ca]), self.steady_state(state))), dtype = dt, shape = dt.shape))
plt.plot(Vs, [self.evaluate_state(l, ()) for l in latents])
plt.title(self.name)
plt.show()
def _set_defaults(self, g, g_param, E, E_param):
if g is None:
self.g = g_param
else:
self.g = g
if E is None:
self.E = E_param
else:
self.E = E
class LeakChannel(Channel):
"""
Passive leak channel.
"""
def __init__(self, name, compartment,
g_leak=None, E_leak=None):
super(LeakChannel, self).__init__(name, compartment)
self.state_dtype = [('I', np.float64)]
# By default, g is gamma distributed
if g_leak is None:
self.g = Parameter('g_leak',
distribution=GammaDistribution(hypers['a_g_leak'].value,
hypers['b_g_leak'].value),
lb=0.0)
else:
assert isinstance(g_leak, Parameter)
self.g = g_leak
# By default, E is a hyperparameter
if E_leak is None:
self.E = hypers['E_leak']
else:
assert isinstance(E_leak, Parameter)
self.E = E_leak
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
state['I'] = x_comp['V'] - self.E.value
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
return None
class NaChannel(Channel):
"""
Sodium channel.
"""
def __init__(self, name, compartment,
g_na=None, E_na=None):
super(NaChannel, self).__init__(name, compartment)
self.latent_dtype = [('m', np.float64), ('h', np.float64)]
self.latent_lb = np.array([0,0])
self.latent_ub = np.array([1,1])
self.state_dtype = [('I', np.float64)]
# By default, g is gamma distributed
if g_na is None:
self.g = Parameter('g_na',
distribution=GammaDistribution(hypers['a_g_na'].value,
hypers['b_g_na'].value),
lb=0.0)
else:
self.g = g_na
# By default, E is a hyperparameter
if E_na is None:
self.E = hypers['E_Na']
else:
self.E = E_na
def steady_state(self, state):
V = state['V']
# Steady state value of the latent vars
# Compute the alpha and beta as a function of V
am1 = 0.1*(V+35.)/(1-exp(-(V+35.)/10.))
ah1 = 0.07*exp(-(V+50.)/20.)
bm1 = 4.*exp(-(V+65.)/18.)
bh1 = 1./(exp(-(V+35)/10.)+1)
xss = np.zeros(2)
xss[0] = am1/(am1+bm1)
xss[1] = ah1/(ah1+bh1)
return xss
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
state['I'] = x_ch['m']**3 * x_ch['h'] * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
V = x_comp['V']
m = x_ch['m']
h = x_ch['h']
# Compute the alpha and beta as a function of V
am1 = 0.1*(V+35.)/(1-exp(-(V+35.)/10.))
ah1 = 0.07*exp(-(V+50.)/20.)
bm1 = 4.*exp(-(V+65.)/18.)
bh1 = 1./(exp(-(V+35.)/10.)+1.)
# Compute the channel state updates
dxdt['m'] = am1*(1.-m) - bm1*m
dxdt['h'] = ah1*(1.-h) - bh1*h
return dxdt
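# Standalone sketch (helper is ours, not part of optofit): the NaChannel
# steady state above is just m_inf = am/(am+bm), h_inf = ah/(ah+bh) evaluated
# from the Hodgkin-Huxley rate functions at a fixed (clamped) voltage.
def _example_na_steady_state(V=-65.0):
    am = 0.1*(V+35.)/(1-np.exp(-(V+35.)/10.))
    ah = 0.07*np.exp(-(V+50.)/20.)
    bm = 4.*np.exp(-(V+65.)/18.)
    bh = 1./(np.exp(-(V+35.)/10.)+1.)
    return am/(am+bm), ah/(ah+bh)         # (m_inf, h_inf)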
class Ca3NaChannel(Channel):
"""
Sodium channel in a hippocampal CA3 neuron.
"""
def __init__(self, name, compartment,
g_ca3na = None,
E_ca3na = None):
super(Ca3NaChannel, self).__init__(name, compartment)
self._latent_dtype = [('m', np.float64), ('h', np.float64)]
self._latent_lb = np.array([0,0])
self._latent_ub = np.array([1,1])
self._state_dtype = [('I', np.float64)]
self._input_dtype = None
self._set_defaults(g_ca3na, Parameter('g_ca3na', distribution=
GammaDistribution(
hypers['a_g_ca3na'].value,
hypers['b_g_ca3na'].value
),
lb=0.0),
E_ca3na, hypers['E_Na'])
@property
def latent_dtype(self):
return self._latent_dtype
@property
def state_dtype(self):
return self._state_dtype
@property
def input_dtype(self):
return self._input_dtype
@property
def latent_lb(self):
return self._latent_lb
@property
def latent_ub(self):
return self._latent_ub
def steady_state(self, state):
V = state['V']
# Steady state value of the latent vars
# Compute the alpha and beta as a function of V
V_ref = V + 60
am1 = 0.32*(13.1-V_ref)/(exp((13.1-V_ref)/4)-1)
ah1 = 0.128*exp((17.0-V_ref)/18.0)
bm1 = 0.28*(V_ref-40.1)/(exp((V_ref-40.1)/5.0)-1.0)
bh1 = 4.0/(1.0+exp((40.-V_ref)/5.0))
xss = np.zeros(2)
xss[0] = am1/(am1+bm1)
xss[1] = ah1/(ah1+bh1)
return xss
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
state['I'] = x_ch['m']**2 * x_ch['h'] * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
V = x_comp['V']
# Use resting potential of zero
V_ref = V + 60
m = x_ch['m']
h = x_ch['h']
# Compute the alpha and beta as a function of V
am1 = 0.32*(13.1-V_ref)/(exp((13.1-V_ref)/4)-1)
ah1 = 0.128*exp((17.0-V_ref)/18.0)
bm1 = 0.28*(V_ref-40.1)/(exp((V_ref-40.1)/5.0)-1.0)
bh1 = 4.0/(1.0+exp((40.-V_ref)/5.0))
# Compute the channel state updates
dxdt['m'] = am1*(1.-m) - bm1*m
dxdt['h'] = ah1*(1.-h) - bh1*h
return dxdt
class KdrChannel(Channel):
"""
Potassium (delayed rectification) channel.
"""
def __init__(self, name, compartment,
g_kdr=None, E_kdr=None):
super(KdrChannel, self).__init__(name, compartment)
self.latent_dtype = [('n', np.float64)]
self.state_dtype = [('I', np.float64)]
self.latent_lb = np.array([0])
self.latent_ub = np.array([1])
# By default, g is gamma distributed
if g_kdr is None:
self.g = Parameter('g_kdr',
distribution=GammaDistribution(hypers['a_g_kdr'].value,
hypers['b_g_kdr'].value),
lb=0.0)
else:
self.g = g_kdr
# By default, E is a hyperparameter
if E_kdr is None:
self.E = hypers['E_K']
else:
self.E = E_kdr
def steady_state(self, state):
# Steady state activation values
V = state['V'] + 60
an1 = 0.01*(V+55.)/(1-exp(-(V+55.)/10.))
bn1 = 0.125*exp(-(V+65.)/80.)
xss = np.zeros(1)
xss[0] = an1/(an1+bn1)
return xss
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
state['I'] = x_ch['n']**4 * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
V = x_comp['V'] + 60
n = x_ch['n']
# Compute the alpha and beta as a function of V
an1 = 0.01*(V+55.) /(1-exp(-(V+55.)/10.))
bn1 = 0.125*exp(-(V+65.)/80.)
# Compute the channel state updates
dxdt['n'] = an1 * (1.0-n) - bn1*n
return dxdt
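# Standalone sketch (helper is ours, not part of optofit): the kinetics above
# define dn/dt = an*(1-n) - bn*n, so a simple forward-Euler integration at a
# clamped voltage relaxes n towards its steady state an/(an+bn).
def _example_kdr_gate_euler(V=-10.0, dt=0.01, steps=5000):
    V_ref = V + 60
    an = 0.01*(V_ref+55.)/(1-np.exp(-(V_ref+55.)/10.))
    bn = 0.125*np.exp(-(V_ref+65.)/80.)
    n = 0.0
    for _ in range(steps):
        n += dt * (an*(1.0-n) - bn*n)
    assert abs(n - an/(an+bn)) < 1e-3
    return n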
class Ca3KdrChannel(Channel):
"""
Potassium (delayed rectification) channel from Traub.
"""
def __init__(self, name, compartment,
g_ca3kdr = None,
E_ca3kdr = None):
super(Ca3KdrChannel, self).__init__(name, compartment)
self._latent_dtype = [('n', np.float64)]
self._state_dtype = [('I', np.float64)]
self._input_dtype = None
self._latent_lb = np.array([0])
self._latent_ub = np.array([1])
self._set_defaults(g_ca3kdr, Parameter('g_ca3kdr', distribution=
GammaDistribution(
hypers['a_g_ca3kdr'].value,
hypers['b_g_ca3kdr'].value
),
lb=0.0),
E_ca3kdr, hypers['E_K'])
@property
def latent_dtype(self):
return self._latent_dtype
@property
def state_dtype(self):
return self._state_dtype
@property
def input_dtype(self):
return self._input_dtype
@property
def latent_lb(self):
return self._latent_lb
@property
def latent_ub(self):
return self._latent_ub
def alpha_beta(self, state):
V = state['V'] + 60
# Traub 1991
alpha = .016*(35.1 - V) / (np.exp((35.1-V)/5)-1)
beta = .25 * np.exp((20 - V)/40)
"""
# Traub 1993
alpha = .03 * (17.2 - V) / (np.exp((17.2 -V) / 5) - 1)
beta = .45 * np.exp((12 - V) / 40)
"""
return alpha, beta
def steady_state(self, state):
# Steady state activation values
alpha, beta = self.alpha_beta(state)
xss = np.zeros(1)
xss[0] = alpha/(alpha+beta)
return xss
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
"""
# Traub 1993
state['I'] = x_ch['n']**2 * (x_comp['V'] - self.E.value)
"""
# Traub 1991
state['I'] = x_ch['n']**4 * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
n = x_ch['n']
# Compute the alpha and beta as a function of V
alpha, beta = self.alpha_beta(x_comp)
# Compute the channel state updates
dxdt['n'] = alpha * (1.0-n) - beta*n
return dxdt
class Ca3KahpChannel(Channel):
"""
Potassium (after hyperpolarization) channel.
"""
def __init__(self, name, compartment,
g_ca3kahp = None,
E_ca3kahp = None):
super(Ca3KahpChannel, self).__init__(name, compartment)
# Kahp requires Calcium compartment
from compartment import CalciumCompartment
# TODO: Or observed calcium?
assert isinstance(compartment, CalciumCompartment)
self._latent_dtype = [('q', np.float64)]
self._state_dtype = [('I', np.float64)]
self._input_dtype = None
self._latent_lb = np.array([0])
self._latent_ub = np.array([1])
self._calcium_dependent = True
self._set_defaults(g_ca3kahp, Parameter('g_ca3kahp', distribution=
GammaDistribution(
hypers['a_g_ca3kahp'].value,
hypers['b_g_ca3kahp'].value
),
lb=0.0),
E_ca3kahp, hypers['E_Kahp'])
@property
def calcium_dependent(self):
return self._calcium_dependent
@property
def latent_dtype(self):
return self._latent_dtype
@property
def state_dtype(self):
return self._state_dtype
@property
def input_dtype(self):
return self._input_dtype
@property
def latent_lb(self):
return self._latent_lb
@property
def latent_ub(self):
return self._latent_ub
def steady_state(self, state):
c_Ca = state['[Ca]']
#
# q = X(1,:);
#
# % Offset to resting potential of 0
#
# % Compute the alpha and beta as a function of V
aq1 = np.minimum((0.2e-4)*c_Ca, 0.01)  # elementwise cap at 0.01; np.min would treat 0.01 as an axis
bq1 = 0.001
return np.array([aq1/(aq1 + bq1)])
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
state['I'] = x_ch['q'] * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
V = x_comp['V']
c_Ca = x_comp['[Ca]']
q = x_ch['q']
# Offset to resting potential of 0
V_ref = 60 + V
# % Compute the alpha and beta as a function of V
aq1 = np.minimum((0.2e-4)*c_Ca, 0.01)  # elementwise cap at 0.01; np.min would treat 0.01 as an axis
bq1 = 0.001
# % Compute the channel state updates
dxdt['q'] = aq1*(1-q) - bq1*q
return dxdt
class Ca3KaChannel(Channel):
"""
Potassium (A-type transient current) channel.
"""
def __init__(self, name, compartment,
g_ca3ka = None,
E_ca3ka = None):
super(Ca3KaChannel, self).__init__(name, compartment)
self._latent_dtype = [('a', np.float64), ('b', np.float64)]
self._state_dtype = [('I', np.float64)]
self._input_dtype = None
self._latent_lb = np.array([0, 0])
self._latent_ub = np.array([1, 1])
self._set_defaults(g_ca3ka, Parameter('g_ca3ka', distribution=
GammaDistribution(
hypers['a_g_ca3ka'].value,
hypers['b_g_ca3ka'].value
),
lb=0.0),
E_ca3ka, hypers['E_K'])
@property
def latent_dtype(self):
return self._latent_dtype
@property
def state_dtype(self):
return self._state_dtype
@property
def input_dtype(self):
return self._input_dtype
@property
def latent_lb(self):
return self._latent_lb
@property
def latent_ub(self):
return self._latent_ub
def steady_state(self, state):
V = state['V']
# Steady state activation values
# TODO
xss = np.zeros(2)
# Offset to resting potential of 0
V_ref = 60 + V
# Compute the alpha and beta as a function of V
aa1 = 0.02*(13.1-V_ref)/(exp((13.1-V_ref)/10.)-1)
ba1 = 0.0175*(V_ref-40.1)/(exp((V_ref-40.1)/10.) - 1)
# Inactivation variable b
ab1 = 0.0016*exp((-13-V_ref)/18.0)
bb1 = 0.05/(1+exp((10.1-V_ref)/5.0))
xss[0] = aa1/(aa1+ba1)
xss[1] = ab1/(ab1+bb1)
return xss
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
state['I'] = x_ch['a'] * x_ch['b'] * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
V = x_comp['V']
# Offset to resting potential of 0
V_ref = 60 + V
# Compute the alpha and beta as a function of V
aa1 = 0.02*(13.1-V_ref)/(exp((13.1-V_ref)/10.)-1)
ba1 = 0.0175*(V_ref-40.1)/(exp((V_ref-40.1)/10.)-1)
# Inactivation variable b
ab1 = 0.0016*exp((-13.0-V_ref)/18.0)
bb1 = 0.05/(1+exp((10.1-V_ref)/5.0))
# Compute the channel state updates
dxdt['a'] = aa1*(1-x_ch['a']) - ba1*x_ch['a']
dxdt['b'] = ab1*(1-x_ch['b']) - bb1*x_ch['b']
return dxdt
class Ca3CaChannel(Channel):
"""
High Threshold Calcium channel from Traub 1994
"""
def __init__(self, name, compartment,
g_ca3ca = None,
E_ca3ca = None):
super(Ca3CaChannel, self).__init__(name, compartment)
self._latent_dtype = [('s', np.float64), ('r', np.float64)]
self._state_dtype = [('I', np.float64)]
self._input_dtype = None
self._latent_lb = np.array([0, 0])
self._latent_ub = np.array([1, 1])
self._moves_calcium = True
self._set_defaults(g_ca3ca, Parameter('g_ca3ca', distribution=
GammaDistribution(
hypers['a_g_ca3ca'].value,
hypers['b_g_ca3ca'].value
),
lb=0.0),
E_ca3ca, hypers['E_Ca'])
@property
def moves_calcium(self):
return self._moves_calcium
@property
def latent_dtype(self):
return self._latent_dtype
@property
def state_dtype(self):
return self._state_dtype
@property
def input_dtype(self):
return self._input_dtype
@property
def latent_lb(self):
return self._latent_lb
@property
def latent_ub(self):
return self._latent_ub
def steady_state(self, state):
V = state['V']
# Steady state activation values
V_ref = 60 + V
alpha = 1.6 / (1 + np.exp(-.072 * (V_ref - 65)))
beta = .02 * (V_ref - 51.1) / (np.exp((V_ref - 51.1) / 5) - 1)
if V_ref <= 0:
r_alpha = .005
r_beta = 0
else:
r_alpha = np.exp(-V_ref / 20) / 200
r_beta = 0.005 - r_alpha
return np.array([(alpha / (alpha + beta))[0], r_alpha/(r_alpha + r_beta)])
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(len(latent), dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
state['I'] = (x_ch['s'] ** 2) * x_ch['r'] * (x_comp['V'] - self.E.value)
#print "x_ch: ", x_ch['s']
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
V = x_comp['V']
# Offset to resting potential of 0
V_ref = 60 + V
alpha = 1.6 / (1 + np.exp(-.072 * (V_ref - 65)))
beta = .02 * (V_ref - 51.1) / (np.exp((V_ref - 51.1) / 5) - 1)
r_alpha = np.exp(-V_ref / 20) / 200
r_alpha[V_ref <= 0] = .005
r_beta = 0.005 - r_alpha
dxdt['s'] = alpha * (1 - x_ch['s']) - beta * x_ch['s']
dxdt['r'] = r_alpha * (1 - x_ch['r']) - r_beta * x_ch['r']
return dxdt
class Ca3KcChannel(Channel):
"""
    Calcium-dependent potassium (K(C)) channel from Traub 1994
"""
def __init__(self, name, compartment,
g_ca3kc = None,
E_ca3kc = None):
super(Ca3KcChannel, self).__init__(name, compartment)
self._latent_dtype = [('c', np.float64)]
self._state_dtype = [('I', np.float64)]
self._input_dtype = None
self._latent_lb = np.array([0])
self._latent_ub = np.array([1])
self._calcium_dependent = True
self._set_defaults(g_ca3kc, Parameter('g_ca3kc', distribution=
GammaDistribution(
hypers['a_g_ca3kc'].value,
hypers['b_g_ca3kc'].value
),
lb=0.0),
E_ca3kc, hypers['E_Ca3Kc'])
@property
def moves_calcium(self):
return self._moves_calcium
@property
def calcium_dependent(self):
return self._calcium_dependent
@property
def latent_dtype(self):
return self._latent_dtype
@property
def state_dtype(self):
return self._state_dtype
@property
def input_dtype(self):
return self._input_dtype
@property
def latent_lb(self):
return self._latent_lb
@property
def latent_ub(self):
return self._latent_ub
def alpha_beta(self, state):
V = state['V']
V_ref = V + 60
alpha = np.zeros(V_ref.shape)
beta = np.zeros(V_ref.shape)
if V_ref.size == 1:
if V_ref <= 50:
alpha = (np.exp(((V_ref - 10)/11) - ((V_ref - 6.5)/27)) / 18.975)[V_ref <= 50]
beta = (2 * np.exp(-1 * (V_ref - 6.5) / 27) - alpha)
else:
alpha = 2 * np.exp(-1 * (V_ref - 6.5) / 27)
beta = 0
else:
# Condition 1: V_ref <= 50
alpha[V_ref<=50] = np.exp(((V_ref[V_ref<=50] - 10)/11) - ((V_ref[V_ref<=50] - 6.5)/27)) / 18.975
beta[V_ref<=50] = 2 * np.exp(-1 * (V_ref[V_ref<=50] - 6.5) / 27) - alpha[V_ref<=50]
# Condition 2: V_ref > 50
alpha[V_ref>50] = 2 * np.exp(-1 * (V_ref[V_ref>50] - 6.5) / 27)
beta[V_ref>50] = 0.0
# if V_ref <= 50:
# alpha = np.exp(((V_ref - 10)/11) - ((V_ref - 6.5)/27)) / 18.975
# else:
# alpha = 2 * np.exp(-1 * (V_ref - 6.5) / 27)
# beta = (2 * np.exp(-1 * (V_ref - 6.5) / 27) - alpha)
return alpha, beta
def steady_state(self, state):
alpha, beta = self.alpha_beta(state)
# Steady state activation values
return np.array(alpha / (alpha + beta))
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
#print "c: ", x_ch['c']
#print "Ca: ", x_comp['[Ca]'] / 250
#print "min: ", np.min(1, x_comp['[Ca]'] / 250)
#print "ans: ", x_ch['c'] * np.min(1, x_comp['[Ca]'] / 250) * (x_comp['V'] - self.E.value)
state['I'] = x_ch['c'] * np.minimum(1, x_comp['[Ca]'] / 250) * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
alpha, beta = self.alpha_beta(x_comp)
dxdt['c'] = alpha * (1 - x_ch['c']) - beta * x_ch['c']
return dxdt
class ChR2Channel(Channel):
"""
Voltage and light gated ChR2 from Williams
"""
def __init__(self, name, compartment,
g_chr2 = None,
E_chr2 = None):
super(ChR2Channel, self).__init__(name, compartment)
self._latent_dtype = [('O1', np.float64),
('O2', np.float64),
('C1', np.float64),
('C2', np.float64),
('p', np.float64)]
self._state_dtype = [('I', np.float64)]
# self._input_dtype = []
self._latent_lb = np.array([0, 0, 0, 0, 0])
self._latent_ub = np.array([1, 1, 1, 1, 1])
self._set_defaults(g_chr2, Parameter('g_chr2', distribution=
GammaDistribution(
hypers['a_g_chr2'].value,
hypers['b_g_chr2'].value
),
lb=0.0),
E_chr2, hypers['E_ChR2'])
@property
def latent_dtype(self):
return self._latent_dtype
@property
def state_dtype(self):
return self._state_dtype
@property
def input_dtype(self):
return self._input_dtype
@property
def latent_lb(self):
return self._latent_lb
@property
def latent_ub(self):
return self._latent_ub
def steady_state(self, state):
ans = np.zeros(5)
# start in the closed state
ans[2] = 0.99
# ans[3] = 0.99
# Steady state activation values
return ans
def evaluate_state(self, latent, inpt):
"""
Evaluate the state of this compartment
"""
state = np.zeros(latent.shape, dtype=self.state_dtype)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
# Alert: Is this true?
V = x_comp['V']
G = (10.6408 - 14.6408*np.exp(-V/42.7671)) / V
gam = 0.1
state['I'] = G * (x_ch['O1'] + gam*x_ch['O2']) * (x_comp['V'] - self.E.value)
return state
def kinetics(self, latent, inpt, state):
"""
Compute the state kinetics, d{latent}/dt, according to the Hodgkin-Huxley eqns,
given current state x and external or given variables y.
latent: latent state variables of the neuron, e.g. voltage per compartment,
channel activation variables, etc.
inpt: observations, e.g. supplied irradiance, calcium concentration, injected
current, etc.
state: evaluated state of the neuron including channel currents, etc.
returns:
dxdt: Rate of change of the latent state variables.
"""
# import pdb; pdb.set_trace()
# Initialize dxdt for each latent state
dxdt = np.zeros(latent.shape, dtype=self.latent_dtype)
i_comp = get_item_at_path(inpt, self.compartment.path)
x_comp = get_item_at_path(latent, self.compartment.path)
x_ch = get_item_at_path(latent, self.path)
I = i_comp['Irr']
V = x_comp['V']
p = x_ch['p']
# Compute the voltage-sensitive rate constants for state transitions
Gd1 = 0.075 + 0.043*np.tanh(-(V+20)/20)
Gd2 = 0.05
# Gr = 4.34587e5 * np.exp(-0.0211539274*V)
Gr = 0.001
# Define a state variable for time and irradiance dependent activation
# function for ChR2 (post isomerization)
theta = 100*I # Optical stimulation protocol
tau_chr2 = 1.3 # Time constant for ChR2
S0 = 0.5*(1+np.tanh(120*(theta-0.1)))
dxdt['p'] = (S0-p)/tau_chr2
# Define the light-sensitive rate constants for state transitions
lamda = 470 # Wavelength of max absorption for retinal
eps1 = 0.8535 # quantum efficiency for photon absorption from C1
eps2 = 0.14 # quantum efficiency for photon absorption from C2
w_loss = 0.77;
F = 0.00006*I*lamda/w_loss #Photon flux (molecules/photon/sec)
# F = (sig_ret/hc)*I*lamda/w_loss*1e-9; % Photon flux (molecules/photon/sec)
# Light sensitive rates for C1->01 and C2->O2 transition
k1 = eps1 * F * p
k2 = eps2 * F * p
# Light sensitive O1->02 transitions
e12d = 0.011
e12c1 = 0.005
e12c2 = 0.024
e12 = e12d + e12c1*np.log(1+I/e12c2)
# Light sensitive O2->O1 transitions
e21d = 0.008
e21c1 = 0.004
e21c2 = 0.024
e21 = e21d + e21c1*np.log(1+I/e21c2)
dxdt['O1'] = k1 * x_ch['C1'] - (Gd1 + e12) * x_ch['O1'] + e21 * x_ch['O2']
dxdt['O2'] = k2 * x_ch['C2'] - (Gd2 + e21) * x_ch['O2'] + e12 * x_ch['O1']
dxdt['C1'] = Gr * x_ch['C2'] + Gd1 * x_ch['O1'] - k1 * x_ch['C1']
        # Note: C2 loses occupancy through the k2 (C2->O2) and Gr (C2->C1)
        # transitions, so those terms enter with a negative sign; otherwise
        # the four state probabilities would not be conserved.
        dxdt['C2'] = Gd2 * x_ch['O2'] - (k2 + Gr) * x_ch['C2']
return dxdt
def stationary(self, Irr, V):
I = Irr
V = V
dt = np.dtype(self.latent_dtype)
ans = np.zeros(dt.shape, dtype=dt)
# Compute the voltage-sensitive rate constants for state transitions
Gd1 = 0.075 + 0.043*np.tanh(-(V+20)/20)
Gd2 = 0.05
Gr = 4.34587e5 * np.exp(-0.0211539274*V)
# Define a state variable for time and irradiance dependent activation
# function for ChR2 (post isomerization)
theta = 100*I # Optical stimulation protocol
S0 = 0.5*(1+np.tanh(120*(theta-0.1)))
ans['p'] = S0
# Define the light-sensitive rate constants for state transitions
lamda = 470 # Wavelength of max absorption for retinal
eps1 = 0.8535 # quantum efficiency for photon absorption from C1
eps2 = 0.14 # quantum efficiency for photon absorption from C2
w_loss = 0.77;
F = 0.00006*I*lamda/w_loss #Photon flux (molecules/photon/sec)
# F = (sig_ret/hc)*I*lamda/w_loss*1e-9; % Photon flux (molecules/photon/sec)
# Light sensitive rates for C1->01 and C2->O2 transition
k1 = eps1 * F * S0
k2 = eps2 * F * S0
# Light sensitive O1->02 transitions
e12d = 0.011
e12c1 = 0.005
e12c2 = 0.024
e12 = e12d + e12c1*np.log(1+I/e12c2)
# Light sensitive O2->O1 transitions
e21d = 0.008
e21c1 = 0.004
e21c2 = 0.024
e21 = e21d + e21c1*np.log(1+I/e21c2)
mat = np.array([[Gd1 + e12, e12, Gd1, 0],
[e21, -(Gd2 + e21), 0, Gd2],
[k1, 0, k1, 0],
[0, k2, Gd2, (k2 + Gr)]])
import scipy.linalg
eigen = scipy.linalg.eig(mat * .01 - np.eye(4), left=True)
eigen = eigen[0]
#print eigen
stationary = eigen / np.sum(np.array(list(eigen)) ** 2)
ans['O1'] = stationary[0]
ans['O2'] = stationary[1]
ans['C1'] = stationary[2]
ans['C2'] = stationary[3]
return ans
def IV_plot(self, start = 0, stop = 2000):
dt = np.dtype([(self.compartment.name, [('V', np.float64), ('[Ca]', np.float64), (self.name, self.latent_dtype)])])
import matplotlib.pyplot as plt
from matplotlib import cm
        # needed to register the '3d' projection used by fig.gca() below
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
Irr = np.linspace(0, 700, 100)
V = np.linspace(-500, 1000, 100)
X, Y = np.meshgrid(Irr, V)
Z = np.zeros(X.shape)
for row in range(X.shape[0]):
for col in range(X.shape[1]):
latent = np.ndarray(
buffer = np.hstack((
np.array([[Y[row, col], 0]]),
np.array([self.stationary(X[row, col], Y[row, col]).tolist()])
)),
dtype = dt,
shape = dt.shape
)
Z[row, col] = self.evaluate_state(np.array([latent]), ())[0][0]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
#ax.contour(X, Y, Z, zdir='x', cmap=cm.coolwarm)
#ax.contour(X, Y, Z, zdir='y', cmap=cm.coolwarm)
#ax.contour(X, Y, Z, zdir='z', cmap=cm.coolwarm)
ax.set_xlabel('Irr')
ax.set_ylabel('V')
plt.title(self.name)
plt.show()
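# --- Hedged example (not part of the original module) -----------------------
# Every channel above integrates its gating variables with the same
# Hodgkin-Huxley pattern, dx/dt = alpha(V)*(1 - x) - beta(V)*x, whose fixed
# point is x_inf = alpha/(alpha + beta).  The helper below is an illustrative
# sketch using the Ca3Ka rate expressions from kinetics(); the function name
# and the default voltage are assumptions, not library API.
def _example_ca3ka_steady_state(V=-60.0):
    """Return (a_inf, b_inf) for the Ca3Ka activation/inactivation gates."""
    V_ref = 60 + V  # offset so the resting potential maps to 0
    aa = 0.02*(13.1-V_ref)/(np.exp((13.1-V_ref)/10.) - 1)
    ba = 0.0175*(V_ref-40.1)/(np.exp((V_ref-40.1)/10.) - 1)
    ab = 0.0016*np.exp((-13.0-V_ref)/18.0)
    bb = 0.05/(1+np.exp((10.1-V_ref)/5.0))
    return aa/(aa+ba), ab/(ab+bb)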
| gpl-2.0 |
stylianos-kampakis/scikit-learn | examples/classification/plot_lda_qda.py | 78 | 5046 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
trafferty/utils | python/Process_PSD.py | 1 | 7904 | import os
import sys
import argparse
import math
import time
from struct import unpack, pack
import numpy as np
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import matplotlib
# matplotlib needs a window framework, lets use QT4
matplotlib.use('Qt4Agg')
from colors import *
import lineplot
PrintColors = (BRIGHT_YELLOW, BRIGHT_CYAN)
# before use, set an env var pointing to the top of the proto dir, ie, .../TacticalSensorStack/libsensorservice/proto
TGI_PROTO_DIR = os.getenv("TGI_PROTO_DIR")
# to allow importing of services.sensor.ss_*
sys.path.append(os.path.abspath(TGI_PROTO_DIR))
# now import the GPB definitions
import services.sensor.ss_pb2 as pb
import services.sensor.ss_dataproduct_pb2 as dp
# for plot, this is the x-axis tick size
xtick_width_MHz = 1.0
def Process_PSD_Stream(inFile, plt):
"""Processes a stream of PSD data in the form of GPBs according to ss_dataproduct
Args:
inFile: the file handle for the input stream
plt : handle to the lineplot opbject to use (see lineplot.py)
Returns:
none...returns when stream is empty
Raises:
none.
"""
prev_group_id = -1
# create a dict for metrics, and initialize
metrics = {}
metrics['bytes_read'] = 0
metrics['data_rate_Mbps'] = 0.0
metrics['elapsed_time_s'] = 0.0
metrics['start_time_ts'] = time.time()
metrics['sweep_count'] = 0.0
metrics['sweeps_per_sec'] = 0.0
try:
in_buf = inFile.read(4)
while in_buf != "":
size = int(in_buf.encode('hex'), 16)
#size = unpack('I', pack('<I', int(data.encode('hex'), 16)))[0]
# read in the data_product
rawGPB = inFile.read(size)
metrics['bytes_read'] += size
data_response = pb.DataResponse()
data_response.ParseFromString(rawGPB)
for data_prod in data_response.data_products:
if data_prod.HasExtension(dp.power_spectrum_data):
power_spec_data = data_prod.Extensions[dp.power_spectrum_data]
# read the data portion into a numpy array
data_np = np.frombuffer(power_spec_data.spectrum_data.data, dtype=np.int16)
# grab the header data
start_freq_mhz = (power_spec_data.header.start_freq_hz / 1.0e6)
end_freq_mhz = (power_spec_data.header.end_freq_hz / 1.0e6)
inc_mhz = (end_freq_mhz - start_freq_mhz) / len(data_np)
# print some stats
print_PSD_Stats(data_np, power_spec_data.header, power_spec_data.spectrum_data.format,
(prev_group_id < power_spec_data.header.group_id), metrics['data_rate_Mbps'], metrics['sweeps_per_sec'])
# now either append to the plot data, or if we have a full spectrum, plot it
if prev_group_id < power_spec_data.header.group_id and plt != None:
if 'plot_data_np' in locals():
plt.ylim = (min(plot_data_np) - 20, max(plot_data_np) + 20)
plt.xlim = (min(x), max(x))
plt.set_data((x, plot_data_np))
plt.xticks = xticks
plt.xlabel = "MHz - [Bins: %d, Incr (kHz): %4.1f, GroupId: %d]" % ( len(data_np), (inc_mhz * 1000), power_spec_data.header.group_id )
plt.title = "Power Spectrum Density - [Sweeps per sec: %f, Data rate (Mbps): %f]" % (metrics['sweeps_per_sec'], metrics['data_rate_Mbps'])
#print "Starting new plot data..."
plot_data_np = data_np
x = np.linspace(start_freq_mhz, start_freq_mhz + (len(data_np) * inc_mhz), len(data_np), False)
xticks = np.arange(start_freq_mhz, end_freq_mhz, xtick_width_MHz).tolist()
metrics['sweep_count'] += 1.0
else:
plot_data_np = np.append(plot_data_np, data_np)
x = np.append(x, np.linspace(start_freq_mhz, start_freq_mhz + (len(data_np) * inc_mhz), len(data_np), False) )
for xtick in np.arange(start_freq_mhz, end_freq_mhz, xtick_width_MHz).tolist(): xticks.append(xtick)
prev_group_id = power_spec_data.header.group_id
in_buf = inFile.read(4)
# do some metrics on data to determine data rate
metrics['elapsed_time_s'] = time.time() - metrics['start_time_ts']
if metrics['elapsed_time_s'] >= 2.0:
metrics['data_rate_Mbps'] = ((metrics['bytes_read'] * 8 ) / (metrics['elapsed_time_s'] * 1024 * 1024 ))
metrics['sweeps_per_sec'] = ( metrics['sweep_count'] / metrics['elapsed_time_s'] )
metrics['bytes_read'] = 0
metrics['start_time_ts'] = time.time()
metrics['elapsed_time_s'] = 0.0
metrics['sweep_count'] = 0
finally:
inFile.close()
def print_PSD_Stats(data_np, header, format, print_header, data_rate_Mbps, sweeps_per_sec):
inc_mhz = ((header.end_freq_hz / 1.0e6) - (header.start_freq_hz / 1.0e6)) / len(data_np)
if print_header and not header.group_id % 5:
# 123456 12345 123456 12345 12345 12345 12345 1234567 1234567 1234567 1234 1234 1234 1234567 12345
print "%s------------------------------------------------------------------------------------------------------------------------%s" % (WHITE, RESET)
print "%s Group Data Bits/ Scaling Start End Incr DataRate Sweeps/ %s" % (WHITE, RESET)
print "%s ID Len Value Signed Factor Offset Comp Freq Freq (kHz) Max Min Mean (Mbps) sec %s" % (WHITE, RESET)
print "%s------------------------------------------------------------------------------------------------------------------------%s" % (WHITE, RESET)
print("%s% 6d % 6d % 5d % 6d % 5d % 5d % 5d %07.3f %07.3f %05.1f % 4d % 4d % 4d %07.3f %04.1f%s" %
(PrintColors[header.group_id%2], header.group_id, len(data_np),
format.bits_per_value, format.signed, format.scaling_factor, format.offset,
format.compression, (header.start_freq_hz / 1.0e6), (header.end_freq_hz / 1.0e6), (inc_mhz * 1000),
data_np.max(), data_np.min(), data_np.mean(), data_rate_Mbps, sweeps_per_sec, RESET))
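# --- Hedged example (illustrative only, not part of this tool) --------------
# Process_PSD_Stream() expects each DataResponse GPB to be framed by a 4-byte
# length prefix that the read path interprets as big-endian.  A producer could
# emit compatible framing roughly like this; `data_response` is any populated
# pb.DataResponse, and the function name is an assumption.
def write_framed_gpb(out_file, data_response):
    raw = data_response.SerializeToString()
    out_file.write(pack('>I', len(raw)))  # 4-byte big-endian length prefix
    out_file.write(raw)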
class GPBProcess(QThread):
def run(self):
Process_PSD_Stream(self.inFile, self.plt)
if __name__ == "__main__":
'''
process_PSD.py GPBDataFile
'''
parser = argparse.ArgumentParser(description='Processes a stream of GPBs, extracting PSD data, from either a file or stdin, analyze and (optionally) plot')
parser.add_argument('-i', dest='in_file', type=str, help='input file...if not specified then use stdin')
parser.add_argument('-p', dest='do_plot', type=bool, default=True, help='plot data. default is True')
args = parser.parse_args()
# matplotlib needs a window framework...we use QT4, so we need to create a QApplication
qapp = QApplication([])
    if args.do_plot:
        plt = lineplot.LinePlotWidget(title="Power Spectrum")
        plt.ylabel = "dBm"
        plt.xlabel = "MHz"
        plt.show()
    else:
        print "No plotting!"
        plt = None
if args.in_file:
inFile = open(args.in_file, 'rb')
else:
inFile = sys.stdin
#QTimer.singleShot(1000, lambda: Process_PSD_Stream(inFile, plt))
gpb_process = GPBProcess()
gpb_process.inFile = inFile
gpb_process.plt = plt
QTimer.singleShot(100, gpb_process.start)
qapp.exec_()
| gpl-2.0 |
scikit-optimize/scikit-optimize | examples/sampler/sampling_comparison.py | 2 | 6903 | """
==========================================
Comparing initial point generation methods
==========================================
Holger Nahrstaedt 2020
.. currentmodule:: skopt
Bayesian optimization or sequential model-based optimization uses a surrogate
model to model the expensive to evaluate function `func`. There are several
choices for what kind of surrogate model to use. This notebook compares the
performance of:
* Halton sequence,
* Hammersly sequence,
* Sobol' sequence and
* Latin hypercube sampling
as initial points. The purely random point generation is used as
a baseline.
"""
print(__doc__)
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
#############################################################################
# Toy model
# =========
#
# We will use the :class:`benchmarks.hart6` function as a toy model for the expensive function.
# In a real world application this function would be unknown and expensive
# to evaluate.
from skopt.benchmarks import hart6 as hart6_
# redefined `hart6` to allow adding arbitrary "noise" dimensions
def hart6(x, noise_level=0.):
return hart6_(x[:6]) + noise_level * np.random.randn()
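# Hedged illustration (not part of the original example): the wrapper above
# only looks at the first six coordinates, so extra "noise" dimensions are
# ignored.  The probe point below is arbitrary and chosen only for
# demonstration.
_probe = np.asarray([0.2, 0.3, 0.5, 0.3, 0.7, 0.1, 0.0, 0.0])
print("hart6 at a noiseless 8-d probe point:", hart6(_probe, noise_level=0.))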
from skopt.benchmarks import branin as _branin
def branin(x, noise_level=0.):
return _branin(x) + noise_level * np.random.randn()
#############################################################################
from matplotlib.pyplot import cm
import time
from skopt import gp_minimize, forest_minimize, dummy_minimize
def plot_convergence(result_list, true_minimum=None, yscale=None, title="Convergence plot"):
ax = plt.gca()
ax.set_title(title)
ax.set_xlabel("Number of calls $n$")
ax.set_ylabel(r"$\min f(x)$ after $n$ calls")
ax.grid()
if yscale is not None:
ax.set_yscale(yscale)
colors = cm.hsv(np.linspace(0.25, 1.0, len(result_list)))
for results, color in zip(result_list, colors):
name, results = results
n_calls = len(results[0].x_iters)
iterations = range(1, n_calls + 1)
mins = [[np.min(r.func_vals[:i]) for i in iterations]
for r in results]
ax.plot(iterations, np.mean(mins, axis=0), c=color, label=name)
#ax.errorbar(iterations, np.mean(mins, axis=0),
# yerr=np.std(mins, axis=0), c=color, label=name)
if true_minimum:
ax.axhline(true_minimum, linestyle="--",
color="r", lw=1,
label="True minimum")
ax.legend(loc="best")
return ax
def run(minimizer, initial_point_generator,
n_initial_points=10, n_repeats=1):
return [minimizer(func, bounds, n_initial_points=n_initial_points,
initial_point_generator=initial_point_generator,
n_calls=n_calls, random_state=n)
for n in range(n_repeats)]
def run_measure(initial_point_generator, n_initial_points=10):
start = time.time()
    # n_repeats must be set to a much higher value to obtain meaningful results.
n_repeats = 1
res = run(gp_minimize, initial_point_generator,
n_initial_points=n_initial_points, n_repeats=n_repeats)
duration = time.time() - start
# print("%s %s: %.2f s" % (initial_point_generator,
# str(init_point_gen_kwargs),
# duration))
return res
#############################################################################
# Objective
# =========
#
# The objective of this example is to find one of these minima in as
# few iterations as possible. One iteration is defined as one call
# to the :class:`benchmarks.hart6` function.
#
# We will evaluate each model several times using a different seed for the
# random number generator. Then compare the average performance of these
# models. This makes the comparison more robust against models that get
# "lucky".
from functools import partial
example = "hart6"
if example == "hart6":
func = partial(hart6, noise_level=0.1)
bounds = [(0., 1.), ] * 6
true_minimum = -3.32237
n_calls = 40
n_initial_points = 10
yscale = None
title = "Convergence plot - hart6"
else:
func = partial(branin, noise_level=2.0)
bounds = [(-5.0, 10.0), (0.0, 15.0)]
true_minimum = 0.397887
n_calls = 30
n_initial_points = 10
yscale="log"
title = "Convergence plot - branin"
#############################################################################
from skopt.utils import cook_initial_point_generator
# Random search
dummy_res = run_measure("random", n_initial_points)
lhs = cook_initial_point_generator(
"lhs", lhs_type="classic", criterion=None)
lhs_res = run_measure(lhs, n_initial_points)
lhs2 = cook_initial_point_generator("lhs", criterion="maximin")
lhs2_res = run_measure(lhs2, n_initial_points)
sobol = cook_initial_point_generator("sobol", randomize=False,
min_skip=1, max_skip=100)
sobol_res = run_measure(sobol, n_initial_points)
halton_res = run_measure("halton", n_initial_points)
hammersly_res = run_measure("hammersly", n_initial_points)
grid_res = run_measure("grid", n_initial_points)
#############################################################################
# Note that this can take a few minutes.
plot = plot_convergence([("random", dummy_res),
("lhs", lhs_res),
("lhs_maximin", lhs2_res),
("sobol'", sobol_res),
("halton", halton_res),
("hammersly", hammersly_res),
("grid", grid_res)],
true_minimum=true_minimum,
yscale=yscale,
title=title)
plt.show()
#############################################################################
# This plot shows the value of the minimum found (y axis) as a function
# of the number of iterations performed so far (x axis). The dashed red line
# indicates the true value of the minimum of the :class:`benchmarks.hart6`
# function.
#############################################################################
# Test with different n_random_starts values
lhs2 = cook_initial_point_generator("lhs", criterion="maximin")
lhs2_15_res = run_measure(lhs2, 12)
lhs2_20_res = run_measure(lhs2, 14)
lhs2_25_res = run_measure(lhs2, 16)
#############################################################################
# n_random_starts = 10 produces the best results
plot = plot_convergence([("random - 10", dummy_res),
("lhs_maximin - 10", lhs2_res),
("lhs_maximin - 12", lhs2_15_res),
("lhs_maximin - 14", lhs2_20_res),
("lhs_maximin - 16", lhs2_25_res)],
true_minimum=true_minimum,
yscale=yscale,
title=title)
plt.show()
| bsd-3-clause |
operalib/operalib | operalib/preprocessing/simplex.py | 2 | 3304 | """Simplex coding module."""
from numpy import dot, array, vstack, hstack, ones, zeros, sqrt, asarray
from sklearn.preprocessing import LabelBinarizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
# pylint: disable=W0201,C0103
class SimplexCoding(BaseEstimator, TransformerMixin):
"""Simplex coding."""
def __init__(self, binarizer=None):
self.binarizer = binarizer
# self.simplex_operator_ = None
# self.binarizer_ = None
@staticmethod
def _code_i(dimension):
"""Simplex coding operator (internal).
https://papers.nips.cc/paper/4764-multiclass-learning-with-simplex-
coding.pdf
"""
if dimension > 1:
block1 = vstack((ones((1, 1)), zeros((dimension - 1, 1))))
block2 = vstack((-ones((1, dimension)) / dimension,
SimplexCoding._code_i(dimension - 1) *
sqrt(1. - 1. / (dimension * dimension))))
return hstack((block1, block2))
elif dimension == 1:
return array([1., -1.])
else:
raise ValueError('dimension should be at least one.')
@staticmethod
def code(dimension):
"""Simplex coding operator."""
return SimplexCoding._code_i(dimension - 1)
def fit(self, y):
"""Fit simplex coding
Parameters
----------
targets : array, shape = [n_samples,] or [n_samples, n_classes]
Target values. The 2-d array represents the simplex coding for
multilabel classification.
Returns
-------
self : returns an instance of self.
"""
if self.binarizer is None:
self.binarizer_ = LabelBinarizer(neg_label=0, pos_label=1,
sparse_output=True)
self.binarizer_.fit(y)
dimension = self.binarizer_.classes_.size
if dimension > 2:
self.simplex_operator_ = SimplexCoding.code(dimension)
else:
self.simplex_operator_ = ones((1, 1))
return self
def transform(self, y):
"""Transform multi-class labels to the simplex code.
Parameters
----------
targets : array or sparse matrix, shape = [n_samples,] or
[n_samples, n_classes]
Target values. The 2-d matrix represents the simplex code for
multilabel classification.
Returns
-------
Y : numpy array of shape [n_samples, n_classes - 1]
"""
check_is_fitted(self, 'simplex_operator_', 'binarizer_')
dimension = self.binarizer_.classes_.size
if dimension == 2:
return self.binarizer_.transform(y).toarray()
else:
return self.binarizer_.transform(y).dot(
asarray(self.simplex_operator_).T)
def inverse_transform(self, y):
"""Inverse transform."""
check_is_fitted(self, 'simplex_operator_', 'binarizer_')
dimension = self.binarizer_.classes_.size
if dimension == 2:
return self.binarizer_.inverse_transform(y)
else:
return self.binarizer_.inverse_transform(
dot(y, self.simplex_operator_))
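# --- Hedged usage sketch (not part of the library) ---------------------------
# For three classes the simplex code places one unit-norm column per class at
# the vertices of an equilateral triangle in R^2, and the columns sum to the
# zero vector.  Values below are rounded; this block is illustrative only.
if __name__ == '__main__':
    print(SimplexCoding.code(3).round(3))
    # [[ 1.    -0.5   -0.5  ]
    #  [ 0.     0.866 -0.866]]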
| bsd-3-clause |
seaotterman/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 9485 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
| apache-2.0 |
fja05680/pinkfish | examples/150.scaling-in-out/scaling_in_out.py | 1 | 6538 | """
Scaling in and out of a position using the double-7s strategy.
1. The SPY is above its 200-day moving average.
2. If the SPY closes at an X-day low, buy some shares. If it sets further
lows, buy some more.
3. If the SPY closes at an X-day high, sell some shares. If it sets further
highs, sell some more, etc...
Note:
This example helps demonstrate using some of the lower-level
pinkfish API. However, an easier approach using adjust_percent()
is given in strategy.py.
"""
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from talib.abstract import *
import pinkfish as pf
pf.DEBUG = False
default_options = {
'use_adj' : False,
'use_cache' : False,
'stop_loss_pct' : 1.0,
'margin' : 1,
'period' : 7,
'max_open_trades' : 4,
'enable_scale_in' : True,
'enable_scale_out' : True
}
class Strategy:
def __init__(self, symbol, capital, start, end, options=default_options):
self.symbol = symbol
self.capital = capital
self.start = start
self.end = end
        self.options = options.copy()
self.ts = None
self.rlog = None
self.tlog = None
self.dbal = None
self.stats = None
def _algo(self):
pf.TradeLog.cash = self.capital
pf.TradeLog.margin = self.options['margin']
stop_loss = 0
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
high = row.high; low = row.low; close = row.close;
end_flag = pf.is_last_row(self.ts, i)
shares = 0
max_open_trades = self.options['max_open_trades']
enable_scale_in = self.options['enable_scale_in']
enable_scale_out = self.options['enable_scale_out']
max_open_trades_buy = max_open_trades if enable_scale_in else 1
num_open_trades = self.tlog.num_open_trades
# Buy Logic
# - Buy if still open trades slots left
# and bull regime
# and price closes at period low
# and not end end_flag
if (num_open_trades < max_open_trades_buy
and row.regime > 0 and close == row.period_low and not end_flag):
# Calc number of shares for another cash-equal trade.
buying_power = self.tlog.calc_buying_power(close)
cash = buying_power / (max_open_trades_buy - num_open_trades)
shares = self.tlog.calc_shares(close, cash)
# Buy more shares if we have the cash.
if shares > 0:
# Enter buy in trade log
self.tlog.buy(date, close, shares)
# Set stop loss
stop_loss = (1-self.options['stop_loss_pct'])*close
# set sell_parts to max_open_trades
num_out_trades = max_open_trades
# Sell Logic
# First we check if we have any open trades, then
# - Sell if price closes at X day high.
# - Sell if price closes below stop loss.
# - Sell if end of data.
elif (num_open_trades > 0
and (close == row.period_high or low < stop_loss or end_flag)):
if not enable_scale_out or low < stop_loss or end_flag:
# Exit all positions.
shares = None
elif enable_scale_in:
# Exit one position.
shares = -1
else:
# Scaling out is done here by shares, for example
# if there are 100 shares and num trades is 4,
# then we reduce by 25 each time. This is
# different than scaling out by percentage of
# total fund value as is done in strategy.py.
shares = int(self.tlog.shares / num_out_trades)
num_out_trades -= 1
# Enter sell in trade log.
shares = self.tlog.sell(date, close, shares)
if shares > 0:
pf.DBG("{0} BUY {1} {2} @ {3:.2f}".format(
date, shares, self.symbol, close))
elif shares < 0:
pf.DBG("{0} SELL {1} {2} @ {3:.2f}".format(
date, -shares, self.symbol, close))
# Record daily balance.
self.dbal.append(date, high, low, close)
def run(self):
# Fetch and select timeseries.
self.ts = pf.fetch_timeseries(self.symbol, use_cache=self.options['use_cache'])
self.ts = pf.select_tradeperiod(self.ts, self.start, self.end, use_adj=self.options['use_adj'])
# Add technical indicator: 200 day sma regime filter.
self.ts['regime'] = pf.CROSSOVER(self.ts, timeperiod_fast=1, timeperiod_slow=200)
# Add technical indicators: X day high, and X day low.
self.ts['period_high'] = pd.Series(self.ts.close).rolling(self.options['period']).max()
self.ts['period_low'] = pd.Series(self.ts.close).rolling(self.options['period']).min()
# Finalize timeseries.
self.ts, self.start = pf.finalize_timeseries(self.ts, self.start)
# Create tlog and dbal objects.
self.tlog = pf.TradeLog(self.symbol)
self.dbal = pf.DailyBal()
# Run algo, get logs, and get stats.
self._algo()
self._get_logs()
self._get_stats()
def _get_logs(self):
self.rlog = self.tlog.get_log_raw()
self.tlog = self.tlog.get_log()
self.dbal = self.dbal.get_log(self.tlog)
def _get_stats(self):
self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)
def summary(strategies, metrics):
"""
Stores stats summary in a DataFrame.
stats() must be called before calling this function.
"""
index = []
columns = strategies.index
data = []
# Add metrics.
for metric in metrics:
index.append(metric)
data.append([strategy.stats[metric] for strategy in strategies])
df = pd.DataFrame(data, columns=columns, index=index)
return df
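# --- Hedged usage sketch (not in the original example) -----------------------
# How the pieces above are meant to fit together.  The symbol, dates, and the
# metric names passed to summary() are illustrative assumptions; metric names
# must match keys actually produced by pf.stats().
def example_run():
    s = Strategy('SPY', capital=10000,
                 start=datetime.datetime(2015, 1, 1),
                 end=datetime.datetime(2020, 1, 1),
                 options=default_options.copy())
    s.run()
    strategies = pd.Series({'double-7s': s})
    df = summary(strategies, metrics=['total_net_profit', 'max_drawdown'])
    plot_bar_graph(df, 'total_net_profit')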
def plot_bar_graph(df, metric):
"""
Plot Bar Graph.
stats() must be called before calling this function.
"""
df = df.loc[[metric]]
df = df.transpose()
fig = plt.figure()
axes = fig.add_subplot(111, ylabel=metric)
df.plot(kind='bar', ax=axes, legend=False)
axes.set_xticklabels(df.index, rotation=0)
| mit |
moonbury/pythonanywhere | MasteringMLWithScikit-learn/8365OS_04_Codes/movies.py | 3 | 2223 | __author__ = 'gavin'
import pandas as pd
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
import numpy as np
def main():
pipeline = Pipeline([
('vect', TfidfVectorizer()),
('clf', LogisticRegression())
])
parameters = {
# 'vect__max_df': (0.25, 0.5, 0.75),
'vect__stop_words': ('english', None),
# 'vect__max_features': (5000, 10000, None),
# 'vect__ngram_range': ((1, 1), (1, 2)),
# 'vect__use_idf': (True, False),
# 'vect__norm': ('l1', 'l2'),
# 'clf__penalty': ('l1', 'l2'),
# 'clf__C': (0.1, 1, 10),
}
df = pd.read_csv('movie-reviews/train.tsv', header=0, delimiter='\t')
X, y = df['Phrase'], df['Sentiment'].as_matrix()
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5)
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy')
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
predictions = grid_search.predict(X_test)
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Classification Report:', classification_report(y_test, predictions)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, predictions)
print cm
plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
predictions = np.ones(len(predictions)) * 2
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Degenerate Classification Report:', classification_report(y_test, predictions)
if __name__ == '__main__':
main() | gpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| mit |
kprestel/PyInvestment | pytech/fin/asset/asset.py | 2 | 6592 | import datetime as dt
import logging
from abc import ABCMeta, abstractmethod
from typing import Tuple
import numpy as np
import pandas as pd
import pytech.utils as utils
from pytech.data._holders import DfLibName
from pytech.decorators.decorators import memoize, write_chunks
from pytech.data.reader import BarReader
from pytech.fin.market_data.market import Market
BETA_STORE = 'pytech.beta'
def _calc_beta(df: pd.DataFrame) -> pd.Series:
"""
Calculates beta given a :class:`pd.DataFrame`.
    It is expected that the df has the stock returns in column 1 and the
    market returns in column 0.
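    Illustrative sketch (the column names are hypothetical, not part of the
    library)::
        df = pd.DataFrame({'mkt': mkt_ret, 'stk': stk_ret},
                          columns=['mkt', 'stk'])
        beta = _calc_beta(df)  # pd.Series indexed by ['stk']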
"""
x = df.values[:, [0]]
# noinspection PyUnresolvedReferences
x = np.concatenate([np.ones_like(x), x], axis=1)
beta = np.linalg.pinv(x.T.dot(x)).dot(x.T).dot(df.values[:, 1:])
return pd.Series(beta[1], df.columns[1:], name=df.index[-1])
class Asset(metaclass=ABCMeta):
"""
This is the base class that all Asset classes should inherit from.
Inheriting from it will provide a table name and the proper mapper args
required for the db. It will also allow it to have a relationship
with the :class:``OwnedAsset``.
The child class is responsible for giving each instance a ticker to identify it.
If the child class needs any more fields it is responsible for creating
them at the class level as well as populating them via the child's constructor,
in addition to calling the ``Asset`` constructor.
    Any instance of a child class of this base class is considered to be part
    of the **Asset Universe**, that is, the set of assets that are eligible to
    be traded. If an asset does not yet exist in the universe and the
    :class:``~pytech.portfolio.Portfolio`` tries to trade it, an exception
    will occur.
"""
def __init__(self, ticker: str, start_date: dt.datetime,
end_date: dt.datetime):
self.ticker = ticker
self.asset_type = self.__class__.__name__
self.logger = logging.getLogger(self.__class__.__name__)
start_date, end_date = utils.sanitize_dates(start_date, end_date)
self.start_date = start_date
self.end_date = end_date
self.market = Market(start_date=self.start_date,
end_date=self.end_date)
if self.start_date >= self.end_date:
raise ValueError('start_date must be older than end_date. '
f'start_date: {start_date} end_date: {end_date}.')
@property
def df(self):
return self.get_data()
@df.setter
def df(self, ohlcv):
if isinstance(ohlcv, pd.DataFrame) or isinstance(ohlcv, pd.Series):
self._ohlcv = ohlcv
else:
raise TypeError('data must be a pandas DataFrame or Series. '
f'{type(ohlcv)} was provided.')
@classmethod
def get_subclass_dict(cls, subclass_dict=None):
"""
Get a dictionary of subclasses for :class:`Asset` where the key is
the string name of the class and the value is the actual class
reference.
:param dict subclass_dict: This is used for recursion to maintain the
subclass_dict through each call.
:return: A dictionary where the key is the string name of the
subclass and the value is the reference to the class
:rtype: dict
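        Illustrative example (assuming only ``Stock``, defined below, exists)::
            Asset.get_subclass_dict()  # -> {'Stock': Stock}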
"""
        if subclass_dict is None:
            subclass_dict = {}
for subclass in cls.__subclasses__():
# prevent duplicate keys
if subclass.__name__ not in subclass_dict:
subclass_dict[subclass.__name__] = subclass
subclass.get_subclass_dict(subclass_dict)
return subclass_dict
@abstractmethod
def get_data(self) -> pd.DataFrame:
"""Must return a :class:`pd.DataFrame` with ticker data."""
raise NotImplementedError
class Stock(Asset):
def __init__(self, ticker: str, start_date: dt.datetime,
end_date: dt.datetime, source: str = 'google',
lib_name: str = 'pytech.bars'):
self.source = source
self.reader = BarReader(lib_name)
self.lib_name = lib_name
super().__init__(ticker, start_date, end_date)
@memoize
def get_data(self) -> pd.DataFrame:
return self.reader.get_data(self.ticker, self.source,
self.start_date, self.end_date)
def last_price(self, col=utils.CLOSE_COL):
return self.df[col][-1]
@write_chunks()
def _rolling_beta(self,
col=utils.CLOSE_COL,
window: int = 30) -> DfLibName:
"""
Calculate the rolling beta over a given window.
:param col: The column to use to get the returns.
:param window: The window to use to calculate the rolling beta (days)
:return: A DataFrame with the betas.
"""
stock_pct_change = pd.DataFrame(self.returns(col))
mkt_pct_change = pd.DataFrame(self.market.market[col].pct_change())
df: pd.DataFrame = pd.concat([mkt_pct_change, stock_pct_change],
axis=1)
betas = pd.concat([_calc_beta(sdf)
for sdf in utils.roll(df, window)], axis=1).T
betas['ticker'] = self.ticker
return DfLibName(betas, BETA_STORE)
def rolling_beta(self,
col=utils.CLOSE_COL,
window: int = 30) -> pd.DataFrame:
"""
Calculate the rolling beta over a given window.
This is a wrapper around `_rolling_beta` to return just a dataframe.
:param col: The column to use to get the returns.
:param window: The window to use to calculate the rolling beta (days)
:return: A DataFrame with the betas.
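        Illustrative usage (the ticker and dates are hypothetical)::
            stock = Stock('AAPL', dt.datetime(2016, 1, 1), dt.datetime(2017, 1, 1))
            betas = stock.rolling_beta(window=30)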
"""
df_lib_name = self._rolling_beta(col, window)
return df_lib_name.df
def returns(self, col=utils.CLOSE_COL) -> pd.Series:
return self.df[col].pct_change()
def avg_return(self, col=utils.CLOSE_COL):
ret = self.returns(col).mean()
return ret * 252
def cagr(self, col=utils.CLOSE_COL):
"""Compounding annual growth rate."""
days = (self.df.index[-1] - self.df.index[0]).days
        # ratio of the last price to the first, annualized over the holding period
        return ((self.df[col][-1] / self.df[col][0]) ** (365.0 / days)) - 1
def std(self, col=utils.CLOSE_COL):
"""Standard deviation of returns, *annualized*."""
return self.returns(col).std() * np.sqrt(252)
| mit |
trouden/MultiMediaVerwerking | Labo01/opdracht2RGB.py | 1 | 2398 | import math
import cv2
from matplotlib import pyplot as plt
import numpy as np
orig = cv2.imread('contrast.jpg')
orig = cv2.resize(orig, (0,0), fx=0.3, fy=0.3)
img = cv2.copyMakeBorder(orig,0,0,0,0,cv2.BORDER_REPLICATE)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# note: in OpenCV, shape is (rows, cols, channels); the names below are swapped
# relative to the usual convention but are used consistently throughout.
height = img.shape[1]
width = img.shape[0]
#print width
#print height
I = img.mean() / 255.0
print I*255.0
sigma = 0
for w in range(0, width):
for h in range(0, height):
px = np.array([img.item(w,h,0), img.item(w,h,1), img.item(w,h,2)]) / 255.0
temp = np.average(px) - I
        temp *= temp  # squared
sigma += temp
sigma /= (width * height)
sigma = math.sqrt(sigma)
print "I= " + str(I)
print "Sigma= " + str(sigma)
c = 1  # default value
gamma = 0
if I > 0.5:
gamma = 1 + (math.fabs(0.5 - I) / sigma)
else:
gamma = 1 / (1 + (math.fabs(0.5 - I ) / sigma))
print "gamma: " + str(gamma)
for w in range(0, width):
for h in range(0, height):
px = np.array([img.item(w,h,0), img.item(w,h,1), img.item(w,h,2)]) / 255.0
G = c * np.power(px, gamma)
G = G * 255.0
#print G[0]
img.itemset((w,h,0), G[0])
img.itemset((w,h,1), G[1])
img.itemset((w,h,2), G[2])
print str(img.min())
print str(img.max())
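# For reference only: a vectorized sketch of the same gamma correction as the
# per-pixel loop above (`img_vec` is a name introduced here purely for
# illustration; it is not used elsewhere).
img_vec = (c * np.power(orig / 255.0, gamma) * 255.0).astype(np.uint8)
print "max abs diff, loop vs vectorized: " + str(np.abs(img_vec.astype(int) - img.astype(int)).max())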
### alternative method
### bring the average brightness to ~128?
#img2 = cv2.copyMakeBorder(orig,0,0,0,0,cv2.BORDER_REPLICATE)
#img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
#
#height = img2.shape[1]
#width = img2.shape[0]
#
#
#I = img2.mean()
#precisie = 0.1  # 10 percent tolerance
#c = 1
#gamma = 0
#count = 0
## .9 * I <= I <= 1.1 * I
#
#while I > (1 + precisie) * 128 or I < (1 - precisie) * 128:
# print I
# print "gamma: " + str(gamma)
# for w in range(0, width):
# for h in range(0, height):
# px = img2.item(w,h) / 255.0
#
# if I > 128:
# gamma = 2.5
# else:
# gamma = .4
#
# G = c * math.pow(px, gamma)
# img2.itemset((w,h), G * 255.0)
#
# I = img2.mean()
# count += 1
#
#print "image2 average I: " + str(img2.mean())
#print "steps: " + str(count)
plt.subplot(131),plt.imshow(orig,'gray'),plt.title('ORIGINAL')
plt.subplot(132),plt.imshow(img,'gray'),plt.title('auto gamma correctie 1')
#plt.subplot(133),plt.imshow(img2,'gray'),plt.title('auto gamma correctie 2')
plt.show()
| mit |
rrohan/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | sklearn/discriminant_analysis.py | 27 | 26804 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
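    Examples
    --------
    >>> # illustrative only; X is any 2-D data array
    >>> X = np.random.RandomState(0).randn(20, 3)
    >>> s_emp = _cov(X)          # empirical covariance
    >>> s_lw = _cov(X, 'auto')   # Ledoit-Wolf shrinkage
    >>> s_fix = _cov(X, 0.1)     # fixed shrinkage towards a scaled identity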
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be a string, int, or float')
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
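        # The two statements below implement the linear discriminant functions
        # delta_k(x) = x' Sigma^-1 mu_k - 0.5 mu_k' Sigma^-1 mu_k + log(prior_k).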
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Get the maximum number of components
if self.n_components is None:
self._max_components = len(self.classes_) - 1
else:
self._max_components = min(len(self.classes_) - 1,
self.n_components)
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
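        # Each entry is, up to an additive constant, the class log-posterior:
        # -0.5 * (squared Mahalanobis distance + log|covariance_k|) + log(prior_k)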
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
NDKoehler/DataScienceBowl2017_7th_place | dsb3_networks/classification/resnet2D_0.5res_80/config_2Dfinal.py | 1 | 3202 | from collections import defaultdict
from datetime import datetime
import json
import tensorflow as tf
import os, sys
import pandas as pd
#config dic
H = defaultdict(lambda: None)
#All possible config options:
H['optimizer'] = 'MomentumOptimizer'#'RMSPropOptimizer'
H['learning_rate'] = 0.001
H['momentum'] = 0.9 #0.99
H['kernel_num'] = 16 #32
H['dropout_keep_prob'] = 1.0
H['gpu_fraction'] = 0.9
H['num_classes'] = 2
H['model_name'] = 'resnet2D'
H['pretrained_checkpoint_dir'] = '../luna_resnet2D/output_dir/gold_prio3_plane_mil0'#../luna_resnet2D/output_dir/gen8_20z_3rot_stage1_deep
H['output_dir'] = 'output_dir/old_but_gold_plane_mil0_b4_init_luna' #cross_crop_retrain_zrot
H['predictions_dir'] = ''
H['allow_soft_placement'] = True
H['log_device_placement'] = False
H['max_steps'] = 35
H['MOVING_AVERAGE_DECAY'] = 0.9
H['BATCH_NORM_CENTER'] = True
H['BATCH_NORM_SCALE'] = True
H['weights_initializer'] = 'xavier_initializer' #'xavier_initializer', 'xavier_initializer_conv2d', 'truncated_normal_initializer'
H['gpus'] = [0]
H['summary_step'] = 10
# list iterator
# H['train_lst'] = '../data/multiview-2/tr.lst'
# H['val_lst'] = '../data/multiview-2/va.lst'
H['train_lst'] = '../../datapipeline_final/dsb3_0/interpolate_candidates_res05/tr_patients_80.lst'
H['val_lst'] = '../../datapipeline_final/dsb3_0/interpolate_candidates_res05/va_patients_20.lst'
#tr_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/tr' + str(run_id) + '.lst'
#va_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/va' + str(run_id) + '.lst'
#H['train_lst'] = tr_path
#H['val_lst'] = va_path
H['candidate_mode'] = False
# crossed axes options - cross is centrally cropped -> layers are stacked in z-dim
H['num_crossed_layers'] = 1
H['crossed_axes'] = [0,1,2]
H['rand_drop_planes']=0
H['plane_mil'] = False
# y and x image_shape must be equal -> z has same shape!!!
# cropping is possible if z, y, and x in image_shape are equal and smaller than in in_image_shape
# images
# in_image_shapes[1:] must be equal to len of crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax
H['in_image_shape'] = [10, 64, 64, 64, 2] #256
# not working #H['crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # Default = False or None
H['image_shape'] = [10, 3*H['num_crossed_layers'], 64, 64, 2]
H['label_shape'] = [1] #256
H['batch_size'] = 4
#iterator settings
H['load_in_ram'] = True
# because the operation is time-consuming and lossy, rotation is applied around only one randomly chosen axis
H['rand_rot_axes'] = [0]#,1,2] # 0: z, 1: y, 2: x (attention: x and y rotation lasts long)
H['rand_rot'] = True
H['degree_90_rot'] = H['rand_rot']
H['min_rot_angle'] = -10 #degree
H['max_rot_angle'] = 10 #degree
H['rand_mirror_axes'] = [0,1,2] # 0: z, 1: y, 2: x else False
H['rand_cropping_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # crop within given range # default False: full range
H['save_step'] = 10 # saving checkpoint
H['tr_num_examples'] = len(pd.read_csv(H['train_lst'], header=None, sep='\t'))
H['va_num_examples'] = len(pd.read_csv(H['val_lst'], header=None, sep='\t'))
| mit |
noamraph/dreampie | dreampielib/gui/__init__.py | 1 | 59742 | # Copyright 2010 Noam Yorav-Raphael
#
# This file is part of DreamPie.
#
# DreamPie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DreamPie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DreamPie. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
from os import path
import time
import tempfile
from optparse import OptionParser
import subprocess
import webbrowser
import re
from keyword import iskeyword
import logging
from logging import debug
#logging.basicConfig(format="dreampie: %(message)s", level=logging.DEBUG)
def find_data_dir():
"""
Find the data directory in which to find files.
If we are inside the source directory, build subp zips.
"""
# The data directory is normally located at dreampielib/data.
# When running under py2exe, it is in the same directory as the executable.
# Running inside the source directory is detected by the presence of a
# file called 'dreampie' in the same directory as 'dreampielib'.
from os.path import join, dirname, abspath, isfile
if hasattr(sys, 'frozen'):
return abspath(join(dirname(sys.executable), 'data'))
dreampielib_dir = dirname(dirname(abspath(__file__)))
if isfile(join(dirname(dreampielib_dir), 'dreampie')):
# We're in the source path. Build zips if needed, and return the right
# dir.
from ..subp_lib import build
build()
return join(dreampielib_dir, 'data')
data_dir = find_data_dir()
gladefile = path.join(data_dir, 'dreampie.glade')
def load_pygtk():
"""On win32, load PyGTK from subdirectory, if available."""
from os.path import join, dirname, abspath
if hasattr(sys, 'frozen'):
pygtk_dir = join(dirname(abspath(sys.executable)), 'gtk-2.0')
else:
pygtk_dir = join(dirname(dirname(dirname(abspath(__file__)))), 'gtk-2.0')
if os.path.isdir(pygtk_dir):
sys.path.insert(0, pygtk_dir)
import runtime #@UnresolvedImport
if sys.platform == 'win32':
load_pygtk()
import gobject
gobject.threads_init() #@UndefinedVariable
import gtk
from gtk import gdk, glade
import pango
import gtksourceview2
from . import gtkexcepthook
gtkexcepthook.install(gladefile)
try:
from glib import timeout_add, idle_add
except ImportError:
# In PyGObject 2.14, it's in gobject.
from gobject import timeout_add, idle_add
from .. import __version__
from .SimpleGladeApp import SimpleGladeApp
from .keyhandler import (make_keyhandler_decorator, handle_keypress,
parse_keypress_event)
from .config import Config
from .config_dialog import ConfigDialog
from .write_command import write_command
from .newline_and_indent import newline_and_indent
from .output import Output
from .folding import Folding
from .selection import Selection
from .status_bar import StatusBar
from .vadj_to_bottom import VAdjToBottom
from .history import History
from .hist_persist import HistPersist
from .autocomplete import Autocomplete
from .call_tips import CallTips
from .autoparen import Autoparen
from .crash_workaround import TextViewCrashWorkaround
from .subprocess_handler import SubprocessHandler, StartError
from .common import beep, get_text, TimeoutError
from .file_dialogs import save_dialog
from .tags import (OUTPUT, STDIN, STDOUT, STDERR, EXCEPTION, PROMPT, COMMAND,
COMMAND_DEFS, COMMAND_SEP, MESSAGE, RESULT_IND, RESULT)
from . import tags
from .update_check import update_check
from . import bug_report
INDENT_WIDTH = 4
# Default line length, by which we set the default window size
LINE_LEN = 80
# Time to wait before autocompleting, to see if the user continues to type
AUTOCOMPLETE_WAIT = 400
# Time to wait for the subprocess for a result. The subprocess may be doing
# idle jobs, and so not return a result.
SUBP_WAIT_TIMEOUT_S = .5
# Maybe someday we'll want translations...
_ = lambda s: s
# A decorator for managing sourceview key handlers
sourceview_keyhandlers = {}
sourceview_keyhandler = make_keyhandler_decorator(sourceview_keyhandlers)
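# Handlers later in this file register themselves with, for example,
# @sourceview_keyhandler('Return', 0) to handle that key press in the
# source view.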
def get_widget(name):
"""Create a widget from the glade file."""
xml = glade.XML(gladefile, name)
return xml.get_widget(name)
class DreamPie(SimpleGladeApp):
def __init__(self, pyexec, runfile):
"""
pyexec - the Python interpreter executable
runfile - a filename to run upon startup, or None.
"""
SimpleGladeApp.__init__(self, gladefile, 'window_main')
self.load_popup_menus()
self.set_mac_accelerators()
self.config = Config()
if self.config.get_bool('start-rpdb2-embedded'):
print 'Starting rpdb2 embedded debugger...',
sys.stdout.flush()
import rpdb2; rpdb2.start_embedded_debugger('1234', timeout=0.1)
print 'Done.'
self.window_main.set_icon_from_file(
path.join(data_dir, 'dreampie.png'))
self.textbuffer = tb = self.textview.get_buffer()
self.init_textbufferview()
# Mark where the cursor was when the popup menu was popped
self.popup_mark = tb.create_mark('popup-mark', tb.get_start_iter(),
left_gravity=True)
# Remove the page in the notebook, which was added because empty
# notebooks cause warnings
self.notebook.remove_page(0)
# A list of callbacks to call when changing the sourcebuffer
self.sv_changed = []
self.sourceview = self.create_sourcebufferview()
self.sourcebuffer = self.sourceview.get_buffer()
# A tuple (page_num, text) of the recently closed tab
self.reopen_tab_data = None
# last (font, vertical_layout) configured. If they are changed,
# configure() will resize the window and place the paned.
self.last_configured_layout = (None, None)
self.configure()
self.output = Output(self.textview)
self.folding = Folding(self.textbuffer, LINE_LEN)
self.selection = Selection(self.textview, self.sourceview,
self.sv_changed,
self.on_is_something_selected_changed)
self.status_bar = StatusBar(self.sourcebuffer, self.sv_changed,
self.statusbar)
self.vadj_to_bottom = VAdjToBottom(self.scrolledwindow_textview
.get_vadjustment())
self.history = History(self.textview, self.sourceview, self.sv_changed,
self.config)
self.recent_manager = gtk.recent_manager_get_default()
self.menuitem_recent = [self.menuitem_recent0, self.menuitem_recent1,
self.menuitem_recent2, self.menuitem_recent3]
self.recent_filenames = [None] * len(self.menuitem_recent)
self.recent_manager.connect('changed', self.on_recent_manager_changed)
self.histpersist = HistPersist(self.window_main, self.textview,
self.status_bar, self.recent_manager)
self.update_recent()
self.autocomplete = Autocomplete(self.sourceview,
self.sv_changed,
self.window_main,
self.complete_attributes,
self.complete_firstlevels,
self.get_func_args,
self.find_modules,
self.get_module_members,
self.complete_filenames,
self.complete_dict_keys,
INDENT_WIDTH)
# Hack: we connect this signal here, so that it will have lower
# priority than the key-press event of autocomplete, when active.
self.sourceview_keypress_handler = self.sourceview.connect(
'key-press-event', self.on_sourceview_keypress)
self.sv_changed.append(self.on_sv_changed)
self.call_tips = CallTips(self.sourceview, self.sv_changed,
self.window_main, self.get_func_doc,
INDENT_WIDTH)
self.autoparen = Autoparen(self.sourcebuffer, self.sv_changed,
self.is_callable_only,
self.get_expects_str,
self.autoparen_show_call_tip,
INDENT_WIDTH)
self.subp = SubprocessHandler(
pyexec, data_dir,
self.on_stdout_recv, self.on_stderr_recv, self.on_object_recv,
self.on_subp_terminated)
# Number of RPC calls that timed out and expecting results
self._n_unclaimed_results = 0
try:
self.subp.start()
except StartError, e:
msg = gtk.MessageDialog(
None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE,
_("Couldn't start subprocess: %s") % e)
_response = msg.run()
msg.destroy()
print >> sys.stderr, e
sys.exit(1)
# Is the subprocess executing a command
self.set_is_executing(False)
# Are we trying to shut down
self.is_terminating = False
self.window_main.show()
self.subp_welcome, self.subp_can_mask_sigint = (
self.call_subp(u'get_subprocess_info'))
self.show_welcome()
self.configure_subp()
self.run_init_code(runfile)
bug_report.set_subp_info(pyexec, self.subp_welcome)
if self.config.get_bool('show-getting-started'):
self.show_getting_started_dialog()
self.config.set_bool('show-getting-started', False)
self.config.save()
update_check(self.on_update_available)
def on_sv_changed(self, new_sv):
self.sourceview.disconnect(self.sourceview_keypress_handler)
self.sourceview = new_sv
self.sourcebuffer = new_sv.get_buffer()
self.sourceview_keypress_handler = self.sourceview.connect(
'key-press-event', self.on_sourceview_keypress)
def load_popup_menus(self):
# Load popup menus from the glade file. Would not have been needed if
# popup menus could be children of windows.
xml = glade.XML(gladefile, 'popup_sel_menu')
xml.signal_autoconnect(self)
self.popup_sel_menu = xml.get_widget('popup_sel_menu')
xml = glade.XML(gladefile, 'popup_nosel_menu')
xml.signal_autoconnect(self)
self.popup_nosel_menu = xml.get_widget('popup_nosel_menu')
self.fold_unfold_section_menu = xml.get_widget('fold_unfold_section_menu')
self.copy_section_menu = xml.get_widget('copy_section_menu')
self.view_section_menu = xml.get_widget('view_section_menu')
self.save_section_menu = xml.get_widget('save_section_menu')
def set_mac_accelerators(self):
# Set up accelerators suitable for the Mac.
# Ctrl-Up and Ctrl-Down are taken by the window manager, so we use
# Ctrl-PgUp and Ctrl-PgDn.
# We want it to be easy to switch, so both sets of keys are always
# active, but only one, most suitable for each platform, is displayed
# in the menu.
accel_group = gtk.accel_groups_from_object(self.window_main)[0]
menu_up = self.menuitem_history_up
UP = gdk.keyval_from_name('Up')
PGUP = gdk.keyval_from_name('Prior')
menu_dn = self.menuitem_history_down
DN = gdk.keyval_from_name('Down')
PGDN = gdk.keyval_from_name('Next')
if sys.platform != 'darwin':
menu_up.add_accelerator('activate', accel_group, PGUP,
gdk.CONTROL_MASK, 0)
menu_dn.add_accelerator('activate', accel_group, PGDN,
gdk.CONTROL_MASK, 0)
else:
menu_up.remove_accelerator(accel_group, UP, gdk.CONTROL_MASK)
menu_up.add_accelerator('activate', accel_group, PGUP,
gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_up.add_accelerator('activate', accel_group, UP,
gdk.CONTROL_MASK, 0)
menu_dn.remove_accelerator(accel_group, DN, gdk.CONTROL_MASK)
menu_dn.add_accelerator('activate', accel_group, PGDN,
gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_dn.add_accelerator('activate', accel_group, DN,
gdk.CONTROL_MASK, 0)
def on_cut(self, _widget):
return self.selection.cut()
def on_copy(self, _widget):
return self.selection.copy()
def on_copy_commands_only(self, _widget):
return self.selection.copy_commands_only()
def on_paste(self, _widget):
return self.selection.paste()
def on_upward_find(self, _widget):
self.find(is_upward=True)
def on_downward_find(self, _widget):
self.find(is_upward=False)
def find(self, is_upward):
tb = self.textbuffer
sb = self.sourcebuffer
search_str = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
if not search_str:
self.status_bar.set_status(_(
"Type the text you want to search for in the code box, and "
"press Ctrl-F"))
beep()
return
if tb.get_has_selection():
sel_start, sel_end = tb.get_selection_bounds()
it = sel_start if is_upward else sel_end
elif self.textview.has_focus():
it = tb.get_iter_at_mark(tb.get_insert())
else:
it = tb.get_end_iter()
flags = gtk.TEXT_SEARCH_VISIBLE_ONLY
if is_upward:
match = it.backward_search(search_str, flags)
if match is None:
match = tb.get_end_iter().backward_search(search_str, flags)
else:
match = it.forward_search(search_str, flags)
if match is None:
match = tb.get_start_iter().forward_search(search_str, flags)
if match is None:
beep()
else:
start, end = match
tb.select_range(start, end)
self.textview.scroll_to_iter(start, 0)
def on_is_something_selected_changed(self, is_something_selected):
self.menuitem_cut.props.sensitive = is_something_selected
self.menuitem_copy.props.sensitive = is_something_selected
self.menuitem_copy_commands_only.props.sensitive = is_something_selected
self.menuitem_interrupt.props.sensitive = not is_something_selected
# Source buffer, Text buffer
def init_textbufferview(self):
tv = self.textview
tb = self.textbuffer
tv.set_wrap_mode(gtk.WRAP_CHAR)
self.textview_crash_workaround = TextViewCrashWorkaround(tv)
tags.add_tags(tb)
tv.connect('key-press-event', self.on_textview_keypress)
tv.connect('focus-in-event', self.on_textview_focus_in)
def get_char_width_height(self):
tv = self.textview
context = tv.get_pango_context()
metrics = context.get_metrics(tv.style.font_desc,
context.get_language())
charwidth = pango.PIXELS(metrics.get_approximate_digit_width())
# I don't know why +1
charheight = pango.PIXELS(metrics.get_ascent() + metrics.get_descent())+1
return charwidth, charheight
def set_window_size(self, vertical_layout):
charwidth, charheight = self.get_char_width_height()
if vertical_layout:
# I don't know why I have to add 2, but it works.
width = charwidth*(LINE_LEN+2)
height = charheight*30
else:
width = charwidth*((LINE_LEN-10)*2+2)
height = charheight*26
self.window_main.resize(width, height)
# Set the position of the paned. We wait until it is exposed because
# then its max_position is meaningful.
# In vertical layout we set it to maximum, since the sourceview has
# a minimum height.
def callback(_widget, _event):
if vertical_layout:
pane = self.vpaned_main
pane.set_position(pane.props.max_position)
else:
pane = self.hpaned_main
pane.set_position(pane.props.max_position // 2)
self.sourceview.disconnect(callback_id)
callback_id = self.sourceview.connect('expose-event', callback)
def create_sourcebufferview(self, page_num=None):
sb = gtksourceview2.Buffer()
sv = gtksourceview2.View(sb)
sv.show()
sv.connect('focus-in-event', self.on_sourceview_focus_in)
sv.connect('button-press-event', self.on_sourceview_button_press_event)
_charwidth, charheight = self.get_char_width_height()
self.configure_sourceview(sv)
lm = gtksourceview2.LanguageManager()
lm.set_search_path([path.join(data_dir, 'language-specs')])
sb.set_language(lm.get_language('python'))
scroll = gtk.ScrolledWindow()
scroll.show()
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scroll.add(sv)
scroll.set_size_request(-1, charheight * 4)
lbl = gtk.Label(' ')
if page_num is None:
page_num = self.notebook.get_current_page() + 1
self.notebook.insert_page(scroll, lbl, page_num)
self.notebook.set_current_page(page_num)
sv.grab_focus()
return sv
def sv_scroll_cursor_onscreen(self):
self.sourceview.scroll_mark_onscreen(self.sourcebuffer.get_insert())
def on_textview_focus_in(self, _widget, _event):
# Clear the selection of the sourcebuffer
self.sourcebuffer.move_mark(self.sourcebuffer.get_selection_bound(),
self.sourcebuffer.get_iter_at_mark(
self.sourcebuffer.get_insert()))
def on_sourceview_focus_in(self, _widget, _event):
# Clear the selection of the textbuffer
self.textbuffer.move_mark(self.textbuffer.get_selection_bound(),
self.textbuffer.get_iter_at_mark(
self.textbuffer.get_insert()))
def on_sourceview_button_press_event(self, _widget, event):
if event.button == 2 and self.textbuffer.get_has_selection():
commands = self.selection.get_commands_only()
self.sourcebuffer.insert_interactive_at_cursor(commands, True)
return True
def write(self, data, *tag_names):
self.textbuffer.insert_with_tags_by_name(
self.textbuffer.get_end_iter(), data, *tag_names)
def write_output(self, data, tag_names, onnewline=False, addbreaks=True):
"""
Call self.output.write with the given arguments, and autofold if needed.
"""
it = self.output.write(data, tag_names, onnewline, addbreaks)
if self.config.get_bool('autofold'):
self.folding.autofold(it, self.config.get_int('autofold-numlines'))
def set_is_executing(self, is_executing):
self.is_executing = is_executing
label = _(u'Execute Code') if not is_executing else _(u'Write Input')
self.menuitem_execute.child.props.label = label
self.menuitem_discard_hist.props.sensitive = not is_executing
@staticmethod
def replace_gtk_quotes(source):
# Work around GTK+ bug https://bugzilla.gnome.org/show_bug.cgi?id=610928
# in order to fix bug #525469 - replace fancy quotes with regular
# quotes.
return source.replace(u'\xa8', '"').replace(u'\xb4', "'")
def execute_source(self):
"""Execute the source in the source buffer.
"""
sb = self.sourcebuffer
source = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
source = source.rstrip()
source = self.replace_gtk_quotes(source)
try:
# There's a chance that the subprocess won't reply, because it's
# busy doing "idle jobs". For most queries we can ask for an answer
# and if it doesn't arrive, cancel what we tried to do and ignore
# the answer when it comes. However, here we can't let the execute
# function run and ignore the result, so we first call 'pause_idle'.
# If we don't get a reply for pause_idle, we don't execute.
self.call_subp_noblock(u'pause_idle')
except TimeoutError:
self.subp.send_object((u'resume_idle', ()))
self._n_unclaimed_results += 1
self.status_bar.set_status(_("The subprocess is currently busy"))
beep()
return
is_ok, syntax_error_info = self.call_subp(u'execute', source)
if not is_ok:
if syntax_error_info:
msg, lineno, offset = syntax_error_info
status_msg = _("Syntax error: %s (at line %d col %d)") % (
msg, lineno+1, offset+1)
# Work around a bug: offset may be wrong, which will cause
# gtk to crash if using sb.get_iter_at_line_offset.
iter = sb.get_iter_at_line(lineno)
iter.forward_chars(offset+1)
sb.place_cursor(iter)
else:
# Incomplete
status_msg = _("Command is incomplete")
sb.place_cursor(sb.get_end_iter())
self.status_bar.set_status(status_msg)
beep()
else:
self.set_is_executing(True)
write_command(self.write, source.strip())
self.output.start_new_section()
if not self.config.get_bool('leave-code'):
sb.delete(sb.get_start_iter(), sb.get_end_iter())
self.vadj_to_bottom.scroll_to_bottom()
def send_stdin(self):
"""Send the contents of the sourcebuffer as stdin."""
sb = self.sourcebuffer
s = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
if not s.endswith('\n'):
s += '\n'
self.write_output(s, [COMMAND, STDIN], addbreaks=False)
self.write('\r', COMMAND_SEP)
self.output.start_new_section()
self.vadj_to_bottom.scroll_to_bottom()
if not self.config.get_bool('leave-code'):
sb.delete(sb.get_start_iter(), sb.get_end_iter())
self.subp.write(s)
@sourceview_keyhandler('Return', 0)
def on_sourceview_return(self):
sb = self.sourcebuffer
# If we are on the first line, and it doesn't end with a ' ':
# * If we are not executing, try to execute (if failed, continue
# with normal behavior)
# * If we are executing, send the line as stdin.
insert_iter = sb.get_iter_at_mark(sb.get_insert())
if (insert_iter.equal(sb.get_end_iter())
and insert_iter.get_line() == 0
and insert_iter.get_offset() != 0
and not get_text(sb, sb.get_start_iter(),
insert_iter).endswith(' ')):
if not self.is_executing:
source = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
source = source.rstrip()
source = self.replace_gtk_quotes(source)
try:
is_incomplete = self.call_subp_noblock(u'is_incomplete', source)
except TimeoutError:
is_incomplete = True
if not is_incomplete:
self.execute_source()
return True
else:
# is_executing
self.send_stdin()
return True
# If we are after too many newlines, the user probably just wanted to
# execute - notify him.
# We check if this line is empty and the previous one is.
show_execution_tip = False
if insert_iter.equal(sb.get_end_iter()):
it = sb.get_end_iter()
# This goes to the beginning of the line, and another line
# backwards, so we get two lines
it.backward_lines(1)
text = get_text(sb, it, sb.get_end_iter())
if not text.strip():
show_execution_tip = True
# We didn't execute, so newline-and-indent.
r = newline_and_indent(self.sourceview, INDENT_WIDTH)
if show_execution_tip:
self.status_bar.set_status(_(
"Tip: To execute your code, use Ctrl+Enter."))
return r
@sourceview_keyhandler('KP_Enter', 0)
def on_sourceview_kp_enter(self):
self.on_execute_command(None)
return True
@sourceview_keyhandler('Tab', 0)
def on_sourceview_tab(self):
sb = self.sourcebuffer
sel = sb.get_selection_bounds()
if not sel:
insert = sb.get_iter_at_mark(sb.get_insert())
insert_linestart = sb.get_iter_at_line(insert.get_line())
line = get_text(sb, insert_linestart, insert)
if not line.strip():
# We are at the beginning of a line, so indent - forward to next
# "tab stop"
sb.insert_at_cursor(' '*(INDENT_WIDTH - len(line)%INDENT_WIDTH))
else:
# Completion should come here
self.autocomplete.show_completions(is_auto=False, complete=True)
else:
# Indent
start, end = sel
start = sb.get_iter_at_line(start.get_line())
if not end.ends_line():
end.forward_to_line_end()
text = get_text(sb, start, end)
newtext = '\n'.join(' '+line for line in text.split('\n'))
start_offset = start.get_offset()
sb.delete(start, end)
sb.insert(end, newtext)
sb.select_range(sb.get_iter_at_offset(start_offset), end)
self.sv_scroll_cursor_onscreen()
return True
@sourceview_keyhandler('ISO_Left_Tab', 0)
def on_sourceview_shift_tab(self):
sb = self.sourcebuffer
sel = sb.get_selection_bounds()
if sel:
start, end = sel
else:
start = end = sb.get_iter_at_mark(sb.get_insert())
start = sb.get_iter_at_line(start.get_line())
if not end.ends_line():
end.forward_to_line_end()
text = get_text(sb, start, end)
lines = text.split('\n')
if not all(line.startswith(' ')
for line in lines if line.strip() != ''):
beep()
else:
newlines = [line[4:] for line in lines]
newtext = '\n'.join(newlines)
start_offset = start.get_offset()
sb.delete(start, end)
sb.insert(end, newtext)
sb.select_range(sb.get_iter_at_offset(start_offset), end)
return True
@sourceview_keyhandler('Home', 0)
def on_sourceview_home(self):
# If the cursor is already at the beginning of the line, move to the
# beginning of the text.
sb = self.sourcebuffer
insert = sb.get_iter_at_mark(sb.get_insert())
if insert.starts_line():
while insert.get_char() == ' ':
insert.forward_char()
sb.place_cursor(insert)
return True
@sourceview_keyhandler('BackSpace', 0)
def on_sourceview_backspace(self):
sb = self.sourcebuffer
insert = sb.get_iter_at_mark(sb.get_insert())
insert_linestart = sb.get_iter_at_line(insert.get_line())
line = get_text(sb, insert_linestart, insert)
if line and not line.strip():
# There are only spaces before us, so remove spaces up to the last
# "tab stop"
delete_from = ((len(line) - 1) // INDENT_WIDTH) * INDENT_WIDTH
it = sb.get_iter_at_line_offset(insert.get_line(), delete_from)
sb.delete(it, insert)
self.sv_scroll_cursor_onscreen()
return True
return False
# The following 3 handlers are for characters which may trigger automatic
# opening of the completion list. (slash and backslash depend on path.sep)
# We leave the final decision whether to open the list to the autocompleter.
# We just notify it that the char was inserted and the user waited a while.
@sourceview_keyhandler('period', 0)
def on_sourceview_period(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '.')
@sourceview_keyhandler('slash', 0)
def on_sourceview_slash(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '/')
@sourceview_keyhandler('backslash', 0)
def on_sourceview_backslash(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '\\')
@sourceview_keyhandler('bracketleft', 0)
def on_sourceview_bracketleft(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '[')
def check_autocomplete(self, last_char):
"""
If the last char in the sourcebuffer is last_char, call
show_completions.
"""
sb = self.sourcebuffer
if self.sourceview.is_focus():
it = sb.get_iter_at_mark(sb.get_insert())
it2 = it.copy()
it2.backward_chars(1)
char = get_text(sb, it2, it)
if char == last_char:
self.autocomplete.show_completions(is_auto=True, complete=False)
# return False so as not to be called repeatedly.
return False
@sourceview_keyhandler('parenleft', 0)
def on_sourceview_parenleft(self):
idle_add(self.call_tips.show, True)
def on_sourceview_keypress(self, _widget, event):
return handle_keypress(self, event, sourceview_keyhandlers)
# Autoparen
@sourceview_keyhandler('space', 0)
def on_sourceview_space(self):
"""
If a space was hit after a callable-only object, add parentheses.
"""
if self.is_executing:
return False
if not self.config.get_bool('autoparen'):
return False
return self.autoparen.add_parens()
def is_callable_only(self, expr):
return self.call_subp_catch(u'is_callable_only', expr)
def get_expects_str(self):
return set(self.config.get('expects-str-2').split())
def autoparen_show_call_tip(self):
self.call_tips.show(is_auto=True)
# History
def on_textview_keypress(self, _widget, event):
keyval_name, state = parse_keypress_event(event)
if (keyval_name, state) in (('Return', 0), ('KP_Enter', 0)):
return self.history.copy_to_sourceview()
def on_history_up(self, _widget):
self.history.history_up()
def on_history_down(self, _widget):
self.history.history_down()
# Subprocess
def show_welcome(self):
s = self.subp_welcome + 'DreamPie %s\n' % __version__
self.write(s, MESSAGE)
self.output.start_new_section()
def configure_subp(self):
config = self.config
if config.get_bool('use-reshist'):
reshist_size = config.get_int('reshist-size')
else:
reshist_size = 0
self.call_subp(u'set_reshist_size', reshist_size)
self.menuitem_clear_reshist.props.sensitive = (reshist_size > 0)
self.call_subp(u'set_pprint', config.get_bool('pprint'))
self.call_subp(u'set_matplotlib_ia',
config.get_bool('matplotlib-ia-switch'),
config.get_bool('matplotlib-ia-warn'))
def run_init_code(self, runfile=None):
"""
Runs the init code.
This will result in the code being run and a '>>>' printed afterwards.
If there's no init code, will just print '>>>'.
If runfile is given, will also execute the code in that.
"""
init_code = unicode(eval(self.config.get('init-code')))
if runfile:
msg = "Running %s" % runfile
# This should be both valid py3 and py2 code.
init_code += ('\n\nprint(%r)\nexec(open(%r).read())\n'
% (msg, runfile))
if init_code:
is_ok, syntax_error_info = self.call_subp(u'execute', init_code)
if not is_ok:
msg, lineno, offset = syntax_error_info
warning = _(
"Could not run initialization code because of a syntax "
"error:\n"
"%s at line %d col %d.") % (msg, lineno+1, offset+1)
msg = gtk.MessageDialog(self.window_main, gtk.DIALOG_MODAL,
gtk.MESSAGE_WARNING, gtk.BUTTONS_CLOSE,
warning)
_response = msg.run()
msg.destroy()
else:
self.set_is_executing(True)
if not self.is_executing:
self.write('>>> ', COMMAND, PROMPT)
def on_subp_terminated(self):
if self.is_terminating:
return
# This may raise an exception if subprocess couldn't be started,
# but hopefully if it was started once it will be started again.
self._n_unclaimed_results = 0
self.subp.start()
self.set_is_executing(False)
self.write('\n')
self.write(
'==================== New Session ====================\n',
MESSAGE)
self.output.start_new_section()
self.configure_subp()
self.run_init_code()
self.vadj_to_bottom.scroll_to_bottom()
self.sourceview.grab_focus()
def on_restart_subprocess(self, _widget):
self.subp.kill()
def on_stdout_recv(self, data):
self.write_output(data, STDOUT)
def on_stderr_recv(self, data):
self.write_output(data, STDERR)
def call_subp(self, funcname, *args):
"""
Make an RPC call, blocking until an answer is received.
"""
assert not self.is_executing
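# Drain replies left over from earlier calls that timed out in
# call_subp_noblock, so that recv_object below returns the answer
# to this request and not a stale one.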
while self._n_unclaimed_results:
self.subp.recv_object()
self.subp.send_object((funcname, args))
return self.subp.recv_object()
def call_subp_noblock(self, funcname, *args):
"""
Make a non-blocking RPC call.
Wait up to SUBP_WAIT_TIMEOUT_S; if no answer is received, raise a
TimeoutError. The query will still be executed when the subprocess
becomes responsive again, but its result will be discarded.
"""
assert not self.is_executing
while self._n_unclaimed_results:
returned = self.subp.wait_for_object(SUBP_WAIT_TIMEOUT_S)
if returned:
self.subp.recv_object()
else:
raise TimeoutError
self.subp.send_object((funcname, args))
returned = self.subp.wait_for_object(SUBP_WAIT_TIMEOUT_S)
if returned:
return self.subp.recv_object()
else:
self._n_unclaimed_results += 1
raise TimeoutError
def call_subp_catch(self, funcname, *args):
"""
Make a non-blocking RPC call.
If executing, return None.
If a TimeoutError is raised, catch it and return None.
"""
if self.is_executing:
return None
try:
return self.call_subp_noblock(funcname, *args)
except TimeoutError:
return None
def on_object_recv(self, obj):
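# If _n_unclaimed_results is nonzero, obj is a late reply to an RPC call
# that already timed out in call_subp_noblock, so it is simply discarded.
# Otherwise it is the result of the source code currently being executed.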
if self._n_unclaimed_results:
self._n_unclaimed_results -= 1
return
assert self.is_executing
is_success, val_no, val_str, exception_string, rem_stdin = obj
if not is_success:
self.write_output(exception_string, EXCEPTION, onnewline=True)
else:
if val_str is not None:
if val_no is not None:
sep = ' ' if '\n' not in val_str else '\n'
self.write_output('%d:%s' % (val_no, sep), RESULT_IND,
onnewline=True)
self.write_output(val_str+'\n', RESULT)
self.write('>>> ', COMMAND, PROMPT)
self.set_is_executing(False)
self.handle_rem_stdin(rem_stdin)
def handle_rem_stdin(self, rem_stdin):
"""
Add the stdin text that was not processed to the source buffer.
Remove it from the text buffer (we check that the STDIN text is
consistent with rem_stdin - otherwise we give up)
"""
if not rem_stdin:
return
self.sourcebuffer.insert(self.sourcebuffer.get_start_iter(), rem_stdin)
self.sv_scroll_cursor_onscreen()
tb = self.textbuffer
stdin = tb.get_tag_table().lookup(STDIN)
it = tb.get_end_iter()
if not it.ends_tag(stdin):
it.backward_to_tag_toggle(stdin)
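# Walk backwards over the STDIN-tagged chunks at the end of the text
# buffer, deleting text that matches the tail of rem_stdin, and stop as
# soon as the buffer text and rem_stdin disagree.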
while True:
it2 = it.copy()
it2.backward_to_tag_toggle(stdin)
cur_stdin = get_text(tb, it2, it, True)
min_len = min(len(cur_stdin), len(rem_stdin))
assert min_len > 0
if cur_stdin[-min_len:] != rem_stdin[-min_len:]:
debug("rem_stdin doesn't match what's in textview")
break
it2.forward_chars(len(cur_stdin)-min_len)
tb.delete(it2, it)
rem_stdin = rem_stdin[:-min_len]
if not rem_stdin:
break
else:
it = it2
# if rem_stdin is left, it2 must be at the beginning of the
# stdin region.
it2.backward_to_tag_toggle(stdin)
assert it2.ends_tag(stdin)
def on_execute_command(self, _widget):
if self.is_executing:
self.send_stdin()
elif self.sourcebuffer.get_char_count() == 0:
beep()
else:
self.execute_source()
return True
def on_interrupt(self, _widget):
if self.subp_can_mask_sigint or self.is_executing:
self.subp.interrupt()
else:
self.status_bar.set_status(
_("A command isn't being executed currently"))
beep()
# History persistence
def on_save_history(self, _widget):
self.histpersist.save()
def on_save_history_as(self, _widget):
self.histpersist.save_as()
def on_load_history(self, _widget):
self.histpersist.load()
# Recent history files
def on_recent_manager_changed(self, _recent_manager):
self.update_recent()
def update_recent(self):
"""Update the menu and self.recent_filenames"""
rman = self.recent_manager
recent_items = [it for it in rman.get_items()
if it.has_application('dreampie')
and it.get_uri().startswith('file://')]
# it.get_visited() makes more sense, but since we call RecentManager.add
# when we open and when we save, get_modified() does the trick.
recent_items.sort(key=lambda it: it.get_modified(),
reverse=True)
self.menuitem_recentsep.props.visible = (len(recent_items) > 0)
for i, menuitem in enumerate(self.menuitem_recent):
if i < len(recent_items):
it = recent_items[i]
fn = it.get_uri()[len('file://'):]
menuitem.props.visible = True
menuitem.child.props.label = "_%d %s" % (i, fn)
self.recent_filenames[i] = fn
else:
menuitem.props.visible = False
self.recent_filenames[i] = None
def on_menuitem_recent(self, widget):
num = self.menuitem_recent.index(widget)
fn = self.recent_filenames[num]
self.histpersist.load_filename(fn)
# Discard history
def discard_hist_before_tag(self, tag):
"""
Discard history before the given tag. If tag == COMMAND, this discards
all history, and if tag == MESSAGE, this discards previous sessions.
"""
tb = self.textbuffer
tag = tb.get_tag_table().lookup(tag)
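# Find the last position where the tag begins: one backward toggle may
# land on a tag end, in which case we step back one more toggle.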
it = tb.get_end_iter()
it.backward_to_tag_toggle(tag)
if not it.begins_tag(tag):
it.backward_to_tag_toggle(tag)
tb.delete(tb.get_start_iter(), it)
def on_discard_history(self, _widget):
xml = glade.XML(gladefile, 'discard_hist_dialog')
d = xml.get_widget('discard_hist_dialog')
d.set_transient_for(self.window_main)
d.set_default_response(gtk.RESPONSE_OK)
previous_rad = xml.get_widget('previous_rad')
all_rad = xml.get_widget('all_rad')
previous_rad.set_group(all_rad)
previous_rad.props.active = True
r = d.run()
d.destroy()
if r == gtk.RESPONSE_OK:
tb = self.textbuffer
if previous_rad.props.active:
self.discard_hist_before_tag(MESSAGE)
else:
self.discard_hist_before_tag(COMMAND)
tb.insert_with_tags_by_name(
tb.get_start_iter(),
'================= History Discarded =================\n',
MESSAGE)
self.status_bar.set_status(_('History discarded.'))
self.histpersist.forget_filename()
# Folding
def on_section_menu_activate(self, widget):
"""
Called when the user clicks a section-related item in a popup menu.
"""
tb = self.textbuffer
it = tb.get_iter_at_mark(self.popup_mark)
r = self.folding.get_section_status(it)
if r is None:
# May happen if something was changed in the textbuffer between
# popup and activation
return
typ, is_folded, start_it = r
if widget is self.fold_unfold_section_menu:
# Fold/Unfold
if is_folded is None:
# No point in folding.
beep()
elif not is_folded:
self.folding.fold(typ, start_it)
else:
self.folding.unfold(typ, start_it)
else:
if typ == COMMAND:
text = self.history.iter_get_command(start_it)
else:
end_it = start_it.copy()
end_it.forward_to_tag_toggle(self.folding.get_tag(typ))
text = get_text(tb, start_it, end_it)
if sys.platform == 'win32':
text = text.replace('\n', '\r\n')
if widget is self.copy_section_menu:
# Copy
self.selection.clipboard.set_text(text)
elif widget is self.view_section_menu:
# View
fd, fn = tempfile.mkstemp()
os.write(fd, text)
os.close(fd)
viewer = eval(self.config.get('viewer'))
self.spawn_and_forget('%s %s' % (viewer, fn))
elif widget is self.save_section_menu:
# Save
def func(filename):
f = open(filename, 'wb')
f.write(text)
f.close()
save_dialog(func, _("Choose where to save the section"),
self.main_widget, _("All Files"), "*", None)
else:
assert False, "Unexpected widget"
def spawn_and_forget(self, argv):
"""
Start a process and forget about it.
"""
if sys.platform == 'linux2':
# We use a trick so as not to create zombie processes: we fork,
# and let the fork spawn the process (actually another fork). The
# (first) fork immediately exits, so the process we spawned is
# made the child of process number 1.
pid = os.fork()
if pid == 0:
_p = subprocess.Popen(argv, shell=True)
os._exit(0)
else:
os.waitpid(pid, 0)
else:
_p = subprocess.Popen(argv, shell=True)
def on_double_click(self, event):
"""If we are on a folded section, unfold it and return True, to
avoid event propagation."""
tv = self.textview
if tv.get_window(gtk.TEXT_WINDOW_TEXT) is not event.window:
# Probably a click on the border or something
return
x, y = tv.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
int(event.x), int(event.y))
it = tv.get_iter_at_location(x, y)
r = self.folding.get_section_status(it)
if r is not None:
typ, is_folded, start_it = r
if is_folded:
self.folding.unfold(typ, start_it)
return True
def on_fold_last(self, _widget):
self.folding.fold_last()
def on_unfold_last(self, _widget):
self.folding.unfold_last()
# Notebook tabs
def on_notebook_switch_page(self, _widget, _page, page_num):
new_sv = self.notebook.get_nth_page(page_num).get_child()
for cb in self.sv_changed:
cb(new_sv)
def new_tab(self, index=None):
# The following line should result in on_notebook_switch_page, which
# will take care of calling on_sv_change functions.
self.create_sourcebufferview(index)
self.notebook.props.show_tabs = True
self.reopen_tab_data = None
self.menuitem_reopen_tab.props.sensitive = False
def on_new_tab(self, _widget):
self.new_tab()
def on_reopen_tab(self, _widget):
index, text = self.reopen_tab_data
self.new_tab(index)
self.sourcebuffer.set_text(text)
def on_close_tab(self, _widget):
if self.notebook.get_n_pages() == 1:
beep()
return
else:
self.close_current_tab()
def close_current_tab(self):
assert self.notebook.get_n_pages() > 1
cur_page = self.notebook.get_current_page()
text = get_text(self.sourcebuffer, self.sourcebuffer.get_start_iter(),
self.sourcebuffer.get_end_iter())
if text:
self.reopen_tab_data = (cur_page, text)
self.menuitem_reopen_tab.props.sensitive = True
else:
self.reopen_tab_data = None
self.menuitem_reopen_tab.props.sensitive = False
scrolledwin = self.notebook.get_nth_page(cur_page)
new_page = cur_page-1 if cur_page > 0 else 1
# This should result in on_notebook_switch_page which will set
# everything to use the new sourcebuffer
self.notebook.set_current_page(new_page)
assert self.sourceview is not scrolledwin.get_child()
self.notebook.remove_page(cur_page)
if self.notebook.get_n_pages() == 1:
self.notebook.props.show_tabs = False
if True:
scrolledwin.destroy()
else:
# Verify that the sourceview and sourcebuffer are indeed destroyed,
# and not referenced anywhere
import weakref, gc
r = weakref.ref(scrolledwin.get_child().get_buffer())
scrolledwin.destroy()
gc.collect()
assert r() is None
def on_prev_tab(self, _widget):
self.notebook.prev_page()
def on_next_tab(self, _widget):
self.notebook.next_page()
# Other events
def on_show_completions(self, _widget):
self.autocomplete.show_completions(is_auto=False, complete=False)
def complete_dict_keys(self, expr):
return self.call_subp_catch(u'complete_dict_keys', expr)
def complete_attributes(self, expr):
return self.call_subp_catch(u'complete_attributes', expr)
def complete_firstlevels(self):
return self.call_subp_catch(u'complete_firstlevels')
def get_func_args(self, expr):
return self.call_subp_catch(u'get_func_args', expr)
def find_modules(self, expr):
return self.call_subp_catch(u'find_modules', expr)
def get_module_members(self, expr):
return self.call_subp_catch(u'get_module_members', expr)
def complete_filenames(self, str_prefix, text, str_char, add_quote):
return self.call_subp_catch(u'complete_filenames', str_prefix, text, str_char,
add_quote)
def on_show_calltip(self, _widget):
self.call_tips.show(is_auto=False)
def get_func_doc(self, expr):
return self.call_subp_catch(u'get_func_doc', expr)
def configure(self):
"""
Apply configuration. Called on initialization and after configuration
was changed by the configuration dialog.
"""
config = self.config
tv = self.textview; tb = self.textbuffer
sourceviews = [self.notebook.get_nth_page(i).get_child()
for i in range(self.notebook.get_n_pages())]
font_name = config.get('font')
font = pango.FontDescription(font_name)
tv.modify_font(font)
for sv in sourceviews:
sv.modify_font(font)
theme = tags.get_theme(self.config, self.config.get('current-theme'))
tags.apply_theme_text(tv, tb, theme)
for sv in sourceviews:
tags.apply_theme_source(sv.get_buffer(), theme)
vertical_layout = self.config.get_bool('vertical-layout')
if vertical_layout:
pane = self.vpaned_main; other_pane = self.hpaned_main
self.notebook.props.tab_pos = gtk.POS_BOTTOM
else:
pane = self.hpaned_main; other_pane = self.vpaned_main
self.notebook.props.tab_pos = gtk.POS_TOP
pane.props.visible = True
other_pane.props.visible = False
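# If the selected pane is empty, move both children over from the other
# pane (this happens when the layout orientation has changed).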
if pane.get_child1() is None:
child1 = other_pane.get_child1(); other_pane.remove(child1)
child2 = other_pane.get_child2(); other_pane.remove(child2)
pane.pack1(child1, resize=True, shrink=False)
pane.pack2(child2, resize=not vertical_layout, shrink=False)
# If the fonts were changed, we might need to enlarge the window
last_font, last_vertical = self.last_configured_layout
if last_font != font or last_vertical != vertical_layout:
self.set_window_size(vertical_layout)
self.last_configured_layout = font, vertical_layout
command_defs = self.textbuffer.get_tag_table().lookup(COMMAND_DEFS)
command_defs.props.invisible = config.get_bool('hide-defs')
def configure_sourceview(self, sv):
"""
Apply configuration to a newly created sourceview.
This does the same for a single sourceview as configure() does for
all of them.
"""
font_name = self.config.get('font')
font = pango.FontDescription(font_name)
sv.modify_font(font)
theme = tags.get_theme(self.config, self.config.get('current-theme'))
tags.apply_theme_source(sv.get_buffer(), theme)
def on_preferences(self, _widget):
cd = ConfigDialog(self.config, gladefile, self.window_main)
r = cd.run()
if r == gtk.RESPONSE_OK:
self.configure()
self.configure_subp()
cd.destroy()
def on_clear_reshist(self, _widget):
try:
self.call_subp_noblock(u'clear_reshist')
except TimeoutError:
# Will happen anyway when idle job ends
pass
self.status_bar.set_status(_("Result history cleared."))
def on_close(self, _widget, _event):
self.quit()
return True
def on_quit(self, _widget):
self.quit()
def quit(self):
was_saved = self.histpersist.was_saved()
if (self.textbuffer.get_modified()
and (was_saved or self.config.get_bool('ask-on-quit'))):
d = gtk.MessageDialog(
parent=self.window_main,
flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
type=gtk.MESSAGE_WARNING,
message_format=_('Save history before closing?'))
d.props.secondary_text = _("If you don't save, your history will be lost.")
CANCEL, DISCARD, SAVE = range(3)
discard_btn = d.add_button(_("Close _without saving"), DISCARD)
_cancel_btn = d.add_button(_("_Cancel"), CANCEL)
save_btn = d.add_button(_("_Save"), SAVE)
if not was_saved:
dontask_chk = gtk.CheckButton(
_("Don't ask me again when the history was never saved"))
dontask_chk.show()
d.get_content_area().pack_start(dontask_chk, fill=True, expand=False)
d.set_default_response(DISCARD)
discard_btn.grab_focus()
else:
d.set_default_response(SAVE)
save_btn.grab_focus()
r = d.run()
if not was_saved and r == DISCARD and dontask_chk.props.active:
self.config.set_bool('ask-on-quit', False)
self.config.save()
if r == SAVE:
saved = self.histpersist.save()
quit = saved
elif r == DISCARD:
quit = True
else:
quit = False
d.destroy()
else:
quit = True
if quit:
self.is_terminating = True
self.window_main.destroy()
self.subp.kill()
gtk.main_quit()
def on_about(self, _widget):
d = get_widget('about_dialog')
d.set_transient_for(self.window_main)
d.set_version(__version__)
d.set_logo(gdk.pixbuf_new_from_file(
path.join(data_dir, 'dreampie.png')))
d.run()
d.destroy()
def on_update_available(self, is_git, latest_name, latest_time):
date = time.strftime('%Y/%m/%d', time.localtime(latest_time))
if is_git:
msg = _("A new git commit is available, from %s. "
"Run 'git pull' to update." % date)
else:
self.get_update_menu.show()
msg = _("A new DreamPie version, %s, is available. "
"Click Help->Get New Version to update." % latest_name)
self.status_bar.set_status(msg)
def on_get_update_menu_activate(self, _widget):
webbrowser.open('http://www.dreampie.org/download.html')
def on_report_bug(self, _widget):
bug_report.bug_report(self.window_main, gladefile, None)
def on_homepage(self, _widget):
webbrowser.open('http://www.dreampie.org/')
def on_getting_started(self, _widget):
self.show_getting_started_dialog()
def show_getting_started_dialog(self):
d = get_widget('getting_started_dialog')
d.set_transient_for(self.window_main)
d.run()
d.destroy()
def on_textview_button_press_event(self, _widget, event):
if event.button == 3:
self.show_popup_menu(event)
return True
elif event.button == 2:
return self.on_sourceview_button_press_event(_widget, event)
elif event.type == gdk._2BUTTON_PRESS:
return self.on_double_click(event)
def show_popup_menu(self, event):
tv = self.textview
tb = self.textbuffer
if tb.get_has_selection():
self.popup_sel_menu.popup(None, None, None, event.button,
event.get_time())
else:
if tv.get_window(gtk.TEXT_WINDOW_TEXT) is not event.window:
# Probably a click on the border or something
return
x, y = tv.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
int(event.x), int(event.y))
it = tv.get_iter_at_location(x, y)
r = self.folding.get_section_status(it)
if r is not None:
typ, is_folded, _start_it = r
if typ == OUTPUT:
typ_s = _('Output Section')
else:
typ_s = _('Code Section')
self.fold_unfold_section_menu.props.visible = (
is_folded is not None)
self.fold_unfold_section_menu.child.props.label = (
_('Unfold %s') if is_folded else _('Fold %s')) % typ_s
self.copy_section_menu.child.props.label = _('Copy %s') % typ_s
self.view_section_menu.child.props.label = _('View %s') % typ_s
self.save_section_menu.child.props.label = _('Save %s') % typ_s
self.view_section_menu.props.visible = \
bool(eval(self.config.get('viewer')))
tb.move_mark(self.popup_mark, it)
self.popup_nosel_menu.popup(None, None, None, event.button,
event.get_time())
else:
beep()
def main():
usage = "%prog [options] [python-executable]"
version = 'DreamPie %s' % __version__
parser = OptionParser(usage=usage, version=version)
parser.add_option("--run", dest="runfile",
help="A file to run upon initialization. It will be "
"run only once.")
if sys.platform == 'win32':
parser.add_option("--hide-console-window", action="store_true",
dest="hide_console",
help="Hide the console window")
opts, args = parser.parse_args()
if len(args) > 1:
parser.error("Can accept at most one argument")
if len(args) == 1:
pyexec = args[0]
elif 'dreampie' in sys.executable.lower():
# We are under py2exe.
msg = gtk.MessageDialog(
None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE,
_("DreamPie must be given the file name of a Python interpreter. "
"Please create a shortcut to something like '%s "
"--hide-console-window c:\\python26\\python.exe'.")
% os.path.abspath(sys.argv[0]))
_response = msg.run()
msg.destroy()
sys.exit(1)
else:
pyexec = sys.executable
if sys.platform == 'win32' and opts.hide_console:
from .hide_console_window import hide_console_window
hide_console_window()
gtk.widget_set_default_direction(gtk.TEXT_DIR_LTR)
_dp = DreamPie(pyexec, opts.runfile)
gtk.main()
| gpl-3.0 |
huanzhang12/lightgbm-gpu | examples/python-guide/sklearn_example.py | 3 | 1490 | # coding: utf-8
# pylint: disable = invalid-name, C0111
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
# load or create your dataset
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
print('Start training...')
# train
gbm = lgb.LGBMRegressor(objective='regression',
num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Start predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
print('Calculate feature importances...')
# feature importances
print('Feature importances:', list(gbm.feature_importances_))
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
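# (Illustrative follow-up, not part of the original example.) Since
# GridSearchCV refits the best estimator by default, the fitted object can
# be used directly for prediction, e.g.:
# y_grid_pred = gbm.predict(X_test)
# print('The rmse of the tuned model is:', mean_squared_error(y_test, y_grid_pred) ** 0.5)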
| mit |
kagayakidan/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
brooksandrew/postman_problems | postman_problems/tests/test_example_sleeping_giant.py | 1 | 6009 | import math
import pkg_resources
import itertools
import pandas as pd
import networkx as nx
from postman_problems.viz import add_node_attributes
from postman_problems.graph import (
read_edgelist, create_networkx_graph_from_edgelist, get_odd_nodes, get_shortest_paths_distances
)
from postman_problems.solver import rpp, cpp
# ###################
# PARAMETERS / DATA #
# ###################
EDGELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/edgelist_sleeping_giant.csv')
NODELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/nodelist_sleeping_giant.csv')
START_NODE = 'b_end_east'
#########
# TESTS #
#########
def test_read_sleeping_giant_edgelist():
df = read_edgelist(EDGELIST, keep_optional=True)
# check that our Sleeping Giant example dataset contains the correct fields and values
assert ['node1', 'node2', 'trail', 'color', 'distance', 'estimate', 'required'] in df.columns.values
assert math.isclose(df[df['required'] == 1]['distance'].sum(), 26.01)
assert math.isclose(df['distance'].sum(), 30.48)
df_req = read_edgelist(EDGELIST, keep_optional=False)
assert math.isclose(df_req['distance'].sum(), 26.01)
assert 'req' not in df_req.columns
def test_create_networkx_graph_from_edgelist():
df = read_edgelist(EDGELIST, keep_optional=True)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
# check that our starting graph is created correctly
assert isinstance(graph, nx.MultiGraph)
assert len(graph.edges()) == 133
assert len(graph.nodes()) == 78
assert graph['b_end_east']['b_y'][0]['color'] == 'blue'
assert graph['b_end_east']['b_y'][0]['trail'] == 'b'
assert graph['b_end_east']['b_y'][0]['distance'] == 1.32
# check that starting graph with required trails only is correct
df_req = read_edgelist(EDGELIST, keep_optional=False)
graph_req = create_networkx_graph_from_edgelist(df_req, edge_id='id')
assert isinstance(graph_req, nx.MultiGraph)
assert len(graph_req.edges()) == 121
assert len(graph_req.nodes()) == 74
def test_add_node_attributes():
# create objects for testing
df = read_edgelist(EDGELIST)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
nodelist_df = pd.read_csv(NODELIST)
graph_node_attrs = add_node_attributes(graph, nodelist_df)
assert len(graph_node_attrs.nodes()) == 74
# check that each node attribute has an X and Y coordinate
for k, v in graph_node_attrs.nodes(data=True):
assert 'X' in v
assert 'Y' in v
# spot check node attributes for first node
node_data_from_graph = list(graph_node_attrs.nodes(data=True))
node_names = [n[0] for n in node_data_from_graph]
assert 'rs_end_north' in node_names
key = node_names.index('rs_end_north')
assert node_data_from_graph[key][1]['X'] == 1772
assert node_data_from_graph[key][1]['Y'] == 172
def test_get_shortest_paths_distances():
df = read_edgelist(EDGELIST)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
odd_nodes = get_odd_nodes(graph)
odd_node_pairs = list(itertools.combinations(odd_nodes, 2))
# coarsely checking structure of `get_shortest_paths_distances` return value
odd_node_pairs_shortest_paths = get_shortest_paths_distances(graph, odd_node_pairs, 'distance')
assert len(odd_node_pairs_shortest_paths) == 630
assert type(odd_node_pairs_shortest_paths) == dict
# check that each node name appears the same number of times in `get_shortest_paths_distances` return value
node_names = list(itertools.chain(*[i[0] for i in odd_node_pairs_shortest_paths.items()]))
assert set(pd.value_counts(node_names)) == set([35])
def test_nodelist_edgelist_overlap():
"""
Test that the nodelist and the edgelist contain the same node names. If using X,Y coordinates for plotting and
not all nodes have attributes, this could get messy.
"""
eldf = read_edgelist(EDGELIST, keep_optional=True)
nldf = pd.read_csv(NODELIST)
edgelist_nodes = set(eldf['node1'].append(eldf['node2']))
nodelist_nodes = set(nldf['id'])
nodes_in_el_but_not_nl = edgelist_nodes - nodelist_nodes
assert nodes_in_el_but_not_nl == set(), \
"Warning: The following nodes are in the edgelist, but not the nodelist: {}".format(nodes_in_el_but_not_nl)
nodes_in_nl_but_not_el = nodelist_nodes - edgelist_nodes
assert nodes_in_nl_but_not_el == set(), \
"Warning: The following nodes are in the nodelist, but not the edgelist: {}".format(nodes_in_nl_but_not_el)
def test_sleeping_giant_cpp_solution():
cpp_solution, graph = cpp(edgelist_filename=EDGELIST, start_node=START_NODE)
# make sure the number of edges in the solution is correct
assert len(cpp_solution) == 155
# make sure our total mileage is correct
cpp_solution_distance = sum([edge[3]['distance'] for edge in cpp_solution])
assert math.isclose(cpp_solution_distance, 33.25)
# make sure our circuit begins and ends at the same place
assert cpp_solution[0][0] == cpp_solution[-1][1] == START_NODE
# make sure original graph is properly returned
assert len(graph.edges()) == 121
assert [e[2].get('augmented') for e in graph.edges(data=True)].count(True) == 35
def test_sleeping_giant_rpp_solution():
rpp_solution, graph = rpp(edgelist_filename=EDGELIST, start_node=START_NODE)
# make sure the number of edges in the solution is correct
assert len(rpp_solution) == 151
# make sure our total mileage is correct
rpp_solution_distance = sum([edge[3]['distance'] for edge in rpp_solution])
assert math.isclose(rpp_solution_distance, 32.12)
# make sure our circuit begins and ends at the same place
assert rpp_solution[0][0] == rpp_solution[-1][1] == START_NODE
# make sure original graph is properly returned
assert len(graph.edges()) == 133
assert [e[3].get('augmented') for e in graph.edges(data=True, keys=True)].count(True) == 30
| mit |
vipints/oqtans | oqtans_tools/KIRMES/0.8/src/EasySVM.py | 2 | 44265 | """
#############################################################################################
# #
# This class is part of the MLB-Galaxy package, adding some sequence analysis #
# functionality to PSU's Galaxy framework. #
# Copyright (C) 2008 Cheng Soon Ong <[email protected]> #
# Copyright (C) 2008 Gunnar Raetsch <[email protected]> #
# Copyright (C) 2007, 2009 Sebastian J. Schultheiss <[email protected] #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#############################################################################################
# #
# Original Author: Sebastian J. Schultheiss, version 0.8.0 #
# Please add a notice of any modifications here: #
# Gunnar Raetsch: rewrote code for training on sequences to be read from files #
# Cheng Soon Ong: Added code for educational toolbox #
# Sebastian J. Schultheiss: Class-ified for KIRMES use in 02/2009 #
# Sebastian J. Schultheiss: Updated for shogun-0.9.1 in 02/2010 #
# Sebastian J. Schultheiss: Tweaks for Galaxy Integration in 12/2010 #
# #
#############################################################################################
"""
__version__ = "EasySVM.py 0.8.0"
import os
import random
import shutil
import pickle
import warnings
import numpy
from numpy import ones
from shogun.Kernel import GaussianKernel, WeightedDegreePositionStringKernel
from shogun.Kernel import LinearKernel, PolyKernel, LocalAlignmentStringKernel, LocalityImprovedStringKernel, CommWordStringKernel
from shogun.Features import RealFeatures, Labels, StringCharFeatures, DNA, StringWordFeatures
from shogun.Classifier import LibSVM
from shogun.PreProc import SortWordString
from shogun.Evaluation import PerformanceMeasures
class EasySVM(object):
"""A wrapper around shogun SVM objects like the kernel, features ..."""
def __init__(self, kparam = {}, kernel_file = None):
"Initialize with default parameters (None)"
self.kparam = kparam
self.kernel = None
if kernel_file is not None:
self.setKernel(kernel_file)
def getKernel(self, kernel_file = None):
"Writes the kernel to a file"
return pickle.dump(self.kernel, kernel_file)
def setKernel(self, kernel_file):
"Load a kernel from a file"
self.kernel = pickle.load(kernel_file)
def getKernelName(self):
"Return the kernel parameter \'Kernel name\'"
return self.kparam['name']
def setKernelName(self, name):
"Set the kernel name/type"
self.kparam['name'] = name
def getKernelParameter(self, parameter_name = None):
"""Returns any of the following kernel parameters:
kernelname,
width,
modelsel_name, modelsel_params,
degree, scale,
inhomogene, normal
shift, seqlength,
indeg, outdeg,
C,
poimdegree, ...
"""
if parameter_name is None:
return self.kparam
else:
return self.kparam[parameter_name]
def setKernelParameter(self, parameter_name = None, parameter_value = None):
"""Set an arbitrary kernel parameter,
or set the whole kparam dictionary if parameter_name is empty"""
if parameter_name is None and parameter_value is not None:
self.kparam = parameter_value
elif parameter_name is not None:
self.kparam[parameter_name] = parameter_value
def parseKernelParameters(self, parameter_string, model_selection = False):
"""Parse the arguments for a particular kernel"""
parameters = parameter_string.split(" ")
kernelname = parameters[0]
self.kparam = {}
self.kparam["name"] = kernelname
self.kparam["modelsel_name"] = None
self.kparam["modelsel_params"] = None
if kernelname == 'gauss':
if len(parameters) < 2:
raise ValueError('Not enough arguments for a Gauss-type kernel.\nUsage: gauss <width>\n')
if model_selection:
self.kparam['width'] = None
self.kparam["modelsel_name"] = "width"
self.kparam["modelsel_params"] = parseFloatList(parameters[1])
else:
self.kparam['width'] = float(parameters[1])
elif kernelname == 'linear':
self.kparam['scale'] = 1
# no parameters
elif kernelname == 'poly':
if len(parameters) < 4:
raise ValueError('Not enough arguments for a polynomial kernel.\nUsage: poly <degree> <true|false> <true|false>\n')
if model_selection:
self.kparam['degree'] = None
self.kparam["modelsel_name"] = "degree"
self.kparam["modelsel_params"] = parseIntList(parameters[1])
else:
self.kparam['degree'] = int(parameters[1])
self.kparam['inhomogene'] = (parameters[2] == 'true')
self.kparam['normal'] = (parameters[3] == 'true')
elif kernelname == 'wd':
if len(parameters) < 3:
raise ValueError('Not enough arguments for a WD kernel.\nUsage: wd <degree> <shift>\n')
if model_selection:
self.kparam['degree'] = None
self.kparam["modelsel_name"] = "degree"
self.kparam["modelsel_params"] = parseIntList(parameters[1])
else:
self.kparam['degree'] = int(parameters[1])
if model_selection and len(self.kparam["modelsel_params"]) == 1:
self.kparam['degree'] = self.kparam["modelsel_params"][0]
self.kparam['shift'] = None
self.kparam["modelsel_name"] = "shift"
self.kparam["modelsel_params"] = parseIntList(parameters[2])
else:
self.kparam['shift'] = int(parameters[2])
elif kernelname == 'spec':
if len(parameters) < 2:
raise ValueError('Not enough arguments for a Spectrum kernel.\nUsage: spec <degree>\n')
if model_selection:
self.kparam['degree'] = None
self.kparam["modelsel_name"] = "degree"
self.kparam["modelsel_params"] = parseIntList(parameters[1])
else:
self.kparam['degree'] = int(parameters[1])
elif kernelname == 'localalign':
# no parameters
pass
elif kernelname == 'localimprove':
if len(parameters) < 4:
raise ValueError('Not enough arguments for a localimprove kernel.\nUsage: localimprove <length> <indegree> <outdegree>\n')
self.kparam['length'] = int(parameters[1])
if model_selection:
self.kparam['width'] = None
self.kparam["modelsel_name"] = "indeg"
self.kparam["modelsel_params"] = parseIntList(parameters[2])
else:
self.kparam['indeg'] = int(parameters[2])
self.kparam['outdeg'] = int(parameters[3])
else:
raise ValueError('Unknown kernel name \"' + kernelname + '\" in the parameter_string\n')
def setC(self, C):
"Set the oft-used kernel parameter C"
self.setKernelParameter('C', C)
def getC(self):
"Return the current value for kernel parameter C"
return self.getKernelParameter('C')
def createFeatures(self, examples):
"""Converts numpy arrays or sequences into shogun features"""
if self.kparam['name'] == 'gauss' or self.kparam['name'] == 'linear' or self.kparam['name'] == 'poly':
examples = numpy.array(examples)
feats = RealFeatures(examples)
elif self.kparam['name'] == 'wd' or self.kparam['name'] == 'localalign' or self.kparam['name'] == 'localimprove':
#examples = non_atcg_convert(examples, nuc_con)
feats = StringCharFeatures(examples, DNA)
elif self.kparam['name'] == 'spec':
#examples = non_atcg_convert(examples, nuc_con)
feats = StringCharFeatures(examples, DNA)
wf = StringWordFeatures(feats.get_alphabet())
wf.obtain_from_char(feats, self.kparam['degree'] - 1, self.kparam['degree'], 0, False)
del feats
# the spectrum (CommWordString) kernel expects sorted word features
preproc = SortWordString()
preproc.init(wf)
wf.add_preproc(preproc)
wf.apply_preproc()
feats = wf
else:
print 'Unknown kernel %s' % self.kparam['name']
raise ValueError
return feats
def createKernel(self, feats_train):
"""Call the corresponding constructor for the kernel"""
if self.kparam['name'] == 'gauss':
kernel = GaussianKernel(feats_train, feats_train, self.kparam['width'])
elif self.kparam['name'] == 'linear':
kernel = LinearKernel(feats_train, feats_train, self.kparam['scale'])
elif self.kparam['name'] == 'poly':
kernel = PolyKernel(feats_train, feats_train, self.kparam['degree'],
self.kparam['inhomogene'], self.kparam['normal'])
elif self.kparam['name'] == 'wd':
kernel = WeightedDegreePositionStringKernel(feats_train, feats_train, self.kparam['degree'])
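# allow the WD kernel to shift k-mers by up to 'shift' positions at every
# position of the (fixed-length) sequence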
kernel.set_shifts(self.kparam['shift'] * numpy.ones(self.kparam['seqlength'], dtype=numpy.int32))
elif self.kparam['name'] == 'spec':
kernel = CommWordStringKernel(feats_train, feats_train)
elif self.kparam['name'] == 'localalign':
kernel = LocalAlignmentStringKernel(feats_train, feats_train)
elif self.kparam['name'] == 'localimprove':
kernel = LocalityImprovedStringKernel(feats_train, feats_train, self.kparam['length'], \
self.kparam['indeg'], self.kparam['outdeg'])
else:
print 'Unknown kernel %s' % self.kparam['name']
raise ValueError
self.kernel = kernel
return kernel
def __str__(self):
"""Generates a short string describing the model parameters"""
if self.kparam["modelsel_name"] is None or len(self.kparam["modelsel_params"]) == 1:
string = "\tC=%1.1f" % self.kparam['C']
else:
string = "\tC=%1.1f\t%s=%i" % (self.kparam['C'], self.kparam["modelsel_name"])
return string
def model2str(self, C, kp):
"""Generates a string describing the model parameters"""
if self.kparam["modelsel_name"] == None or len(self.kparam["modelsel_params"]) == 1:
string = "\tC=%1.1f" % C
else:
if type(kp) == type(int(0)):
string = "\tC=%1.1f\t%s=%i" % (C, self.kparam["modelsel_name"], kp)
else:
string = "\tC=%1.1f\t%s=%1.2f" % (C, self.kparam["modelsel_name"], kp)
return string
def train(self, trainexamples, trainlabels):
"""Trains a SVM with the given kernel"""
kernel_cache_size = 500
num_threads = 6
feats_train = self.createFeatures(trainexamples)
if self.kparam['name'] == 'wd':
self.kparam['seqlength'] = len(trainexamples[0])
self.createKernel(feats_train)
self.kernel.io.disable_progress()
self.kernel.set_cache_size(int(kernel_cache_size))
labels = Labels(numpy.array(trainlabels, numpy.double))
svm = LibSVM(self.getC(), self.kernel, labels)
svm.parallel.set_num_threads(num_threads)
svm.io.disable_progress()
svm.train()
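# feats_train is returned along with the SVM because the kernel has to be
# re-initialised with (train, test) features before classifying new data
# (see trainAndTest).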
return (svm, feats_train)
def trainAndTest(self, trainexamples, trainlabels, testexamples):
"""Trains a SVM with the given kernel, and predict on the test examples"""
(svm, feats_train) = self.train(trainexamples, trainlabels) #,C,kname,kparam)
feats_test = self.createFeatures(testexamples)
self.kernel.init(feats_train, feats_test)
output = svm.classify().get_labels()
return output
def crossvalidation(self, all_examples, all_labels, xval = 5):
"""Perform cross validation using an SVM
xval -- the number of folds
"""
print 'Using %i-fold crossvalidation' % xval
partitions = getPartitionedSet(len(all_labels), xval)
all_outputs = [0.0] * len(all_labels)
all_split = [-1] * len(all_labels)
for repetition in xrange(xval):
XT, LT, XTE, LTE = getCurrentSplit(repetition, partitions, all_labels, all_examples)
del LTE
svmout = self.trainAndTest(XT, LT, XTE)
for i in xrange(len(svmout)):
all_outputs[partitions[repetition][i]] = svmout[i]
all_split[partitions[repetition][i]] = repetition
return (all_outputs, all_split)
#
################################################################################
# main functions
def crossvalidationSVM(self, xval, examples, labels):
"""A top level member function to run cross validation"""
# run cross-validation
(all_outputs, all_split) = self.crossvalidation(examples, labels, xval)
res_str = '#example\toutput\tsplit\n'
for ix in xrange(len(all_outputs)):
res_str += '%d\t%2.7f\t%d\n' % (ix, all_outputs[ix], all_split[ix])
return res_str
def modelSelectionSVM(self, xval, examples, labels, Crange):
"""A top level member function to run model selection"""
# run cross-validation
mean_rocs = []
mean_prcs = []
mean_accs = []
all_Cs = []
all_kparam = []
if self.kparam["modelsel_name"] == None:
for C in Crange:
self.setC(C)
(all_outputs, all_split) = self.crossvalidation(examples, labels, xval)
(res_str, mean_roc, mean_prc, mean_acc) = self.evaluate(all_outputs, all_split, labels)
del res_str
mean_rocs.append(mean_roc)
mean_prcs.append(mean_prc)
mean_accs.append(mean_acc)
all_Cs.append(C)
all_kparam.append(None)
else: # also optimize one kernel parameter
for C in Crange:
for kp in self.kparam["modelsel_params"]:
self.kparam[self.kparam["modelsel_name"]] = kp
self.setC(C)
(all_outputs, all_split) = self.crossvalidation(examples, labels, xval)
(res_str, mean_roc, mean_prc, mean_acc) = self.evaluate(all_outputs, all_split, labels)
del res_str
mean_rocs.append(mean_roc)
mean_prcs.append(mean_prc)
mean_accs.append(mean_acc)
all_Cs.append(C)
all_kparam.append(kp)
max_roc = numpy.max(numpy.array(mean_rocs))
max_prc = numpy.max(numpy.array(mean_prcs))
max_acc = numpy.max(numpy.array(mean_accs))
if self.kparam["modelsel_name"] == None or len(self.kparam["modelsel_params"]) == 1:
detail_str = "\tC\tROC\tPRC\tAccuracy (at threshold 0)\n"
else:
detail_str = "\tC\t%s\tROC\tPRC\tAccuracy (at threshold 0)\n" % self.kparam["modelsel_name"]
best_roc_str = ''
best_prc_str = ''
best_acc_str = ''
for i in xrange(len(all_Cs)):
# determine the best parameter combinations
if mean_rocs[i] == max_roc:
rocsym = '+'
best_roc_str += self.model2str(all_Cs[i], all_kparam[i])+'\n'
else:
rocsym = ' '
if mean_prcs[i] == max_prc:
prcsym = '+'
best_prc_str += self.model2str(all_Cs[i], all_kparam[i])+'\n'
else:
prcsym = ' '
if mean_accs[i] == max_acc:
accsym = '+'
best_acc_str += self.model2str(all_Cs[i], all_kparam[i])+'\n'
else:
accsym = ' '
detail_str += self.model2str(all_Cs[i], all_kparam[i])+'\t'
if self.kparam["modelsel_name"] == None or len(self.kparam["modelsel_params"]) == 1:
detail_str += '%c%2.1f%%\t%c%2.1f%%\t%c%2.1f%%\n' % (rocsym, 100*mean_rocs[i], prcsym, 100*mean_prcs[i], accsym, 100*mean_accs[i])
else:
detail_str += '%c%2.1f%%\t%c%2.1f%%\t%c%2.1f%%\n' % (rocsym, 100*mean_rocs[i], prcsym, 100*mean_prcs[i], accsym, 100*mean_accs[i])
detail_str = ('Best model(s) according to ROC measure:\n%s' % best_roc_str) + detail_str
detail_str = ('\nBest model(s) according to PRC measure:\n%s' % best_prc_str) + detail_str
detail_str = ('\nBest model(s) according to accuracy measure:\n%s' % best_acc_str) + detail_str
detail_str = ('\nDetailed results:\n') + detail_str
return detail_str
def predictSVM(self, trainexamples, trainlabels, testexamples):
"""A top level script to parse input parameters and train and predict"""
# run training and testing
svmout = self.trainAndTest(trainexamples, trainlabels, testexamples)
# write output file
res_str = '#example\toutput\n'
for ix in xrange(len(svmout)):
res_str += str(ix) + '\t' + str(svmout[ix]) + '\n'
return res_str
def evaluateSVM(self, trainexamples, trainlabels, prediction_file, roc_or_prc = None):
"""A top level script to parse input parameters and evaluate"""
(predictions, splitassignments) = parsePrediction(prediction_file)
roc_fname = None
prc_fname = None
if roc_or_prc is not None:
if roc_or_prc.startswith('roc'):
roc_fname = roc_or_prc
elif roc_or_prc.startswith('prc'):
prc_fname = roc_or_prc
# run training and testing
(res_str, mean_roc, mean_prc, mean_acc) = evaluate(predictions, splitassignments, trainlabels, roc_fname, prc_fname)
del mean_acc
del mean_prc
del mean_roc
return res_str
def poimSVM(self, examples, labels, poimfile):
"""A top level script to parse input parameters and plot poims"""
# train svm and compute POIMs
(svm, feats_train) = self.train(examples, labels)
del feats_train
(poim, max_poim, diff_poim, poim_totalmass) = computePOIMs(svm, self.kernel, self.kparam['poimdegree'], len(examples[0]))
# plot poims
plotPOIMs(poimfile, poim, max_poim, diff_poim, poim_totalmass, self.kparam['poimdegree'], len(examples[0]))
# independent functions .............
def computePOIMs(svm, kernel, poimdegree, max_len):
"""For a trained SVM, compute Position Oligomer Importance Matrices"""
distr = ones((max_len, 4))/4
kernel.prepare_POIM2(distr)
kernel.compute_POIM2(poimdegree, svm)
poim = kernel.get_POIM2()
kernel.cleanup_POIM2()
(poim, max_poim, diff_poim) = reshapeNormalizeContribs(poim, poimdegree, max_len)
(poim_weightmass, poim_totalmass) = computeWeightMass(poim, poimdegree, max_len)
del poim_weightmass
poim_totalmass = poim_totalmass/numpy.sum(poim_totalmass)
return (poim, max_poim, diff_poim, poim_totalmass)
def computeWeightMass(C, maxOrder, seqLen):
"""POIM Function"""
mass = numpy.zeros((maxOrder, seqLen), numpy.double)
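# mass[k, p] is the total absolute importance of all (k+1)-mers at
# sequence position p; 'total' sums this over all orders and positions.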
for i in xrange(0, maxOrder):
mass[i, :] = sum(numpy.abs(C[i]))
total = numpy.sum(mass)
return (mass, total)
def reshapeNormalizeContribs(C, maxOrder, seqLen): #, background): #opts = {}):
"""POIM Function"""
alphabetSize = 4
Contribs = []
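# C is a flat vector holding one block per k-mer order: block k contains a
# 4**(k+1) x seqLen matrix of importance scores (all (k+1)-mers at every
# position), which is sliced out and reshaped below.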
l = 0
for i in xrange(0, maxOrder):
L = l + (alphabetSize**(i + 1)) * seqLen
vec = C[l:L].copy()
Contribs.append(vec.reshape(seqLen, alphabetSize**(i + 1) ).T)
l = L
assert(l == len(C))
maxContribs = numpy.zeros((maxOrder, seqLen), numpy.double)
maxp_str = numpy.zeros((maxOrder, seqLen), numpy.int)
for i in xrange(0, maxOrder):
con = numpy.abs(Contribs[i])
maxContribs[i, :] = numpy.max(con, axis = 0)
maxp_str[i, :] = numpy.argmax(con, axis = 0)
diffmaxContribs = numpy.zeros((maxOrder, seqLen), numpy.double)
for k in xrange(1, maxOrder ):
numsy = 4**(k + 1)
for l in xrange(0, seqLen-k):
km = maxp_str[k, l]
A = numpy.abs(Contribs[k - 1][int(km // 4), l])
B = numpy.abs(Contribs[k - 1][numpy.mod(km, numsy/4), l + 1])
correction = numpy.max([A, B])
diffmaxContribs[k, l] = maxContribs[k, l] - correction
return (Contribs, maxContribs, diffmaxContribs)
def plotROC(output, LTE, draw_random = False, figure_fname = "", roc_label = 'ROC'):
"""Uses matplotlib to plot the area under
the ROC curve into a supplied figure_fname file"""
from matplotlib import use, font_manager
use("Agg") # matplotlib save without display
from pylab import figure, plot, xticks, yticks, xlabel, ylabel, legend, savefig, axis
figure(1, dpi = 150, figsize = (4, 4))
pm = PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points = pm.get_ROC()
points = numpy.array(points).T # for pylab.plot
plot(points[0], points[1], 'b-', label = roc_label)
if draw_random:
plot([0, 1], [0, 1], 'r-', label = 'random guessing')
axis([0, 1, 0, 1])
ticks = numpy.arange(0., 1., .1, dtype = numpy.float64)
xticks(ticks, size = 10)
yticks(ticks, size = 10)
xlabel('1 - specificity (false positive rate)', size = 10)
ylabel('sensitivity (true positive rate)', size = 10)
legend(loc = 'lower right', prop = font_manager.FontProperties('tiny'))
if figure_fname != None:
warnings.filterwarnings('ignore', 'Could not match*')
tempfname = figure_fname + '.png'
savefig(tempfname)
shutil.move(tempfname, figure_fname)
auROC = pm.get_auROC()
return auROC
def plotPRC(output, LTE, figure_fname = "", prc_label = 'PRC'):
"""Plots a precision recall curve into the supplied
figure_fname file"""
from matplotlib import use
use("Agg") # matplotlib save without display
from pylab import figure, plot, axis, xticks, yticks, ylabel, xlabel, legend, savefig
figure(2, dpi = 150, figsize = (4, 4))
pm = PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points = pm.get_PRC()
points = numpy.array(points).T # for pylab.plot
plot(points[0], points[1], 'b-', label = prc_label)
axis([0, 1, 0, 1])
ticks = numpy.arange(0., 1., .1, dtype = numpy.float64)
xticks(ticks, size = 10)
yticks(ticks, size = 10)
xlabel('sensitivity (true positive rate)', size = 10)
ylabel('precision (1 - false discovery rate)', size = 10)
legend(loc = 'lower right')
if figure_fname != None:
warnings.filterwarnings('ignore', 'Could not match*')
tempfname = figure_fname + '.png'
savefig(tempfname)
shutil.move(tempfname, figure_fname)
auPRC = pm.get_auPRC()
return auPRC
def weblogoPOIM(logofile, poim, max_len):
"""instead of plotting the POIM heatmap, create a weblogo from the 1st-degree poim"""
warnings.filterwarnings('ignore', ' This call to matplotlib.use()*')
from corebio.data import rna_letters, dna_letters, amino_acid_letters
from weblogolib import LogoData, LogoOptions, LogoFormat, classic, png_print_formatter
#print "WEBLOGO!"
#print "Writing ", logofile
#print poim[0]
positive_logo = []
negative_logo = []
for i in xrange(len(poim[0])):
positive_logo.append([])
negative_logo.append([])
for j in xrange(len(poim[0][i])):
if poim[0][i][j] < 0:
positive_logo[i].append(0)
negative_logo[i].append(poim[0][i][j] * -10000)
else:
negative_logo[i].append(0)
positive_logo[i].append(poim[0][i][j] * 1000)
#print "Positive logo: ", positive_logo
#print "Negative logo: ", negative_logo
pos_data = LogoData.from_counts('ACGT', numpy.array(positive_logo).T, None)
neg_data = LogoData.from_counts("ACGT", numpy.array(negative_logo).T, None)
neg_opt = LogoOptions()
neg_opt.fineprint += " from KIRMES POIM data"
#logoopt.number_interval = 5
neg_opt.small_fontsize = 4
neg_opt.title_fontsize = 8
neg_opt.scale_width = False
title = os.path.split(logofile)[1]
title = title[:title.rfind(".")]
if "_" in title:
title = title[title.rfind("_") + 1:]
neg_opt.logo_title = title + " Negative Logo"
neg_format = LogoFormat(neg_data, neg_opt)
pos_opt = LogoOptions()
#pos_opt.show_ends = True
pos_opt.scale_width = False
pos_opt.logo_title = title + " Positive Sequence Logo"
pos_opt.show_fineprint = False
pos_opt.color_scheme = classic
pos_format = LogoFormat(pos_data, pos_opt)
neg_logo = open(logofile + "n.png", 'w')
png_print_formatter(neg_data, neg_format, neg_logo)
neg_logo.close()
pos_logo = open(logofile + "p.png", 'w')
png_print_formatter(pos_data, pos_format, pos_logo)
pos_logo.close()
concatPNG(logofile, (logofile + "p.png", logofile + "n.png"))
os.remove(logofile + "n.png")
os.remove(logofile + "p.png")
def concatPNG(outfilename, infilenames):
"""Vertically concatenates a list of PNG files and writes them to outfilename.
    This function uses the width of the first image supplied as the width of the output image."""
from PIL import Image
total_height = 0
infiles = []
total_imgs = 0
for infilename in infilenames:
try:
infiles.append(Image.open(infilename))
except:
print "Error loading image " + infilename
raise
total_height += infiles[total_imgs].size[1]
total_imgs += 1
im = Image.new("RGB", (infiles[0].size[0], total_height))
insert_at = 0
for i in range(total_imgs):
im.paste(infiles[i], (0, insert_at))
insert_at += infiles[i].size[1]
im.save(outfilename)
def plotPOIMs(poimfile, poim, max_poim, diff_poim, poim_totalmass, poimdegree, max_len):
"""Plot a summary of the information in poims"""
warnings.filterwarnings('ignore', 'Module pytz was already imported*')
warnings.filterwarnings('ignore', ' This call to matplotlib.use()*')
from matplotlib import use
use("Agg") # matplotlib save without display
from pylab import figure, savefig, subplot, title, pcolor, colorbar, yticks, ylabel
from pylab import axis, plot, xlabel, xticks, subplots_adjust, clf
#import matplotlib
figure(3, dpi = 150, figsize = (8, 3.5))
# summary figures
fontdict = dict(family = "cursive", weight = "bold", size = 12, y = 1.05)
#subplot(3, 2, 1)
#title('Total POIM Mass', fontdict)
#plot(poim_totalmass)
#ylabel('weight mass', size = 5)
#colorbar()
subplot(1, 2, 1)
title('POIMs', fontdict)
pcolor(max_poim, shading = 'flat')
subplots_adjust(wspace = 0.3)
colorbar()
#subplot(3, 2, 5)
#title('Differential POIMs', fontdict)
#pcolor(diff_poim, shading = 'flat')
#for plot in [3, 5]:
# subplot(3, 2, 3)
ticks = numpy.arange(1., poimdegree + 1, 1, dtype = numpy.float64)
ticks_str = []
for i in xrange(0, poimdegree):
ticks_str.append("%i" % (i + 1))
ticks[i] = i + 0.5
yticks(ticks, ticks_str)
ylabel('degree', size = 9)
# per k-mer figures
fontdict = dict(family = "cursive", weight = "bold", size = 12, y = 1.04)
# 1-mers
#subplot(3, 2, 2)
#title('1-mer Positional Importance', fontdict)
#pcolor(poim[0], shading = 'flat')
#ticks_str = ['A', 'C', 'G', 'T']
#ticks = [0.5, 1.5, 2.5, 3.5]
#yticks(ticks, ticks_str, size = 5)
#axis([0, max_len, 0, 4])
# 2-mers
subplot(1, 2, 2)
title('2-mer Positional Importance', fontdict)
pcolor(poim[1], shading = 'flat')
i = 0
ticks = []
ticks_str = []
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
ticks_str.append(l1 + l2)
ticks.append(0.5 + i)
i += 1
yticks(ticks, ticks_str, fontsize = 9)
axis([0, max_len, 0, 16])
# 3-mers
#subplot(3, 2, 6)
#title('3-mer Positional Importance', fontdict)
#if poimdegree > 2:
# pcolor(poim[2], shading = 'flat')
# i = 0
# ticks = []
# ticks_str = []
# for l1 in ['A', 'C', 'G', 'T']:
# for l2 in ['A', 'C', 'G', 'T']:
# for l3 in ['A', 'C', 'G', 'T']:
# if numpy.mod(i, 4) == 0:
# ticks_str.append(l1 + l2 + l3)
# ticks.append(0.5 + i)
# i += 1
# yticks(ticks, ticks_str, fontsize = 5)
# axis([0, max_len, 0, 64])
# x-axis on last two figures
#for plot in [5, 6]:
# subplot(3, 2, plot)
xlabel('sequence position', size = 9)
# finishing up
for plot in xrange(1, 3): # 6):
subplot(1, 2, plot)
xticks(fontsize = 9)
for plot in [1]: #, 3, 5]:
subplot(1, 2, plot)
yticks(fontsize = 9)
# write to file
warnings.filterwarnings('ignore', 'Could not match*')
#savefig('/tmp/temppylabfig.png')
savefig(poimfile)
clf()
def getPartitionedSet(total, crossval_repeat):
"""Partitions a number of samples into crossvalidation bins"""
size = int(total / crossval_repeat)
mod = total % crossval_repeat
splits = []
for i in range(0, crossval_repeat):
if i < mod:
splits.append(size + 1)
else:
splits.append(size)
random.seed()
ipartition = random.sample(xrange(0, total), total) # random sampling
index = 0
partitions = []
for size in splits:
partitions.append(ipartition[index:index + size])
index += size
return partitions
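# Usage sketch (illustrative; exact content is hypothetical since sampling is random):
#   getPartitionedSet(10, 3) computes splits = [4, 3, 3] and could return
#   [[7, 2, 9, 0], [5, 1, 3], [8, 6, 4]]; bin sizes always differ by at most one.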
def getCurrentSplit(repetition, partitions, labels, seqs):
"""Returns the correct features & labels for this partition
for this repetition"""
X = []; Y = []; XT = []; YT = []
for i in range(0, len(partitions)):
if type(seqs) == type(list([])):
for j in range(0, len(partitions[i])):
if repetition != i:
X.append(seqs[partitions[i][j]])
Y.append(labels[partitions[i][j]])
else:
XT.append(seqs[partitions[i][j]])
YT.append(labels[partitions[i][j]])
else:
if repetition != i:
if len(X) == 0:
X = seqs.take(partitions[i], axis = 1)
Y = labels.take(partitions[i])
else:
X = numpy.concatenate((X, seqs.take(partitions[i], axis = 1)), axis = 1)
Y = numpy.concatenate((Y, labels.take(partitions[i])))
else:
XT = seqs.take(partitions[i], axis = 1)
YT = labels.take(partitions[i])
return X, Y, XT, YT
def saveSVM(pickle_filename, svm, kernel):
"""Pickles a Shogun SVM object to a file by saving its settings"""
from cPickle import Pickler
pickle_file = open(pickle_filename, 'wb')
pck = Pickler(pickle_file)
pck.dump((__version__, \
svm.get_num_support_vectors(), \
kernel.get_name(), \
svm.get_bias(), \
svm.get_alphas(), \
svm.get_support_vectors()))
pickle_file.close()
def loadSVM(pickled_svm_filename, C, labels):
"""Loads a Shogun SVM object which was pickled by saveSVM"""
from cPickle import Unpickler, PickleError
from shogun.Kernel import CombinedKernel
pickle_file = open(pickled_svm_filename, 'rb')
unpck = Unpickler(pickle_file)
(version, num_sv, name, bias, alphas, svs) = unpck.load()
if (version == __version__):
svm = LibSVM(num_sv) # same as .create_new_model(num_sv)
svm.set_bias(bias)
svm.set_alphas(alphas)
svm.set_support_vectors(svs)
kernel = CombinedKernel() #not sure if this is even required
kernel.set_name(name) # maybe not required
svm.set_kernel(kernel)
else:
print "File was pickled by another version of EasySVM.py or is not a kernel:"
print "Received from ", pickled_svm_filename, ": ", version, " expected: ", __version__
raise PickleError
return svm
def confusionMatrix(labels_test, labels_predicted):
"""Calculates the complete confusion matrix from true/false positives/negatives"""
if len(labels_test) != len(labels_predicted):
return 0
TP = 0; FP = 0; TN = 0; FN = 0
for i in range(0, len(labels_test)):
if labels_test[i] == 0 or labels_predicted[i] == 0:
return 0
if labels_test[i] > 0:
if labels_predicted[i] > 0: TP += 1
else: FN += 1
else:
if labels_predicted[i] > 0: FP += 1
else: TN += 1
return (TP, TN, FP, FN)
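# Usage sketch (illustrative): labels are expected to be strictly positive or
# negative (e.g. +1/-1); a 0 label or a length mismatch makes the function
# return 0 instead of a tuple. For example,
#   confusionMatrix([1, -1, 1, -1], [1, 1, -1, -1]) -> (TP, TN, FP, FN) = (1, 1, 1, 1)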
def accuracy(output, labels_test):
"""Calculates the accurracy from true/false positives/negatives"""
TP, TN, FP, FN = confusionMatrix(labels_test, numpy.sign(output))
return float(TP + TN) / (TP + TN + FP + FN)
def calcROC(output, LTE):
"""Uses shogun functions to calculate the area under the ROC curve"""
pm = PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
auROC = pm.get_auROC()
return auROC
def calcPRC(output, LTE):
"""Uses shogun functions to calculate the precision recall curve"""
pm = PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
auPRC = pm.get_auPRC()
return auPRC
def parseRange(string):
"""Parses a dash-separated string of ints into a tuple"""
splitarray = string.split("-")
if len(splitarray) == 1:
return (int(splitarray[0]), int(splitarray[0]))
if len(splitarray) == 2:
return (int(splitarray[0]), int(splitarray[1]))
raise ValueError("Cannot parse range " + string)
def parseFloatList(string):
"""Parses a comma-separated string of floats into a list"""
splitarray = string.split(",")
float_list = []
for elem in splitarray:
float_list.append(float(elem))
return float_list
def parseIntList(string):
"""Parses a comma-separated string of ints into a list"""
splitarray = string.split(",")
int_list = []
for elem in splitarray:
int_list.append(int(elem))
return int_list
def parsePrediction(prediction_file):
"""Returns the original output and split assignments
of a prediction run, from a prediction_file"""
outputs = []
splitassignments = []
f = open(prediction_file)
lines = f.readlines()
num = 0
for line in lines:
if len(line) > 0 and line[0] != '#':
elems = line.split('\t')
assert(len(elems) > 1)
assert(int(elems[0]) == num)
num += 1
if len(elems) == 2:
outputs.append(float(elems[1]))
else:
assert(len(elems) == 3)
outputs.append(float(elems[1]))
splitassignments.append(float(elems[2]))
f.close()
if len(splitassignments) == 0:
splitassignments = None
return (outputs, splitassignments)
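# Note on the expected file layout (inferred from the parser above): each
# non-comment line is tab-separated as <running index>\t<output>[\t<split>],
# e.g. "0\t0.73\t1.0"; lines starting with '#' are skipped.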
def non_atcg_convert(seq, nuc_con):
""" Converts Non ATCG characters from DNA sequence """
if nuc_con == '':
sys.stderr.write("usage: Provide a choice for non ACGT nucleotide conversion [T|A|C|G|random] at last\n")
sys.exit(-1)
flag = 0
if len(nuc_con)>1:
if nuc_con != 'random':
flag = 1
else:
if re.match(r'[^ATCG]', nuc_con, re.IGNORECASE):
flag = 1
if flag == 1:
sys.stderr.write("usage: Conversion nucleotide choice -"+ nuc_con +"- failed. pick from [T|A|C|G|random]\n")
sys.exit(-1)
nuc_con = nuc_con.upper()
mod_seq = []
for i in range(len(seq)):
if re.search(r'[^ACTG]', seq[i], re.IGNORECASE):
if nuc_con == 'RANDOM':
nucleotide = 'ATCG'
line = ''
for single_nuc in seq[i]:
if re.match(r'[^ACGT]', single_nuc, re.IGNORECASE):
single = random.choice(nucleotide)
line += single
else:
single_nuc = single_nuc.upper()
line += single_nuc
mod_seq.append(line)
else:
                seq[i] = re.sub(r'[^ATCGatcg]', nuc_con, seq[i])
seq[i] = seq[i].upper()
mod_seq.append(seq[i])
else:
seq[i] = seq[i].upper()
mod_seq.append(seq[i])
return mod_seq
def non_aminoacid_converter(seq, amino_con):
""" Converts Non amino acid characters from protein sequence """
if amino_con == '':
sys.stderr.write("usage: Provide a choice for replacing non amino acid characters\n")
sys.exit(-1)
flag = 0
if len(amino_con)>1:
if amino_con != 'random':
flag = 1
else:
if re.match(r'[^GPAVLIMCFYWHKRQNEDST]', amino_con, re.IGNORECASE):
flag = 1
if flag == 1:
sys.stderr.write("usage: Replace aminoacid chioce -"+ amino_con +"- failed. Pick a valid aminoacid single letter code/random\n")
sys.exit(-1)
amino_con = amino_con.upper()
opt_seq = []
for i in range(len(seq)):
if re.search(r'[^GPAVLIMCFYWHKRQNEDST]', seq[i], re.IGNORECASE):
if amino_con == 'RANDOM':
aminoacid = 'GPAVLIMCFYWHKRQNEDST'
line = ''
for single_amino in seq[i]:
if re.match(r'[^GPAVLIMCFYWHKRQNEDST]', single_amino, re.IGNORECASE):
r_amino = random.choice(aminoacid)
line += r_amino
else:
single_amino = single_amino.upper()
line += single_amino
opt_seq.append(line)
else:
                seq[i] = re.sub(r'[^GPAVLIMCFYWHKRQNEDSTgpavlimcfywhkrqnedst]', amino_con, seq[i])
seq[i] = seq[i].upper()
opt_seq.append(seq[i])
else:
seq[i] = seq[i].upper()
opt_seq.append(seq[i])
return opt_seq
def evaluate(predictions, splitassignments, labels, roc_fname = None, prc_fname = None):
"""Evaluate prediction results"""
res_str = ""
xval = 1
if splitassignments != None:
for split in splitassignments:
if split + 1 > xval:
xval = int(split + 1)
if xval > 1:
res_str = "Evaluating on %i examples in %i splits\n" % (len(labels), xval)
else:
res_str = "Evaluating on %i examples\n" % len(labels)
output_splits = xval * [[]]
label_splits = xval * [[]]
for i in xrange(xval):
label_splits[i] = []
output_splits[i] = []
for i in xrange(0, len(labels)):
if xval > 1:
split = int(splitassignments[i])
else:
split = 0
output_splits[split].append(predictions[i])
label_splits[split].append(labels[i])
sum_accuracy = 0.0
sum_roc = 0.0
sum_prc = 0.0
for split in xrange(xval):
if xval > 1:
res_str += 'Split %d\n' % (split + 1)
LTE = label_splits[split]
svmout = output_splits[split]
numpos = 0
for l in LTE:
if l == 1:
numpos += 1
istwoclass = numpos > 0 and numpos < len(LTE)
if xval > 1:
res_str += ' number of positive examples = %i\n' % numpos
if xval > 1:
res_str += ' number of negative examples = %i\n' % (len(LTE)-numpos)
if istwoclass:
auROC = calcROC(svmout, LTE)
if xval > 1:
res_str += ' Area under ROC curve = %2.1f %%\n' % (100.0 * auROC)
sum_roc += auROC
if roc_fname != None:
if split != xval - 1:
plotROC(svmout, LTE, split == xval - 1, None, "ROC curve of SVM, split %i" % (split + 1))
else:
plotROC(svmout, LTE, split == xval - 1, roc_fname, "ROC curve of SVM, split %i" % (split + 1))
auPRC = calcPRC(svmout, LTE)
if xval > 1:
res_str += ' Area under PRC curve = %2.1f %%\n' % (100.0 * auPRC)
sum_prc += auPRC
if prc_fname != None:
if split != xval - 1:
plotPRC(svmout, LTE, None, "PRC curve of SVM, split %i" % (split + 1))
else:
plotPRC(svmout, LTE, prc_fname, "PRC curve of SVM, split %i" % (split + 1))
acc = accuracy(svmout, LTE)
if xval > 1:
res_str += ' accuracy (at threshold 0) = %2.1f %% \n' % (100.0 * acc)
sum_accuracy += acc
numpos = 0
for l in labels:
if l == 1:
numpos += 1
mean_roc = sum_roc/xval
mean_prc = sum_prc/xval
mean_acc = sum_accuracy/xval
res_str += 'Averages\n'
res_str += ' number of positive examples = %i\n' % round(numpos/xval)
res_str += ' number of negative examples = %i\n' % round((len(labels) - numpos)/xval)
res_str += ' Area under ROC curve = %2.1f %%\n' % (100.0 * mean_roc)
res_str += ' Area under PRC curve = %2.1f %%\n' % (100.0 * mean_prc)
res_str += ' accuracy (at threshold 0) = %2.1f %% \n' % (100.0 * mean_acc)
return (res_str, mean_roc, mean_prc, mean_acc) | bsd-3-clause |
snfactory/extract-star | scripts/extract_z.py | 1 | 23465 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
## Filename: extract_z.py
## Version: $Revision$
## Description: Extract redshift from galaxy spectrum
## Author: $Author$
## $Id$
##############################################################################
"""Extract redshift on galaxy spectrum from [OII] or Halpha/[NII]."""
__version__ = "$Id$"
__author__ = "Y. Copin <[email protected]>"
import numpy as N
import pyfits
from pySnurp import Spectrum
from pySNIFS import spectrum as SNIFS_spectrum
from pySNIFS import SNIFS_cube
import pySNIFS_fit
from ToolBox.MPL import errorband
from matplotlib.mlab import prctile
CLIGHT = 299792.458 # km/s
def find_max(lbda, flux, lrange):
"""Look for lbda of maximum flux within wavelength range lrange."""
lmin,lmax = lrange
g = (lmin<=lbda) & (lbda<=lmax)
if not lbda[g].any():
raise ValueError("Reasearch range %.2f-%.2f incompatible with "
"wavelength domaine %.2f-%.2f" %
(lmin,lmax,lbda[0],lbda[-1]))
return lbda[g][flux[g].argmax()]
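# Usage sketch (hypothetical arrays):
#   lbda = N.arange(3000., 9000., 2.)
#   flux = N.exp(-0.5*((lbda - 6563.)/5.)**2)
#   find_max(lbda, flux, (6400, 6700))   # -> wavelength of the peak, ~6563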
class PolyBackground:
"""Polynomial background."""
name = "PolyBackground"
def __init__(self, params, cube):
self.deg,self.lmin,self.lmax = params
self.npar_ind = 0
self.npar_cor = int(self.deg+1)
self.npar = self.npar_ind*cube.nslice + self.npar_cor
self.l = N.reshape(cube.lbda,cube.data.shape)
self.x = (2*self.l - (self.lmin+self.lmax))/(self.lmax - self.lmin)
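        # Note: this rescaling maps [lmin, lmax] onto [-1, 1], which helps
        # keep the polynomial coefficients numerically well conditioned.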
self.parnames = [ 'b%d' % i for i in range(self.npar_cor) ]
def __str__(self):
return "Polynomial background [deg=%d]" % self.deg
def comp(self, param):
self.param = param
# val = a0 + a1*x + a2*x**2 + ...
val = self.param[-1]
for par in self.param[-2::-1]:
val = val*self.x + par
return val
def deriv(self, param):
self.param = param
# val = a0 + a1*x + a2*x**2 + ...
grad = N.zeros((self.npar_cor,)+self.l.shape,'d')
for i in range(self.npar_cor):
grad[i] = self.x**i
return grad
class AbstractLineSingle:
"""Simple 1-gaussian emission line ABSTRACT class (without
resorting to Python ABC)."""
name = "" # Descriptive name
l0 = None # Emission line wavelength (std air)
parnames = ['1+z','sigma','intensity']
def __init__(self, cube):
if self.l0 is None or not self.name:
raise TypeError("'%s' should not be instanciated directly" %
self.__class__.__name__)
self.npar_ind = 0
self.npar_cor = len(self.parnames) # 1+z,sigma,I
self.npar = self.npar_ind*cube.nslice + self.npar_cor
self.l = N.reshape(cube.lbda,cube.data.shape)
def __str__(self):
return self.name
def comp(self, param):
self.param = zp1,sig,i = param
return i * N.exp(-0.5*( (self.l - self.l0*zp1)/sig )**2)
def deriv(self, param):
self.param = zp1,sig,i = param
d = (self.l - self.l0*zp1) / sig
g = N.exp(-0.5*d**2)
grad = N.zeros((self.npar_cor,)+self.l.shape,'d')
grad[0] = i*g*self.l0*d/sig # dval/dzp1
grad[1] = i*g*d**2/sig # dval/dsig
grad[2] = g # dval/di
return grad
def SingleLineFactory(name, l0):
"""Generate a class derived from abstract class EmissionLineSingle."""
class Line(AbstractLineSingle):
pass
Line.name = name # Descriptive name
Line.l0 = float(l0) # Emission line wavelength
return Line
class LinesOII:
"""[OII] doublet is described by 2 independant gaussians"""
name = "[OII] doublet"
l1 = 3726.03 # [OII] doublet
l2 = 3728.73
parnames = ['1+z','sigma','[OII]1','[OII]2']
def __init__(self, cube):
self.npar_ind = 0
self.npar_cor = len(self.parnames) # 1+z,sigma,I1,I2
self.npar = self.npar_ind*cube.nslice + self.npar_cor
self.l = N.reshape(cube.lbda,cube.data.shape)
def __str__(self):
return "[OII] doublet"
def comp(self, param):
self.param = zp1,sig,i1,i2 = param
val = i1 * N.exp(-0.5*( (self.l - self.l1*zp1)/sig )**2) + \
i2 * N.exp(-0.5*( (self.l - self.l2*zp1)/sig )**2)
return val
def deriv(self, param):
self.param = zp1,sig,i1,i2 = param
d1 = (self.l - self.l1*zp1) / sig
d2 = (self.l - self.l2*zp1) / sig
g1 = N.exp(-0.5*d1**2)
g2 = N.exp(-0.5*d2**2)
# val = i1*g1(zp1,sig) + i2*g2(zp1,sig)
grad = N.zeros((self.npar_cor,)+self.l.shape,'d')
grad[0] = i1*g1*self.l1*d1/sig + i2*g2*self.l2*d2/sig # dval/dzp1
grad[1] = i1*g1*d1**2/sig + i2*g2*d2**2/sig # dval/dsig
grad[2] = g1 # dval/di1
grad[3] = g2 # dval/di2
return grad
class LinesNIIHa:
"""[NII],Halpha complex is described by 1 gaussian for Ha + 2
correlated gaussians for [NII]."""
name = "[NII]+Ha complex"
lHa = 6562.80 # Halpha
lNII1 = 6547.96 # [NII]1
lNII2 = 6583.34 # [NII]2
rNII = 0.340 # i[NII]1/i[NII]2
parnames = ['1+z','sigma','Halpha','[NII]']
def __init__(self, cube):
self.npar_ind = 0
self.npar_cor = len(self.parnames) # 1+z,sigma,I(Ha),I([NII])
self.npar = self.npar_ind*cube.nslice + self.npar_cor
self.l = N.reshape(cube.lbda,cube.data.shape)
def __str__(self):
return "[NII],Ha complex"
def comp(self, param):
"""Halpha(G1) + [NII](G2,G3)"""
self.param = zp1,sig,iH,iN = param
val = iH*N.exp(-0.5*( (self.l - self.lHa*zp1)/sig )**2) # Halpha
val+= ( N.exp(-0.5*( (self.l - self.lNII1*zp1)/sig )**2) * self.rNII +
N.exp(-0.5*( (self.l - self.lNII2*zp1)/sig )**2) ) * iN # [NII]
return val
def deriv(self, param):
self.param = zp1,sig,iH,iN = param
dH = (self.l - self.lHa*zp1) / sig
d1 = (self.l - self.lNII1*zp1) / sig
d2 = (self.l - self.lNII2*zp1) / sig
gH = N.exp(-0.5*dH**2)
g1 = N.exp(-0.5*d1**2)
g2 = N.exp(-0.5*d2**2)
# val = iH*gH(zp1,sig) + iN*(r*g1(zp1,sig) + g2(zp1,sig))
grad = N.zeros((self.npar_cor,)+self.l.shape,'d')
grad[0] = iH * gH * self.lHa*dH/sig + \
iN * ( g1 * self.lNII1*d1/sig * self.rNII +
g2 * self.lNII2*d2/sig ) # dval/dzp1
grad[1] = iH * gH * dH**2/sig + \
iN * ( g1 * d1**2/sig * self.rNII +
g2 * d2**2/sig ) # dval/dsig
grad[2] = gH # dval/diH
grad[3] = g1*self.rNII + g2 # dval/diN
return grad
def flux(self, par, cov=None):
"""Flux (and error) of Halpha line."""
# par: 0:1+z, 1:sigma, 2:Halpha, 3:[NII]
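        # Each Gaussian I*exp(-0.5*((l - l0)/sig)**2) integrates to
        # sqrt(2*pi)*sig*I, so the total flux of the Ha + [NII] complex is
        # sqrt(2*pi)*sig*(I_Ha + I_NII*(1 + rNII)), as computed below.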
f = N.sqrt(2*N.pi)*par[1] * (par[2] + par[3]*(1+self.rNII))
if cov is not None:
# Compute jacobian of f
j = N.empty(3, dtype='d')
j[0] = (par[2] + par[3]*(1+self.rNII))
j[1] = par[1]
j[2] = par[1] * (1+self.rNII)
j *= N.sqrt(2*N.pi)
c = cov[1:4,1:4] # Select proper submatrix
df = N.sqrt(N.dot(j, N.dot(c,j)))
return f,df
else:
return f
def addRedshiftedLines(ax, z):
lines = [('[OII]', (3726.03,3728.73)),
('[NeIII]', (3868.69,)),
('He', (3970.07,)),
('Hd', (4101.73,)),
('Hg', (4340.46,)),
('[OIII]', (4363.15,)),
('HeII', (4685.74,)),
('Hb', (4861.32,)),
('[OIII]', (4958.83,5006.77)),
('[NI]', (5197.90,5200.39)),
('[OI]', (5577.34,6300.20,)),
('[NII]', (6547.96,6583.34)),
('Ha', (6562.80,)),
('[SII]', (6716.31,6730.68))]
lmin,lmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
y0 = ymax - (ymax-ymin)/5
for name,lbdas in lines:
for i,l in enumerate(lbdas):
l *= (1+z)
if not lmin<l<lmax: continue
ax.axvline(l, ymin=0.2,ymax=0.7, c='0.7', label='_', zorder=1)
if i==0:
ax.text(l,y0,name, size='x-small',
horizontalalignment='center',
verticalalignment='center',
rotation='vertical')
def plot_correlation_matrix(ax, corr, parnames=None):
npar = len(corr)
im = ax.imshow(N.absolute(corr),
norm=P.matplotlib.colors.LogNorm(),
origin='upper', extent=(-0.5,npar-0.5,-0.5,npar-0.5))
if parnames:
assert len(parnames)==npar
ax.set_xticks(range(npar)) # Set the nb of ticks
ax.set_xticklabels(parnames, rotation=45, size='smaller')
ax.set_yticks(range(npar))
ax.set_yticklabels(parnames[::-1], rotation=45, size='smaller')
fig = ax.get_figure()
cb = fig.colorbar(im, ax=ax, orientation='horizontal')
cb.set_label("|Correlation matrix|")
if __name__ == '__main__':
import os
import optparse
usage = "usage: [PYSHOW=1] %prog [options] spec|cube.fits"
parser = optparse.OptionParser(usage, version=__version__)
parser.add_option("-e", "--emissionline", dest='line',
help="Emission line to be adjusted [%default]",
default='auto')
parser.add_option("-O", "--obsFrame", action='store_true',
help="Adjust line in observer frame (not for redshift).",
default=False)
parser.add_option("-M", "--map", action='store_true',
help="Generate spatial map (for cube input).",
default=False)
parser.add_option("-p", "--plot", action='store_true',
help="Plot flag.")
opts,args = parser.parse_args()
if len(args) != 1:
parser.error("No or too many arguments")
else:
specname = args[0]
try:
pspec = Spectrum(specname)
print pspec
isSpec = True
step = pspec.step
except KeyError:
cube = SNIFS_cube(specname)
print "Cube %s: %d spx, %d px [%.2f-%.2f]" % \
(specname, cube.nlens, cube.nslice, cube.lstart, cube.lend)
isSpec = False
step = cube.lstep
if isSpec: # Input is a Spectrum
lbda = pspec.x
resSpec = pspec.y
assert pspec.varname, "Input spectrum has no variance extension."
resVar = pspec.v
X = pspec.readKey('CHANNEL','X')[0].upper() # B or R
obj = pspec.readKey('OBJECT', 'Unknown')
filename = pspec.readKey('FILENAME', 'Unknown')
flxunits = pspec.readKey('FLXUNITS', 'counts')
else: # Input is a Cube
lbda = cube.lbda
resSpec = cube.data.mean(axis=1) # Mean spectrum
resVar = N.sqrt(cube.var.sum(axis=1)/cube.nlens**2)
X = cube.e3d_data_header.get('CHANNEL','X')[0].upper() # B or R
obj = cube.e3d_data_header.get('OBJECT', 'Unknown')
filename = cube.e3d_data_header.get('FILENAME', 'Unknown')
flxunits = cube.e3d_data_header.get('FLXUNITS', 'counts')
print "%s: %s" % (obj, filename)
if opts.line=='auto':
# Automatic redshift mode: use [OII] for B-channel, and
# [NIIHa] for R-channel
if X=='B':
opts.line = 'OII'
elif X=='R':
opts.line = 'NIIHa'
else:
raise IOError("Unrecognized input channel")
# Convert array to pySNIFS.spectrum on a restricted range
if opts.line=='OII':
l0 = find_max(lbda, resSpec, (3700,4200)) # OII from 1+z=1 to 1.13
lmin,lmax = l0-50,l0+50
print "Fit [OII] doublet around %.0f Å (%.0f Å window)" % (l0,100)
elif opts.line=='NIIHa':
l0 = find_max(lbda, resSpec, (6560,7400)) # Ha from 1+z=1 to 1.13
lmin,lmax = l0-100,l0+100
print "Fit [NII],Ha complex around %.0f Å (%.0f Å window)" % (l0,200)
elif opts.line=='OI':
Line = SingleLineFactory("night-sky line [OI]", 5577.34)
l0 = find_max(lbda, resSpec, (Line.l0-50,Line.l0+50))
lmin,lmax = l0-50,l0+50
print "Fit %s around %.0f Å (%.0f Å window)" % (Line.name,l0,100)
else: # Read 'name,l0' from option
try:
name,l0 = opts.line.split(',')
l0 = float(l0)
Line = SingleLineFactory(name, l0)
l0 = find_max(lbda, resSpec, (Line.l0-50,Line.l0+50))
lmin,lmax = l0-50,l0+50
print "Fit %s around %.0f Å (%.0f Å window)" % (Line.name,l0,100)
except ValueError:
parser.error("Unknown line '%s' ('OII'|'NIIHa'|'OI'|'name,l0')" % \
opts.line)
g = (lmin<=lbda) & (lbda<=lmax)
x = lbda[g]
bkg = N.median(resSpec[g])
norm = resSpec[g].max() - bkg
y = (resSpec[g] - bkg) / norm
v = resVar[g] / norm**2
sspec = SNIFS_spectrum(data=y, var=v, start=x[0], step=step)
if opts.line=='OII': # [OII] doublet + background
funcs = [ LinesOII.name,
'%s;1,%f,%f' % (PolyBackground.name,lmin,lmax) ]
# 1+z,sigma,I1,I2 + bkgnd(d=1)
zp1 = x[resSpec[g].argmax()] / LinesOII.l2
params = [ [zp1, 3, 0.5, 0.5], [0, 0] ]
bounds = [ [[1.0,1.13],[2,5],[0,1],[0,1]],
[[None,None]]*2] # No constraints on background
myfunc = {LinesOII.name: LinesOII,
PolyBackground.name: PolyBackground}
elif opts.line=='NIIHa': # [NII]+Halpha complex + background
funcs = [ LinesNIIHa.name,
'%s;1,%f,%f' % (PolyBackground.name,lmin,lmax) ]
# 1+z,sigma,IHa,I[NII] + bkgnd(d=1)
zp1 = x[resSpec[g].argmax()] / LinesNIIHa.lHa
params = [ [zp1, 4, 1, 0.5], [0, 0] ]
bounds = [ [[1.0,1.13],[2,5],[0.1,2],[0,1]],
[[None,None]]*2] # No constraints on background
myfunc = {LinesNIIHa.name: LinesNIIHa,
PolyBackground.name: PolyBackground}
else: # Single-gaussian emission line
funcs = [ Line.name,
'%s;1,%f,%f' % (PolyBackground.name,lmin,lmax) ]
# 1+z,sigma,I + bkgnd(d=1)
zp1 = x[resSpec[g].argmax()] / Line.l0
params = [ [zp1, 4, 1], [0, 0] ]
bounds = [ [[0.95,1.05],[2,5],[0.1,2]],
[[None,None]]*2] # No constraints on background
myfunc = {Line.name: Line,
PolyBackground.name: PolyBackground}
# Actual fit
model = pySNIFS_fit.model(data=sspec, func=funcs,
param=params, bounds=bounds, myfunc=myfunc)
parnames = [ model.func[i].parnames for i in range(len(funcs)) ]
flatparnames = reduce(lambda x,y:x+y, parnames)
print "Adjusted parameters:", parnames
print "Initial guess:", params
model.fit(save=True, msge=False)
model.khi2 *= model.dof # True Chi2
print "Status: %d, Chi2/DoF: %.1f/%d" % \
(model.status, model.khi2, model.dof)
# Quadratic errors, including correlations (tested against Minuit)
hess = pySNIFS_fit.approx_deriv(model.objgrad, model.fitpar, order=3)
cov = 2 * N.linalg.inv(hess) # Covariance matrix (for chi2-fit)
diag = cov.diagonal()
if (diag<0).any(): # Error in fit
model.status = 1
dfitpar = N.sqrt(diag)
corr = cov/N.outer(dfitpar,dfitpar) # Correlation matrix
print "Adjusted parameters (including normalization):"
for par,val,dval in zip(flatparnames, model.fitpar, dfitpar):
print " %s = %f ± %f" % (par,val,dval)
#print "Correlation matrix:"
#print N.array2string(corr, 79, 3)
# Detection level: flux(Ha) in units of sig(flux).
func = model.func[0]
if hasattr(func,'flux'):
f,df = func.flux(model.fitpar[:func.npar_cor],
cov=cov[:func.npar_cor,:func.npar_cor])
nsig = f/df
print "Detection level: %.1f-sigma (flux: %f ± %f)" % (nsig, f, df)
else:
print "WARNING: %s has no flux method, " \
"cannot compute detection level" % func.name
nsig = 0
if opts.obsFrame:
print "%s@%.2f Å: " \
"obs: %.2f ± %.2f Å, offset: %.2f Å" % \
(Line.name, Line.l0,
Line.l0*model.fitpar[0], Line.l0*dfitpar[0],
Line.l0*(model.fitpar[0]-1))
zsys0 = zsys = 0
dzsys = 0
label=u"%s@%.2f Å = %.2f ± %.2f Å" % \
(Line.name, Line.l0,
Line.l0*model.fitpar[0], Line.l0*dfitpar[0])
else:
# Mean redshift
zsys0 = model.fitpar[0] - 1
dzsys = dfitpar[0]
#print "Estimated redshift: %f ± %f (%.1f ± %.1f km/s)" % \
# (zsys0,dzsys,zsys0*CLIGHT,dzsys*CLIGHT)
# Barycentric correction: amount to add to an observed radial
# velocity to correct it to the solar system barycenter
v = pspec.get_skycalc('baryvcor') # Relative velocity [km/s]
print "Barycentric correction: %f (%.1f ± 0.01 km/s)" % (v/CLIGHT,v)
zsys = zsys0 + v/CLIGHT # Correction precision: 0.01 km/s
dzsys = N.hypot(dzsys, 0.01/CLIGHT)
print "Heliocentric redshift: %f ± %f (%.1f ± %.1f km/s)" % \
(zsys,dzsys,zsys*CLIGHT,dzsys*CLIGHT)
label = u"z(Helio) = %.5f ± %.1g" % (zsys,dzsys)
# Store results in input spectra (awkward way, but pySnurp is
# too dumb...)
if model.status==0 and isSpec:
hdu = pyfits.open(specname, mode='update', ignore_missing_end=True)
hdu[0].header.update('CVSEXTZ',__version__)
hdu[0].header.update('EXTZ_Z',zsys,
"extract_z heliocentric redshift")
hdu[0].header.update('EXTZ_DZ',dzsys,
"extract_z error on redshift")
hdu[0].header.update('EXTZ_K2',model.khi2,
"extract_z chi2")
if nsig:
hdu[0].header.update('EXTZ_NS',nsig,
"extract_z detection level")
hdu[0].header.update('EXTZ_L',funcs[0],
"extract_z lines")
hdu.close()
if not isSpec and opts.map:
ima = cube.slice2d([0,cube.nslice],'p')
zmap = ima * N.nan # Redshift map
# Use global fit result as initial guess
params = model.unflat_param(model.fitpar)
for ino,ii,ij in zip(cube.no,cube.i,cube.j):
print "Spx #%03d at %+2dx%+2d:" % (ino,ii-7,ij-7),
y = cube.spec(no=ino)[g]
ibkg = N.median(y)
inorm = y.max() - ibkg
y = ( y - ibkg ) / inorm
v = cube.spec(no=ino, var=True)[g] / inorm**2
ispec = SNIFS_spectrum(data=y, var=v, start=x[0], step=step)
imodel = pySNIFS_fit.model(data=ispec, func=funcs,
param=params, bounds=bounds,
myfunc=myfunc)
imodel.fit(msge=False, deriv=False)
imodel.khi2 *= imodel.dof # True Chi2
#print " Fitpar:", imodel.fitpar
z = imodel.fitpar[0] - 1
if opts.obsFrame:
print "Chi2/DoF=%.1f/%d, offset=%+.2f A" % \
(imodel.khi2, imodel.dof, z*Line.l0)
else:
print "Chi2/DoF=%.1f/%d, v=%+.2f km/s" % \
(imodel.khi2, imodel.dof, (z-zsys0)*CLIGHT)
zmap[ij,ii] = z
if opts.plot or os.environ.has_key('PYSHOW'):
import matplotlib.pyplot as P
fig = P.figure(figsize=(12,5))
fig.subplots_adjust(left=0.075, right=0.95)
title = "%s [%s]" % (obj, filename)
fig.text(0.5,0.94, title,
fontsize='large', horizontalalignment='center')
ax1 = fig.add_subplot(1,2,1,
xlabel=u'Wavelength [Å]',
ylabel='Flux [%s]' % flxunits)
ax2 = fig.add_subplot(1,4,3,
xlabel=u'Wavelength [Å]')
# Galaxy spectrum
lgal, = ax1.plot(lbda, resSpec, 'g-', label=label)
if model.status==0:
#ax1.plot(x, model.evalfit()*norm + bkg, 'r-')
addRedshiftedLines(ax1, zsys)
ax1.legend(prop=dict(size='x-small'))
ax1.set_xlim(lbda[0],lbda[-1])
ymin,ymax = ax1.get_ylim()
ax1.set_ylim(min(0,ymin/10),ymax)
# Zoom on adjusted line
ax2.plot(x, resSpec[g], 'g-')
errorband(ax2, x, resSpec[g], N.sqrt(resVar[g]), color='g', alpha=0.3)
if model.status==0:
ax2.plot(x, model.evalfit()*norm + bkg, 'r-')
addRedshiftedLines(ax2, zsys)
ax2.text(0.1,0.9,
"Chi2/DoF=%.1f/%d\nDetection: %.1f sigma" % \
(model.khi2, model.dof, nsig),
transform=ax2.transAxes, fontsize='small')
else:
ax2.plot(x, model.eval(model.flatparam)*norm + bkg, 'm-')
ax2.set_xlim(x[0],x[-1])
# Correlation matrix
if model.status==0:
ax3 = fig.add_subplot(1,4,4)
plot_correlation_matrix(ax3, corr, parnames=flatparnames)
if not isSpec and opts.map:
fig2 = P.figure(figsize=(6,6))
axv = fig2.add_subplot(1,1,1, title=title,
xlabel='I [spx]', ylabel='J [spx]',
aspect='equal')
# Velocity map
if opts.obsFrame:
vmap = zmap*Line.l0 # Convert to wavelength offset
label = u'Offset [Å]'
else:
vmap = (zmap - zsys0)*CLIGHT # Convert redshift to velocity
label = 'Velocity [km/s]'
vmin,vmax = prctile(vmap[N.isfinite(zmap)], p=(3,97))
imv = axv.imshow(vmap, vmin=vmin, vmax=vmax,
extent=(-7.5,7.5,-7.5,7.5))
cbv = fig2.colorbar(imv, ax=axv, shrink=0.9)
cbv.set_label(label)
for ino,ii,ij in zip(cube.no,cube.i,cube.j):
axv.text(ii-7,ij-7,str(ino),
size='x-small', ha='center', va='center')
if opts.plot:
path,name = os.path.split(specname)
figname = 'z_'+os.path.splitext(name)[0]+'.png'
print "Saving emission-line figure in", figname
fig.savefig(figname)
if not isSpec and opts.map:
figname = 'v_'+os.path.splitext(name)[0]+'.png'
print "Saving velocity-map in", figname
fig2.savefig(figname)
if os.environ.has_key('PYSHOW'):
P.show()
| mit |
feranick/GES_AT | GridEdgeAT/gridedgeat/fitMethods.py | 1 | 9006 | '''
fitMethods.py
----------------
Classes for providing advanced fitting for the resultsWindow
Copyright (C) 2017-2019 Nicola Ferralis <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
'''
import numpy as np
import pandas as pd
import time, random, math
from lmfit import Model
from scipy import (special, interpolate)
import sympy, scipy
from datetime import datetime
from PyQt5.QtWidgets import (QApplication,QAbstractItemView)
from PyQt5.QtCore import (Qt,QObject, QThread, pyqtSlot, pyqtSignal)
####################################################################
# Diode Equation
####################################################################
class FitMethods(QThread):
results = pyqtSignal(str)
func = pyqtSignal(object)
JV_fit = pyqtSignal(np.ndarray)
def __init__(self, parent=None):
super(FitMethods, self).__init__(parent)
def __del__(self):
self.wait()
def stop(self):
self.terminate()
# fit using Diode Equation
# From: https://github.com/mutovis/analysis-software
def fitDE(self,JV):
#def fitDE(self,cellEqn,JV):
def cellEqn(V,n,Rs,Rsh,I0,Iph):
'''
cellTemp = 29 #degC all analysis is done assuming the cell is at 29 degC
T = 273.15 + cellTemp #cell temp in K
K = 1.3806488e-23 #boltzman constant
q = 1.60217657e-19 #electron charge
thermalVoltage = K*T/q #thermal voltage ~26mv
Vth = thermalVoltage
#Vth = mpmath.convert(Vth)
tol = 1e-15
'''
A = 0.0260372697199036 #this is Vth
B = 38.4064846567063 # this is 1/Vth
#return (Rs*(I0*Rsh + Iph*Rsh - V) - Vth*n*(Rs + Rsh)*special.lambertw(I0*Rs*Rsh*npexp((Rs*(I0*Rsh + Iph*Rsh - V)/(Rs + Rsh) + V)/(Vth*n))/(Vth*n*(Rs + Rsh)),tol=tol))/(Rs*(Rs + Rsh))
return (Rs*(I0*Rsh + Iph*Rsh - V) - A*n*(Rs + Rsh)*special.lambertw(B*I0*Rs*Rsh*np.exp(B*(Rs*(I0*Rsh + Iph*Rsh - V)/(Rs + Rsh) + V)/n)/(n*(Rs + Rsh))))/(Rs*(Rs + Rsh))
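        # Note: cellEqn above is the Lambert-W closed-form solution of the
        # implicit single-diode equation
        #   I = Iph - I0*(exp((V + I*Rs)/(n*Vth)) - 1) - (V + I*Rs)/Rsh
        # (cf. the symbolic rhs in getDEeq below), with Vth = A ~ 0.026 V and
        # B = 1/Vth hard-coded for a cell at ~29 degC.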
vv_f = JV[:,0]
ii_f = JV[:,1]
vv_r = JV[:,2]
ii_r = JV[:,3]
#cellEqn = self.setupEq()
cellModel = Model(cellEqn,nan_policy='omit')
cellModel.set_param_hint('n',value=1)
cellModel.set_param_hint('Rs',value=6)
cellModel.set_param_hint('Rsh',value=1e5)
cellModel.set_param_hint('Iph',value=20e-3)
cellModel.set_param_hint('I0',value=1e-9)
try:
fitResult_f = cellModel.fit(ii_f, V=vv_f)
fitResult_r = cellModel.fit(ii_r, V=vv_r)
except Exception as error:
print('Caught this error: ' + repr(error))
return
resultParams_f = fitResult_f.params
resultParams_r = fitResult_r.params
self.results.emit("\nForward scan: "+fitResult_f.message)
self.results.emit(fitResult_f.fit_report()+"\n")
self.results.emit("Backward scan: "+fitResult_r.message)
self.results.emit(fitResult_r.fit_report()+"\n")
ii_f_fit = cellEqn(vv_f, \
fitResult_f.best_values['n'], \
fitResult_f.best_values['Rs'], \
fitResult_f.best_values['Rsh'],\
fitResult_f.best_values['Iph'],\
fitResult_f.best_values['I0']).real
ii_r_fit = cellEqn(vv_r, \
fitResult_r.best_values['n'], \
fitResult_r.best_values['Rs'], \
fitResult_r.best_values['Rsh'],\
fitResult_r.best_values['Iph'],\
fitResult_r.best_values['I0']).real
JVnew_f = np.hstack((np.array([vv_f]).T,np.array([ii_f_fit]).T))
JVnew_r = np.hstack((np.array([vv_r]).T,np.array([ii_r_fit]).T))
JVnew = np.hstack((JVnew_f,JVnew_r))
self.JV_fit.emit(JVnew)
# fit using scipy interpolate.interp1d
def fitInterp(self,JV):
vv_f = JV[:,0]
ii_f = JV[:,1]
vv_r = JV[:,2]
ii_r = JV[:,3]
f_f = interpolate.interp1d(vv_f,ii_f)
f_r = interpolate.interp1d(vv_r,ii_r)
JVnew_f = np.hstack((np.array([vv_f]).T,np.array([f_f(vv_f)]).T))
JVnew_r = np.hstack((np.array([vv_r]).T,np.array([f_r(vv_r)]).T))
JVnew = np.hstack((JVnew_f,JVnew_r))
self.JV_fit.emit(JVnew)
'''
def getDEeq(self):
self.results.emit("Solving calculation for the diode equation. Please wait...")
modelSymbols = sympy.symbols('I0 Iph Rs Rsh n I V Vth', real=True, positive=True)
I0, Iph, Rs, Rsh, n, I, V, Vth = modelSymbols
modelConstants = (Vth,)
modelVariables = tuple(set(modelSymbols)-set(modelConstants))
# calculate values for our model's constants now
cellTemp = 29 #degC all analysis is done assuming the cell is at 29 degC
T = 273.15 + cellTemp #cell temp in K
K = 1.3806488e-23 #boltzman constant
q = 1.60217657e-19 #electron charge
thermalVoltage = K*T/q #thermal voltage ~26mv
valuesForConstants = (thermalVoltage,)
lhs = I
rhs = Iph-((V+I*Rs)/Rsh)-I0*(sympy.exp((V+I*Rs)/(n*Vth))-1)
electricalModel = sympy.Eq(lhs,rhs)
#electricalModelVarsOnly= electricalModel.subs(zip(modelConstants,valuesForConstants))
symSolutionsNoSubs = {} # all the symbols preserved
solveForThese = [I, I0, V, n]
for symbol in solveForThese:
symSolutionsNoSubs[str(symbol)] = sympy.solve(electricalModel,symbol)[0]
Voc_eqn = symSolutionsNoSubs['V'].subs(I,0) # analytical solution for Voc
Isc_eqn = symSolutionsNoSubs['I'].subs(V,0) # analytical solution for Isc
PB = symSolutionsNoSubs['V']*I # analytical solution for power (current as independant variable)
P_primeB = sympy.diff(PB,I) # first derivative of power (WRT I)
symSolutions = {}
symSolutions['Isc'] = Isc_eqn.subs(zip(modelConstants,valuesForConstants))
symSolutions['Voc'] = Voc_eqn.subs(zip(modelConstants,valuesForConstants))
symSolutions['P_prime'] = P_primeB.subs(zip(modelConstants,valuesForConstants))
symSolutions['I'] = symSolutionsNoSubs['I'].subs(zip(modelConstants,valuesForConstants))
symSolutions['I0'] = symSolutionsNoSubs['I0'].subs(zip(modelConstants,valuesForConstants))
symSolutions['n'] = symSolutionsNoSubs['n'].subs(zip(modelConstants,valuesForConstants))
symSolutions['V'] = symSolutionsNoSubs['V'].subs(zip(modelConstants,valuesForConstants))
results = {}
results['symSolutions'] = symSolutions
results['modelSymbols'] = modelSymbols
results['modelVariables'] = modelVariables
print(results)
I0, Iph, Rs, Rsh, n, I, V, Vth = modelSymbols
# here we define any function substitutions we'll need for lambdification later
#if self.isFastAndSloppy:
# for fast and inaccurate math
functionSubstitutions = {"LambertW" : scipy.special.lambertw, "exp" : np.exp, "log" : np.log}
#functionSubstitutions = {"LambertW" : scipy.special.lambertw, "exp" : bigfloat.exp}
#else:
# this is a massive slowdown (forces a ton of operations into mpmath)
# but gives _much_ better accuracy and aviods overflow warnings/errors...
#functionSubstitutions = {"LambertW" : mpmath.lambertw, "exp" : mpmath.exp, "log" : mpmath.log}
slns = {}
solveForThese = [I, I0, V, n]
for symbol in solveForThese:
remainingVariables = list(set(modelVariables)-set([symbol]))
slns[str(symbol)] = sympy.lambdify(remainingVariables,symSolutions[str(symbol)],functionSubstitutions,dummify=False)
#slns[str(symbol)] = ufuncify(remainingVariables,self.symSolutions[str(symbol)],helpers=[['LambertW', sympy.LambertW(x), [x]]])
#slns[str(symbol)] = functools.partial(tmp)
slns['Voc'] = sympy.lambdify([I0,Rsh,Iph,n],symSolutions['Voc'],functionSubstitutions,dummify=False)
slns['P_prime'] = sympy.lambdify([I0,Rsh,Iph,n,Rs,I],symSolutions['P_prime'],functionSubstitutions,dummify=False)
slns['Isc'] = sympy.lambdify([I0,Rsh,Iph,n,Rs],symSolutions['Isc'],functionSubstitutions,dummify=False)
#slns['I'] = sympy.lambdify([I0,Rsh,Iph,n,Rs,V],symSolutions['I'],functionSubstitutions,dummify=False)
slns['I'] = sympy.lambdify([V,n,Rs,Rsh,I0,Iph],symSolutions['I'],functionSubstitutions,dummify=False)
#slns['V'] = sympy.lambdify([I0,Rsh,Iph,n,Rs,I],symSolutions['I'],functionSubstitutions,dummify=False)
#print(slns['I'](1,2,3,4,5,vv))
#print(slns['V'](1,2,3,4,5,ii))
self.results.emit(" Setup DE: Completed")
#return slns['I']
self.func.emit(slns['I'])
'''
| gpl-3.0 |
jshiv/turntable | turntable/utils.py | 1 | 10551 | '''The utils module provides a collection of methods used across the package or of general utility.
'''
import os
import re
import sys
import shutil
import errno
import fnmatch
import traceback
import numpy as np
import pandas as pd
try:
import cPickle as pickle
except:
import pickle
import random
import time
def catch(fcn, *args, **kwargs):
'''try:
        return fcn(*args, **kwargs)
except:
print traceback
if 'spit' in kwargs.keys():
return kwargs['spit']
Parameters
----------
fcn : function
*args : unnamed parameters of fcn
**kwargs : named parameters of fcn
        spit : value to return if fcn raises an exception
Returns
-------
The expected output of fcn or prints the exception traceback
'''
try:
# remove the special kwargs key "spit" and use it to return if it exists
spit = kwargs.pop('spit')
except:
spit = None
try:
results = fcn(*args, **kwargs)
if results:
return results
except:
print traceback.format_exc()
if spit:
return spit
def batch_list(sequence, batch_size, mod = 0, randomize = False):
'''
Converts a list into a list of lists with equal batch_size.
Parameters
----------
sequence : list
list of items to be placed in batches
batch_size : int
length of each sub list
mod : int
remainder of list length devided by batch_size
mod = len(sequence) % batch_size
randomize = bool
should the initial sequence be randomized before being batched
'''
if randomize:
sequence = random.sample(sequence, len(sequence))
return [sequence[x:x + batch_size] for x in xrange(0, len(sequence)-mod, batch_size)]
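# Example (illustrative):
#   batch_list(range(10), 3, mod=10 % 3)
#   -> [[0, 1, 2], [3, 4, 5], [6, 7, 8]]   (the remainder item 9 is dropped)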
def to_pickle(obj, filename, clean_memory=False):
'''http://stackoverflow.com/questions/7900944/read-write-classes-to-files-in-an-efficent-way'''
path, filename = path_to_filename(filename)
create_dir(path)
with open(path + filename, "wb") as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
if clean_memory:
obj = None
# setting the global object to None requires a return assignment
return obj
def from_pickle(filename, clean_disk=False):
# to deserialize the object
with open(filename, "rb") as input:
obj = pickle.load(input) # protocol version is auto detected
if clean_disk:
os.remove(filename)
return obj
def path_to_filename(pathfile):
'''
Takes a path filename string and returns the split between the path and the filename
if filename is not given, filename = ''
if path is not given, path = './'
'''
path = pathfile[:pathfile.rfind('/') + 1]
if path == '':
path = './'
filename = pathfile[pathfile.rfind('/') + 1:len(pathfile)]
if '.' not in filename:
path = pathfile
filename = ''
if (filename == '') and (path[len(path) - 1] != '/'):
path += '/'
return path, filename
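# Examples (illustrative; behaviour follows the code above):
#   path_to_filename('./results/run.pkl') -> ('./results/', 'run.pkl')
#   path_to_filename('run.pkl')           -> ('./', 'run.pkl')
#   path_to_filename('results')           -> ('results/', '')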
def add_path_string(root_path='./results', path_string=None):
    root_path = path_to_filename(root_path)[0]
    regEx = '[.<>"!,:;*/ -]'
    if path_string is not None:
return path_to_filename(root_path + re.sub(regEx, '_', path_string))[0]
else:
return root_path
def create_dir(path, dir_dict={}):
'''
Tries to create a new directory in the given path.
**create_dir** can also create subfolders according to the dictionnary given as second argument.
Parameters
----------
path : string
string giving the path of the location to create the directory, either absolute or relative.
dir_dict : dictionary, optional
Dictionary ordering the creation of subfolders. Keys must be strings, and values either None or path dictionaries.
the default is {}, which means that no subfolders will be created
Examples
--------
>>> path = './project'
>>> dir_dict = {'dir1':None, 'dir2':{'subdir21':None}}
>>> utils.create_dir(path, dir_dict)
will create:
* *project/dir1*
* *project/dir2/subdir21*
in your parent directory.
'''
folders = path.split('/')
folders = [i for i in folders if i != '']
rootPath = ''
if folders[0] == 'C:':
folders = folders[1:]
count = 0
for directory in folders:
count += 1
# required to handle the dot operators
if (directory[0] == '.') & (count == 1):
rootPath = directory
else:
rootPath = rootPath + '/' + directory
try:
os.makedirs(rootPath)
# If the file already exists (EEXIST), raise exception and do nothing
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
for key in dir_dict.keys():
rootPath = path + "/" + key
try:
os.makedirs(rootPath)
# If the file already exists (EEXIST), raise exception and do nothing
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
if dir_dict[key] is not None:
create_dir(rootPath, dir_dict[key])
def Walk(root='.', recurse=True, pattern='*'):
'''
Generator for walking a directory tree.
Starts at specified root folder, returning files that match our pattern.
Optionally will also recurse through sub-folders.
Parameters
----------
root : string (default is *'.'*)
Path for the root folder to look in.
recurse : bool (default is *True*)
If *True*, will also look in the subfolders.
pattern : string (default is :emphasis:`'*'`, which means all the files are concerned)
The pattern to look for in the files' name.
Returns
-------
generator
**Walk** yields a generator from the matching files paths.
'''
for path, subdirs, files in os.walk(root):
for name in files:
if fnmatch.fnmatch(name, pattern):
yield os.path.join(path, name)
if not recurse:
break
def scan_path(root='.', recurse=False, pattern='*'):
'''
Runs a loop over the :doc:`Walk<relpy.utils.Walk>` Generator
to find all file paths in the root directory with the given
pattern. If recurse is *True*: matching paths are identified
for all sub directories.
Parameters
----------
root : string (default is *'.'*)
Path for the root folder to look in.
recurse : bool (default is *True*)
If *True*, will also look in the subfolders.
pattern : string (default is :emphasis:`'*'`, which means all the files are concerned)
The pattern to look for in the files' name.
Returns
-------
path_list : list
list of all the matching files paths.
'''
path_list = []
for path in Walk(root=root, recurse=recurse, pattern=pattern):
path_list.append(path)
return path_list
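# Example (hypothetical paths):
#   scan_path('./data', recurse=True, pattern='*.csv')
#   -> ['./data/a.csv', './data/sub/b.csv', ...]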
class Timer:
'''Timer that calculates time remaining for a process and the percent complete
.. todo::
Ask for details about the usage
Parameters
----------
nLoops : integer
numPrints : integer (default is *100*)
verbose : bool (default is *True*)
Attributes
----------
nLoops : integer
numPrints : integer
verbose : bool
if *True*, print values when **loop** is called
count : integer
elapsed : float
elapsed time
est_end : float
estimated end
ti : float
initial time
tf : float
current time
display_amt : integer
'''
def __init__(self, nLoops, numPrints=100, verbose=True):
self.nLoops = nLoops
self.numPrints = numPrints
self.verbose = verbose
self.count = 0
self.elapsed = 1
self.est_end = 1
self.ti = time.time()
self.display_amt = 1
def loop(self):
'''
Tracks the time in a loop. The estimated time to completion
can be calculated and if verbose is set to *True*, the object will print
estimated time to completion, and percent complete.
        Call this once per loop iteration to keep track of progress.'''
self.count += 1
self.tf = time.time()
self.elapsed = self.tf - self.ti
if self.verbose:
displayAll(self.elapsed, self.display_amt, self.est_end,
self.nLoops, self.count, self.numPrints)
def fin(self):
print "Elapsed time: %s :-)" % str(time.time() - self.ti)
def displayAll(elapsed, display_amt, est_end, nLoops, count, numPrints):
'''Displays time if verbose is true and count is within the display amount'''
if numPrints > nLoops:
display_amt = 1
else:
display_amt = round(nLoops / numPrints)
if count % display_amt == 0:
avg = elapsed / count
est_end = round(avg * nLoops)
(disp_elapsed,
disp_avg,
disp_est) = timeUnit(int(round(elapsed)),
int(round(avg)),
int(round(est_end)))
print "%s%%" % str(round(count / float(nLoops) * 100)), "@" + str(count),
totalTime = disp_est[0]
unit = disp_est[1]
if str(unit) == "secs":
remain = totalTime - round(elapsed)
remainUnit = "secs"
elif str(unit) == "mins":
remain = totalTime - round(elapsed) / 60
remainUnit = "mins"
elif str(unit) == "hr":
remain = totalTime - round(elapsed) / 3600
remainUnit = "hr"
print "ETA: %s %s" % (str(remain), remainUnit)
print
return
def timeUnit(elapsed, avg, est_end):
'''calculates unit of time to display'''
minute = 60
hr = 3600
day = 86400
if elapsed <= 3 * minute:
unit_elapsed = (elapsed, "secs")
if elapsed > 3 * minute:
unit_elapsed = ((elapsed / 60), "mins")
if elapsed > 3 * hr:
unit_elapsed = ((elapsed / 3600), "hr")
if avg <= 3 * minute:
unit_avg = (avg, "secs")
if avg > 3 * minute:
unit_avg = ((avg / 60), "mins")
if avg > 3 * hr:
unit_avg = ((avg / 3600), "hr")
if est_end <= 3 * minute:
unit_estEnd = (est_end, "secs")
if est_end > 3 * minute:
unit_estEnd = ((est_end / 60), "mins")
if est_end > 3 * hr:
unit_estEnd = ((est_end / 3600), "hr")
return [unit_elapsed, unit_avg, unit_estEnd]
| mit |
cactusbin/nyt | matplotlib/examples/color/colormaps_reference.py | 4 | 3411 | """
Reference for colormaps included with Matplotlib.
This reference example shows all colormaps included with Matplotlib. Note that
any colormap listed here can be reversed by appending "_r" (e.g., "pink_r").
These colormaps are divided into the following categories:
Sequential:
These colormaps are approximately monochromatic colormaps varying smoothly
between two color tones---usually from low saturation (e.g. white) to high
saturation (e.g. a bright blue). Sequential colormaps are ideal for
representing most scientific data since they show a clear progression from
low-to-high values.
Diverging:
These colormaps have a median value (usually light in color) and vary
smoothly to two different color tones at high and low values. Diverging
colormaps are ideal when your data has a median value that is significant
(e.g. 0, such that positive and negative values are represented by
different colors of the colormap).
Qualitative:
These colormaps vary rapidly in color. Qualitative colormaps are useful for
choosing a set of discrete colors. For example::
color_list = plt.cm.Set3(np.linspace(0, 1, 12))
gives a list of RGB colors that are good for plotting a series of lines on
a dark background.
Miscellaneous:
Colormaps that don't fit into the categories above.
"""
import numpy as np
import matplotlib.pyplot as plt
cmaps = [('Sequential', ['binary', 'Blues', 'BuGn', 'BuPu', 'gist_yarg',
'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool', 'copper',
'gist_gray', 'gist_heat', 'gray', 'hot', 'pink',
'spring', 'summer', 'winter']),
('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'seismic']),
('Qualitative', ['Accent', 'Dark2', 'hsv', 'Paired', 'Pastel1',
'Pastel2', 'Set1', 'Set2', 'Set3', 'spectral']),
('Miscellaneous', ['gist_earth', 'gist_ncar', 'gist_rainbow',
'gist_stern', 'jet', 'brg', 'CMRmap', 'cubehelix',
'gnuplot', 'gnuplot2', 'ocean', 'rainbow',
'terrain', 'flag', 'prism'])]
nrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
def plot_color_gradients(cmap_category, cmap_list):
fig, axes = plt.subplots(nrows=nrows)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
axes[0].set_title(cmap_category + ' colormaps', fontsize=14)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
for cmap_category, cmap_list in cmaps:
plot_color_gradients(cmap_category, cmap_list)
plt.show()
| unlicense |
LUTAN/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 61 | 3350 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using recurrent neural networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(features, target):
"""Character level recurrent neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.one_hot(features, 256, 1, 0)
byte_list = tf.unstack(byte_list, axis=1)
cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
_, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
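# Shape walkthrough (with the module defaults above): features [batch, 100]
# byte ids -> one-hot [batch, 100, 256] -> list of 100 [batch, 256] steps ->
# GRU(20) final state [batch, 20] -> fully connected logits [batch, 15] for
# the 15 DBpedia classes.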
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_rnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
mattip/numpy | doc/neps/conf.py | 11 | 5689 | # -*- coding: utf-8 -*-
#
# NumPy Enhancement Proposals documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 11 12:45:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.imgmath',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../source/_templates/']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'content'
# General information about the project.
project = u'NumPy Enhancement Proposals'
copyright = u'2017-2018, NumPy Developers'
author = u'NumPy Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
## -- Options for HTML output ----------------------------------------------
#
html_theme = 'pydata_sphinx_theme'
html_logo = '../source/_static/numpylogo.svg'
html_theme_options = {
"github_url": "https://github.com/numpy/numpy",
"twitter_url": "https://twitter.com/numpy_team",
"external_links": [
{"name": "Wishlist",
"url": "https://github.com/numpy/numpy/issues?q=is%3Aopen+is%3Aissue+label%3A%2223+-+Wish+List%22",
},
],
"show_prev_next": False,
}
html_title = "%s" % (project)
html_static_path = ['../source/_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
if 'sphinx.ext.pngmath' in extensions:
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
plot_html_show_formats = False
plot_html_show_source_link = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NumPyEnhancementProposalsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NumPyEnhancementProposals.tex', u'NumPy Enhancement Proposals Documentation',
u'NumPy Developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'numpyenhancementproposals', u'NumPy Enhancement Proposals Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NumPyEnhancementProposals', u'NumPy Enhancement Proposals Documentation',
author, 'NumPyEnhancementProposals', 'One line description of project.',
'Miscellaneous'),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/dev', None),
'numpy': ('https://numpy.org/devdocs', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org', None)
}
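# With this mapping, cross-references such as :class:`numpy.ndarray` or
# :func:`scipy.linalg.svd` in the NEP sources resolve against the remote
# Sphinx object inventories listed above.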
| bsd-3-clause |
IndraVikas/scikit-neuralnetwork | examples/plot_mlp.py | 5 | 5613 | # -*- coding: utf-8 -*-
"""\
Visualizing Parameters in a Modern Neural Network
=================================================
"""
from __future__ import (absolute_import, unicode_literals, print_function)
print(__doc__)
__author__ = 'Alex J. Champandard'
import sys
import time
import logging
import argparse
import itertools
import numpy
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
# The neural network uses the `sknn` logger to output its information.
import logging
logging.basicConfig(format="%(message)s", level=logging.WARNING, stream=sys.stdout)
from sknn.platform import gpu32
from sknn.backend import pylearn2
from sknn import mlp
# All possible parameter options that can be plotted, separately or combined.
PARAMETERS = {
'activation': ['Rectifier', 'Tanh', 'Sigmoid', 'Maxout'],
'alpha': [0.001, 0.005, 0.01, 0.05, 0.1, 0.2],
'dropout': [None, 0.25, 0.5, 0.75],
'iterations': [100, 200, 500, 1000],
'output': ['Softmax', 'Linear', 'Gaussian'],
'regularize': [None, 'L1', 'L2', 'dropout'],
'rules': ['sgd', 'momentum', 'nesterov', 'adadelta', 'rmsprop'],
'units': [16, 64, 128, 256],
}
# Grab command line information from the user.
parser = argparse.ArgumentParser()
parser.add_argument('-p','--params', nargs='+', help='Parameter to visualize.',
choices=PARAMETERS.keys(), required=True)
args = parser.parse_args()
# Build a list of lists containing all parameter combinations to be tested.
params = []
for p in sorted(PARAMETERS):
values = PARAMETERS[p]
# User requested to test against this parameter?
if p in args.params:
params.append(values)
# Otherwise, use the first item of the list as default.
else:
params.append(values[:1])
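# Illustrative note (not from the original script): with `--params activation`,
# the list built above would be
#   [['Rectifier', 'Tanh', 'Sigmoid', 'Maxout'], [0.001], [None], [100],
#    ['Softmax'], [None], ['sgd'], [16]]
# i.e. the selected parameter is swept while every other one is pinned to its
# first (default) value, so itertools.product() below only enumerates the
# requested combinations.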
# Build the classifiers for all possible combinations of parameters.
names = []
classifiers = []
for (activation, alpha, dropout, iterations, output, regularize, rule, units) in itertools.product(*params):
params = {'pieces': 2} if activation == "Maxout" else {}
classifiers.append(mlp.Classifier(
layers=[mlp.Layer(activation, units=units, **params), mlp.Layer(output)], random_state=1,
n_iter=iterations, n_stable=iterations, regularize=regularize,
dropout_rate=dropout, learning_rule=rule, learning_rate=alpha),)
t = []
for k, v in zip(sorted(PARAMETERS), [activation, alpha, dropout, iterations, output, regularize, rule, units]):
if k in args.params:
t.append(str(v))
names.append(','.join(t))
# Create randomized datasets for visualizations, on three rows.
seed = int(time.time())
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = numpy.random.RandomState(seed+1)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=seed+2),
make_circles(noise=0.2, factor=0.5, random_state=seed+3),
linearly_separable]
# Create the figure containing plots for each of the classifiers.
GRID_RESOLUTION = .02
figure = plt.figure(figsize=(18, 9))
i = 1
for X, y in datasets:
# Preprocess dataset, split into training and test part.
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
# Prepare coordinates of 2D grid to be visualized.
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = numpy.meshgrid(numpy.arange(x_min, x_max, GRID_RESOLUTION),
numpy.arange(y_min, y_max, GRID_RESOLUTION))
# Plot the dataset on its own first.
cm = plt.cm.get_cmap("PRGn")
cm_bright = ListedColormap(['#FF00FF', '#00FF00'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# Now iterate over every classifier...
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = clf.predict_proba(numpy.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right', fontweight='bold')
i += 1
sys.stdout.write('.'); sys.stdout.flush()
sys.stdout.write('\n')
figure.subplots_adjust(left=.02, right=.98)
plt.show() | bsd-3-clause |
adi-sharma/RLIE_A3C | code/predict.py | 1 | 19030 | import numpy as np
from sklearn.linear_model import LogisticRegression
import scipy.sparse
import time
import itertools
import sys
import pickle
import inflect
import train_crf as crf
from train import load_data
import helper
import re, pdb, collections
import constants
p = inflect.engine()
int2tags = ['TAG'] + constants.int2tags #since the constants file does not include the 'TAG' tag
NUM_ENTITIES = len(constants.int2tags)
tags2int = constants.tags2int
tags = range(len(int2tags))
helper.load_constants()
mode = constants.mode
CORRECT = collections.defaultdict(lambda:0.)
GOLD = collections.defaultdict(lambda:0.)
PRED = collections.defaultdict(lambda:0.)
def splitBars(w):
return [q.strip() for q in w.split('|')]
# main loop
def main(trained_model,testing_file,viterbi,output_tags="output.tag", output_predictions="output.pred"):
test_data, identifier = load_data(testing_file)
evaluate = True
## extract features
if not "crf" in trained_model:
if not isinstance(trained_model, list):
clf, previous_n, next_n, word_vocab,other_features = pickle.load( open( trained_model, "rb" ) )
else:
clf, previous_n, next_n, word_vocab,other_features = trained_model
tic = time.clock()
f = open(output_tags,'w')
confidences = []
for i in range(len(test_data)+len(identifier)):
if i%2 == 1:
if "crf" in trained_model:
y, tmp_conf = crf.predict(test_data[i/2][0], trained_model)
f.write(" ".join([test_data[i/2][0][j]+"_"+y[j] for j in range(len(test_data[i/2][0]))]))
else:
y, tmp_conf = predict_tags_n(viterbi, previous_n,next_n, clf, test_data[i/2][0], word_vocab,other_features)
f.write(" ".join([test_data[i/2][0][j]+"_"+int2tags[int(y[j])] for j in range(len(test_data[i/2][0]))]))
assert(len(y) == len(tmp_conf))
confidences.append(tmp_conf)
f.write("\n")
else:
f.write(identifier[i/2])
f.write("\n")
#print time.clock()-tic
f.close()
if evaluate:
eval_mode_batch(output_tags, confidences, helper.cities)
else:
predict_mode_batch(output_tags, output_predictions, helper.cities)
return
# Takes in a trained model and predicts all the entities
# sentence - list of words
# viterbi - bool of whether or not to use viterbi decoding
# cities - set of cities to match for backup if no city was predicted
# Returns comma-separated predictions of shooterNames, killedNum, woundedNum and city, with shooter names separated by '|'
def predict(trained_model, sentence, viterbi, cities):
if type(trained_model) == str:
clf, previous_n,next_n, word_vocab,other_features = pickle.load( open( trained_model, "rb" ) )
else:
#trained_model is an already initialized list of params
clf, previous_n,next_n, word_vocab,other_features = trained_model
sentence = sentence.replace("_"," ")
words = re.findall(r"[\w']+|[.,!?;]", sentence)
y, tmp_conf = predict_tags_n(viterbi, previous_n,next_n, clf, words, word_vocab,other_features)
tags = []
for i in range(len(y)):
tags.append(int(y[i]))
pred = predict_mode(words, tags, tmp_conf, cities)
return pred
def predictWithConfidences(trained_model, sentence, viterbi, cities):
sentence = sentence.replace("_"," ")
words = re.findall(r"[\w']+|[.,!?;]", sentence)
cleanedSentence = []
i = 0
while i < len(words):
        token = words[i]
end_token_range = i
for j in range(i+1,len(words)):
new_token = words[j]
if new_token == token:
end_token_range = j
else:
cleanedSentence.append(words[i])
break
i = end_token_range + 1
words = cleanedSentence
if "crf" in trained_model:
return predictCRF(trained_model, words, cities)
if type(trained_model) == str:
clf, previous_n,next_n, word_vocab,other_features = pickle.load( open( trained_model, "rb" ) )
else:
#trained_model is an already initialized list of params
clf, previous_n,next_n, word_vocab,other_features = trained_model
y, confidences = predict_tags_n(viterbi, previous_n,next_n, clf, words, word_vocab,other_features)
tags = []
for i in range(len(y)):
tags.append(int(y[i]))
pred, conf_scores, conf_cnts = predict_mode(words, tags, confidences, cities)
return pred, conf_scores, conf_cnts
## Return tag, conf scores, conf counts for CRF
def predictCRF(trained_model, words, cities):
tags, confidences = crf.predict(words, trained_model)
pred, conf_scores, conf_cnts = predict_mode(words, tags, confidences, cities, True)
return pred, conf_scores, conf_cnts
count_person = 0
# Make predictions using majority voting of the tag
# sentence - list of words
# tags - list of tags corresponding to sentence
def predict_ema_mode(sentence, tags, confidences):
assert len(tags) == len(confidences)
original_tags = tags
num_tags = len(int2tags) -1
output_entities = {}
entity_confidences = [0] * num_tags
entity_cnts = [0] * num_tags
for tag in int2tags[1:]:
output_entities[tag] = []
cleanedSentence = []
cleanedTags = []
cleanedConfidences = []
    # Combine consecutive tags (like "United_Location States_Location" into
    # "United States_Location")
i = 0
while i < len(sentence):
tag = int2tags[tags[i]] if not type(tags[i]) == str else tags[i]
end_range = i
if not tag == "TAG":
for j in range(i+1,len(sentence)):
new_tag = int2tags[tags[j]] if not type(tags[j]) == str else tags[j]
if new_tag == tag:
end_range = j
else:
break
cleanedSentence.append( " ".join(sentence[i:end_range+1]))
avgConf = sum(confidences[i:end_range+1])/(end_range+1 - i)
cleanedConfidences.append(avgConf)
cleanedTags.append(tags2int[tag])
i = end_range + 1
sentence = cleanedSentence
tags = cleanedTags
confidences = cleanedConfidences
for j in range(len(sentence)):
index = int2tags[tags[j]] if not type(tags[j]) == str else tags[j]
if index == "TAG":
continue
output_entities[index].append((sentence[j], confidences[j]))
output_pred_line = ""
for tag in int2tags[1:]:
# pdb.set_trace()
        # One idea: if the entity isn't in the known cities/countries, try one- and two-word permutations and stop there, then run something akin to the "try cities" fallback below
mode, conf = get_mode(output_entities[tag])
if mode == "":
assert not tags2int[tag] in tags
assert not tags2int[tag] in original_tags
output_pred_line += "unknown"
entity_confidences[tags2int[tag]-1] += 0
else:
output_pred_line += mode
entity_confidences[tags2int[tag]-1] += conf
entity_cnts[tags2int[tag]-1] += 1
if not tag == int2tags[-1]:
output_pred_line += " ### "
return output_pred_line, entity_confidences, entity_cnts
def predict_mode(sentence, tags, confidences, cities, crf=False):
if constants.mode == "EMA":
return predict_ema_mode(sentence, tags, confidences)
output_entities = {}
entity_confidences = [0,0,0,0]
entity_cnts = [0,0,0,0]
for tag in int2tags:
output_entities[tag] = []
for j in range(len(sentence)):
ind = ""
if crf:
ind = tags[j]
else:
ind = int2tags[tags[j]]
output_entities[ind].append((sentence[j], confidences[j]))
output_pred_line = ""
#for shooter (OLD)
# for shooterName, conf in output_entities["shooterName"]:
# output_pred_line += shooterName.lower()
# output_pred_line += "|"
# entity_confidences[tags2int['shooterName']-1] += conf
# entity_cnts[tags2int['shooterName']-1] += 1
# output_pred_line = output_pred_line[:-1]
mode, conf = get_mode(output_entities["shooterName"])
output_pred_line += mode
entity_confidences[tags2int['shooterName']-1] += conf
entity_cnts[tags2int['shooterName']-1] += 1
for tag in int2tags:
if tag == "city":
output_pred_line += " ### "
possible_city_combos = []
# pdb.set_trace()
for permutation in itertools.permutations(output_entities[tag],2):
if permutation[0][0] in cities:
if "" in cities[permutation[0][0]]:
possible_city_combos.append((permutation[0][0], permutation[0][1]))
if permutation[1][0] in cities[permutation[0][0]]:
possible_city_combos.append((permutation[0][0] + " " + permutation[1][0],\
max(permutation[0][1], permutation[1][1]) ))
mode, conf = get_mode(possible_city_combos)
#try cities automatically
if mode == "":
possible_cities = []
for i in range(len(sentence)):
word1 = sentence[i]
if word1 in cities:
if "" in cities[word1]:
possible_cities.append((word1, 0.))
if i+1 < len(sentence):
word2 = sentence[i+1]
if word2 in cities[word1]:
possible_cities.append((word1 + " " + word2, 0.))
#print possible_cities
#print get_mode(possible_cities)
mode, conf = get_mode(possible_cities)
output_pred_line += mode
entity_confidences[tags2int['city']-1] += conf
entity_cnts[tags2int['city']-1] += 1
elif tag not in ["TAG", "shooterName"]:
output_pred_line += " ### "
mode, conf = get_mode(output_entities[tag])
if mode == "":
output_pred_line += "zero"
entity_confidences[tags2int[tag]-1] += 0
entity_cnts[tags2int[tag]-1] += 1
else:
output_pred_line += mode
entity_confidences[tags2int[tag]-1] += conf
entity_cnts[tags2int[tag]-1] += 1
assert not (output_pred_line.split(" ### ")[0].strip() == "" and len(output_entities["shooterName"]) >0)
return output_pred_line, entity_confidences, entity_cnts
# Make predictions using majority voting in batch
# output_tags - filename of tagged articles
# output_predictions - filename to write the predictions to
# Returns comma-separated predictions of shooterNames, killedNum, woundedNum and city, with shooter names separated by '|'
def predict_mode_batch(output_tags, output_predictions, cities):
tagged_data, identifier = load_data(output_tags)
f = open(output_predictions,'w')
for i in range(len(tagged_data)+len(identifier)):
if i%2 == 1:
f.write(predict_mode(tagged_data[i/2][0], tagged_data[i/2][1], cities))
f.write("\n")
else:
f.write(identifier[i/2])
f.write("\n")
return
def evaluateArticle(predEntities, goldEntities, shooterLenientEval=True,
shooterLastName=False, evalOutFile=None):
global PRED, GOLD, CORRECT
int2tags = constants.int2tags
if constants.mode == 'Shooter':
#shooterName first: only add this if gold contains a valid shooter
if goldEntities[0]!='':
if shooterLastName:
gold = set(splitBars(goldEntities[0].lower())[-1:])
else:
gold = set(splitBars(goldEntities[0].lower()))
pred = set(splitBars(predEntities[0].lower()))
correct = len(gold.intersection(pred))
if shooterLenientEval:
CORRECT[int2tags[0]] += (1 if correct> 0 else 0)
GOLD[int2tags[0]] += (1 if len(gold) > 0 else 0)
PRED[int2tags[0]] += (1 if len(pred) > 0 else 0)
else:
CORRECT[int2tags[0]] += correct
GOLD[int2tags[0]] += len(gold)
PRED[int2tags[0]] += len(pred)
# All other tags.
for i in range(1, NUM_ENTITIES):
if goldEntities[i] != 'zero':
GOLD[int2tags[i]] += 1
PRED[int2tags[i]] += 1
if predEntities[i].lower() == goldEntities[i].lower():
CORRECT[int2tags[i]] += 1
else:
# For EMA.
for i in range(NUM_ENTITIES):
if goldEntities[i] != 'unknown':
#old eval
gold = set(splitBars(goldEntities[i].lower()))
pred = set(splitBars(predEntities[i].lower()))
# if 'unknown' in pred:
# pred = set()
correct = len(gold.intersection(pred))
if shooterLenientEval:
CORRECT[int2tags[i]] += (1 if correct> 0 else 0)
GOLD[int2tags[i]] += (1 if len(gold) > 0 else 0)
PRED[int2tags[i]] += (1 if len(pred) > 0 else 0)
else:
CORRECT[int2tags[i]] += correct
GOLD[int2tags[i]] += len(gold)
PRED[int2tags[i]] += len(pred)
if evalOutFile:
evalOutFile.write("--------------------\n")
evalOutFile.write("Gold: "+str(gold)+"\n")
evalOutFile.write("Pred: "+str(pred)+"\n")
evalOutFile.write("Correct: "+str(correct)+"\n")
def eval_mode_batch(output_tags, confidences, cities):
tagged_data, identifier = load_data(output_tags)
num_tags = len(int2tags) - 1
assert len(tagged_data) == len(confidences)
for i in range(len(tagged_data)):
sentence = tagged_data[i][0]
tags = tagged_data[i][1]
tag_confs = confidences[i]
ident = identifier[i]
gold_ents = ident.split(',')[:num_tags] #Throw away title
output_pred_line, entity_confidences, entity_cnts = predict_mode(sentence, tags, tag_confs, cities)
predictions = output_pred_line.split(" ### ")
# Evaluate the predictions.
evaluateArticle(predictions, gold_ents)
print "------------\nEvaluation Stats: (Precision, Recall, F1):"
for tag in GOLD:
prec = CORRECT[tag]/PRED[tag]
rec = CORRECT[tag]/GOLD[tag]
f1 = (2*prec*rec)/(prec+rec)
print tag, prec, rec, f1, "########", CORRECT[tag], PRED[tag], GOLD[tag]
# Takes a '|' separated list of gold entities and a prediction
# Returns 'skip' if gold is unknown, 'no_predict' if no prediction was made,
# 1 if prediction in gold, and 0 if prediction not in gold
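# For illustration, under the default "strict" mode evaluatePrediction("john",
# "John | John Doe") is True (the gold label is lower-cased and split on '|'),
# while evaluatePrediction("anything", "unknown") returns 'skip'.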
def evaluatePrediction(pred, goldLabel):
prediction = pred.strip().lower()
gold = goldLabel.strip().lower()
if gold == 'unknown' or gold == '':
return 'skip'
if prediction == 'unknown' or prediction == '':
return 'no_predict'
mode = "strict"
if mode == "strict":
gold_set = set([s.strip() for s in gold.split('|')])
return prediction in gold_set
elif mode == "loose":
return prediction in gold
elif mode == 'flex':
gold = gold.replace("|", "")
gold_set = set([s.strip() for s in gold.split(' ')])
return prediction in gold_set
# get mode of list l, returns "" if empty
#l consists of tuples (value, confidence)
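# For illustration: get_mode([("2", 0.9), ("two", 0.8), ("three", 0.5)]) returns
# ("two", 0.85) -- "2" is normalised to "two" via inflect, its confidences are
# summed (1.7) and then averaged over its two occurrences.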
def get_mode(l):
counts = collections.defaultdict(lambda:0)
Z = collections.defaultdict(lambda:0)
curr_max = 0
arg_max = ""
for element, conf in l:
try:
normalised = p.number_to_words(int(element))
except Exception, e:
normalised = element.lower()
counts[normalised] += conf
Z[normalised] += 1
for element in counts:
if counts[element] > curr_max and element != "" and element != "zero":
curr_max = counts[element]
arg_max = element
return arg_max, (counts[arg_max]/Z[arg_max] if Z[arg_max] > 0 else counts[arg_max])
# given a classifier and a sentence, predict the tag sequence
def predict_tags_n(viterbi, previous_n,next_n, clf, sentence, word_vocab,other_features,first_n = 10):
num_features = len(word_vocab) + len(other_features)
total_features = (previous_n + next_n + 1)*num_features + len(word_vocab) + previous_n * len(tags) + first_n
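    # Feature vector layout (as assembled by the indexing below): word/other
    # features for the current word, previous_n + next_n context blocks of the
    # same size, a bag-of-words block over the other words in the sentence,
    # previous_n one-hot blocks for the already predicted tags, and first_n
    # position indicators.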
dataX = np.zeros((len(sentence),total_features))
dataY = np.zeros(len(sentence))
dataYconfidences = [None for i in range(len(sentence))]
other_words_lower = set([s.lower() for s in sentence[0]])
for i in range(len(sentence)):
word = sentence[i]
word_lower = word.lower()
if word_lower in word_vocab:
dataX[i,word_vocab[word_lower]] = 1
for j in range(previous_n):
if i+j+1<len(sentence):
dataX[i+j+1,(j+1)*num_features+word_vocab[word_lower]] = 1
for j in range(next_n):
if i-j-1 >= 0:
dataX[i-j-1,(previous_n+j+1)*num_features+word_vocab[word_lower]] = 1
for (index, feature_func) in enumerate(other_features):
if feature_func(word):
dataX[i,len(word_vocab)+index] = 1
for j in range(previous_n):
if i + j + 1 < len(sentence):
dataX[i+j+1,(j+1)*num_features+len(word_vocab)+index] = 1
for j in range(next_n):
if i - j - 1 >= 0:
dataX[i-j-1,(previous_n+j+1)*num_features+len(word_vocab)+index] = 1
for other_word_lower in other_words_lower:
if other_word_lower != word_lower and other_word_lower in word_vocab:
dataX[i,(previous_n+next_n+1)*num_features + word_vocab[other_word_lower]] = 1
if i < first_n:
dataX[i,(previous_n + next_n + 1)*num_features + len(word_vocab) + previous_n * len(tags)+ i ] = 1
for i in range(len(sentence)):
for j in range(previous_n):
if j < i:
dataX[i,(previous_n+next_n+1)*num_features+len(word_vocab)+len(tags)*j+int(dataY[i-j-1])] = 1
dataYconfidences[i] = clf.predict_proba(dataX[i,:].reshape(1, -1))
dataY[i] = np.argmax(dataYconfidences[i])
dataYconfidences[i] = dataYconfidences[i][0][int(dataY[i])]
return dataY, dataYconfidences
if __name__ == "__main__":
if mode == "EMA":
trained_model = "trained_model_crf.EMA.p"
testing_file = "../data/tagged_data/EMA/dev.tag"
elif mode == "Shooter":
trained_model = "trained_model2.p"
# testing_file = "../data/tagged_data/shooterLarge/dev.tag"
testing_file = "../data/tagged_data/Shootings/dev.tag"
viterbi = False #sys.argv[4]
main(trained_model,testing_file,viterbi)
| mit |
LANGFAN/APM | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
facecolors = []
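        # completion_mask is an array of bytes in which each bit flags one geodesic
        # section that has been covered; unpack it bit by bit so every section gets
        # its "hit" or "not hit" face colour.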
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
| gpl-3.0 |
Garrett-R/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
ChristianTremblay/BAC0 | BAC0/sql/sql.py | 1 | 8703 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 by Christian Tremblay, P.Eng <[email protected]>
# Licensed under LGPLv3, see file LICENSE in this source tree.
#
"""
sql.py -
"""
# --- standard Python modules ---
import pickle
import os.path
# --- 3rd party modules ---
import sqlite3
import contextlib
try:
import pandas as pd
from pandas.io import sql
try:
from pandas import Timestamp
except ImportError:
from pandas.lib import Timestamp
_PANDAS = True
except ImportError:
_PANDAS = False
from ..core.io.IOExceptions import RemovedPointException
# --- this application's modules ---
# ------------------------------------------------------------------------------
class SQLMixin(object):
"""
Use SQL to persist a device's contents. By saving the device contents to an SQL
database, you can work with the device's data while offline, or while the device
is not available.
"""
def _read_from_sql(self, request, db_name):
"""
        Use contextlib.closing so that the connection to the database is
        closed when not in use.
"""
with contextlib.closing(sqlite3.connect("{}.db".format(db_name))) as con:
return sql.read_sql(sql=request, con=con)
def dev_properties_df(self):
dic = self.properties.asdict.copy()
dic.pop("network", None)
dic.pop("pss", None)
return dic
def points_properties_df(self):
"""
Return a dictionary of point/point_properties in preparation for storage in SQL.
"""
pprops = {}
for each in self.points:
p = each.properties.asdict.copy()
p.pop("device", None)
p.pop("network", None)
p.pop("simulated", None)
p.pop("overridden", None)
pprops[each.properties.name] = p
return pd.DataFrame(pprops)
def backup_histories_df(self, resampling="1s"):
"""
        Build a dataframe of the point histories.
        By default, the dataframe will be resampled to 1-second intervals and
        NaN values will be forward filled then backward filled. This way, no
        NaN values remain and analytics will be easier.
        Please note that this can be disabled using resampling=False.
        In the process of building the dataframe, analog values are
        resampled using the mean() function, so we get intermediate
        results between two records.
        For binary values, we'll use .last() so we won't get a 0.5 value,
        which means nothing in this context.
        If saving a DB that already exists, previous resampling will survive
        the merge of old data and new data.
"""
backup = {}
if isinstance(resampling, str):
resampling_needed = True
resampling_freq = resampling
elif resampling in [0, False]:
resampling_needed = False
# print(resampling, resampling_freq, resampling_needed)
for point in self.points:
try:
if resampling_needed and "binary" in point.properties.type:
backup[point.properties.name] = (
point.history.replace(["inactive", "active"], [0, 1])
.resample(resampling_freq)
.last()
)
elif resampling_needed and "analog" in point.properties.type:
backup[point.properties.name] = point.history.resample(
resampling_freq
).mean()
else:
backup[point.properties.name] = point.history.resample(
resampling_freq
).last()
except Exception as error:
self._log.error(
"Error in resampling {} | {} (probably not enough points)".format(
point, error
)
)
if "binary" in point.properties.type:
backup[point.properties.name] = point.history.replace(
["inactive", "active"], [0, 1]
)
elif "analog" in point.properties.type:
backup[point.properties.name] = point.history.resample(
resampling_freq
).mean()
else:
backup[point.properties.name] = point.history
df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in backup.items()]))
if resampling_needed:
return (
df.resample(resampling_freq)
.last()
.fillna(method="ffill")
.fillna(method="bfill")
)
else:
return df
def save(self, filename=None, resampling=None):
"""
Save the point histories to sqlite3 database.
Save the device object properties to a pickle file so the device can be reloaded.
Resampling : valid Pandas resampling frequency. If 0 or False, dataframe will not be resampled on save.
"""
if filename:
if ".db" in filename:
filename = filename.split(".")[0]
self.properties.db_name = filename
else:
self.properties.db_name = "Device_{}".format(self.properties.device_id)
if resampling is None:
resampling = self.properties.save_resampling
# Does file exist? If so, append data
if os.path.isfile("{}.db".format(self.properties.db_name)):
his = self._read_from_sql(
'select * from "{}"'.format("history"), self.properties.db_name
)
his.index = his["index"].apply(Timestamp)
try:
last = his.index[-1]
df_to_backup = self.backup_histories_df(resampling=resampling)[last:]
except IndexError:
df_to_backup = self.backup_histories_df(resampling=resampling)
else:
self._log.debug("Creating a new backup database")
df_to_backup = self.backup_histories_df(resampling=resampling)
# DataFrames that will be saved to SQL
with contextlib.closing(
sqlite3.connect("{}.db".format(self.properties.db_name))
) as con:
try:
data = pd.read_sql("SELECT * FROM history", con)
df = pd.concat([data, df_to_backup], sort=True)
except:
df = df_to_backup
sql.to_sql(
df_to_backup,
name="history",
con=con,
index_label="index",
index=True,
if_exists="append",
)
# Saving other properties to a pickle file...
prop_backup = {"device": self.dev_properties_df()}
prop_backup["points"] = self.points_properties_df()
with open("{}.bin".format(self.properties.db_name), "wb") as file:
pickle.dump(prop_backup, file)
if self.properties.clear_history_on_save:
self.clear_histories()
self._log.info("Device saved to {}.db".format(self.properties.db_name))
def points_from_sql(self, db_name):
"""
Retrieve point list from SQL database
"""
points = self._read_from_sql("SELECT * FROM history;", db_name)
return list(points.columns.values)[1:]
def his_from_sql(self, db_name, point):
"""
        Retrieve point histories from the SQL database
"""
        his = self._read_from_sql('select * from "{}"'.format("history"), db_name)
his.index = his["index"].apply(Timestamp)
return his.set_index("index")[point]
def value_from_sql(self, db_name, point):
"""
Take last known value as the value
"""
return self.his_from_sql(db_name, point).last_valid_index()
def read_point_prop(self, device_name, point):
"""
Points properties retrieved from pickle
"""
with open("{}.bin".format(device_name), "rb") as file:
try:
_point = pickle.load(file)["points"][point]
except KeyError:
raise RemovedPointException(
"{} not found (probably deleted)".format(point)
)
return _point
def read_dev_prop(self, device_name):
"""
Device properties retrieved from pickle
"""
self._log.debug("Reading prop from DB file")
with open("{}.bin".format(device_name), "rb") as file:
return pickle.load(file)["device"]
| lgpl-3.0 |
jj-umn/tools-iuc | tools/fsd/fsd.py | 17 | 44897 | #!/usr/bin/env python
# Family size distribution of SSCSs
#
# Author: Monika Heinzl, Johannes-Kepler University Linz (Austria)
# Contact: [email protected]
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS as input; up to 4 files can be provided.
# The program produces a plot which shows the distribution of family sizes of all SSCSs from the input files and
# a tabular file with the data of the plot, as well as a TXT file with all tags of the DCS and their family sizes.
# If only one file is provided, a family size distribution is produced that separates SSCSs without a partner from DCSs.
# If more than one file (up to 4) is given, a single plot with the family size distributions of all datasets is produced.
# USAGE: python FSD_Galaxy_1.4_commandLine_FINAL.py --inputFile1 filename --inputName1 filename --inputFile2 filename2 --inputName2 filename2 --inputFile3 filename3 --inputName3 filename3 --inputFile4 filename4 --inputName4 filename4 --log_axis --output_tabular outputfile_name_tabular --output_pdf outputfile_name_pdf
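# Expected input format (inferred from the parsing below): each tabular row holds
# three columns -- family size, tag sequence and strand orientation -- e.g.
# 7<TAB>AAAATTTTCCCCGGGG<TAB>ab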
import argparse
import sys
import matplotlib.pyplot as plt
import numpy
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file):
with open(file, 'r') as dest_f:
data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter='\t', comments='#', dtype=str)
return(data_array)
def make_argparser():
parser = argparse.ArgumentParser(description='Family Size Distribution of duplex sequencing data')
parser.add_argument('--inputFile1', help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--inputFile2', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName2')
parser.add_argument('--inputFile3', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName3')
parser.add_argument('--inputFile4', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName4')
parser.add_argument('--log_axis', action="store_false", help='Transform y axis in log scale.')
parser.add_argument('--rel_freq', action="store_false", help='If False, the relative frequencies are displayed.')
parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the pdf file.')
parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the tabular file.')
return parser
def compare_read_families(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
firstFile = args.inputFile1
name1 = args.inputName1
secondFile = args.inputFile2
name2 = args.inputName2
thirdFile = args.inputFile3
name3 = args.inputName3
fourthFile = args.inputFile4
name4 = args.inputName4
log_axis = args.log_axis
rel_freq = args.rel_freq
title_file = args.output_tabular
title_file2 = args.output_pdf
sep = "\t"
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['patch.edgecolor'] = "black"
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
list_to_plot = []
label = []
data_array_list = []
list_to_plot_original = []
colors = []
bins = numpy.arange(1, 22)
with open(title_file, "w") as output_file, PdfPages(title_file2) as pdf:
fig = plt.figure()
fig.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0)
fig2 = plt.figure()
fig2.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0)
if firstFile is not None:
file1 = readFileReferenceFree(firstFile)
integers = numpy.array(file1[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers)
colors.append("#0000FF")
# for plot: replace all big family sizes by 22
data1 = numpy.clip(integers, bins[0], bins[-1])
name1 = name1.split(".tabular")[0]
if len(name1) > 40:
name1 = name1[:40]
list_to_plot.append(data1)
label.append(name1)
data_array_list.append(file1)
legend = "\n\n\n{}".format(name1)
fig.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "singletons:\nnr. of tags\n{:,} ({:.3f})".format(numpy.bincount(data1)[1],
float(numpy.bincount(data1)[1]) / len(data1))
fig.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "PE reads\n{:,} ({:.3f})".format(numpy.bincount(data1)[1],
float(numpy.bincount(data1)[1]) / sum(integers))
fig.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "family size > 20:\nnr. of tags\n{:,} ({:.3f})".format(len(integers[integers > 20]),
float(len(integers[integers > 20])) / len(integers))
fig.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n{:,} ({:.3f})".format(sum(integers[integers > 20]),
float(sum(integers[integers > 20])) / sum(integers))
fig.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "total nr. of\ntags\n{:,}".format(len(data1))
fig.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "PE reads\n{:,}".format(sum(integers))
fig.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure)
if secondFile is not None:
file2 = readFileReferenceFree(secondFile)
integers2 = numpy.array(file2[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers2)
colors.append("#298A08")
data2 = numpy.clip(integers2, bins[0], bins[-1])
list_to_plot.append(data2)
name2 = name2.split(".tabular")[0]
if len(name2) > 40:
name2 = name2[:40]
label.append(name2)
data_array_list.append(file2)
fig.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / len(data2))
fig.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure)
legend3 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / sum(integers2))
fig.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers2[integers2 > 20]),
float(len(integers2[integers2 > 20])) / len(integers2))
fig.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers2[integers2 > 20]),
float(sum(integers2[integers2 > 20])) / sum(integers2))
fig.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data2))
fig.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers2))
fig.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure)
if thirdFile is not None:
file3 = readFileReferenceFree(thirdFile)
integers3 = numpy.array(file3[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers3)
colors.append("#DF0101")
data3 = numpy.clip(integers3, bins[0], bins[-1])
list_to_plot.append(data3)
name3 = name3.split(".tabular")[0]
if len(name3) > 40:
name3 = name3[:40]
label.append(name3)
data_array_list.append(file3)
fig.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data3)[1], float(numpy.bincount(data3)[1]) / len(data3))
fig.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "{:,} ({:.3f})".format(numpy.bincount(data3)[1],
float(numpy.bincount(data3)[1]) / sum(integers3))
fig.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers3[integers3 > 20]),
float(len(integers3[integers3 > 20])) / len(integers3))
fig.text(0.58, 0.07, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.07, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers3[integers3 > 20]),
float(sum(integers3[integers3 > 20])) / sum(integers3))
fig.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data3))
fig.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers3))
fig.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure)
if fourthFile is not None:
file4 = readFileReferenceFree(fourthFile)
integers4 = numpy.array(file4[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers4)
colors.append("#04cec7")
data4 = numpy.clip(integers4, bins[0], bins[-1])
list_to_plot.append(data4)
name4 = name4.split(".tabular")[0]
if len(name4) > 40:
name4 = name4[:40]
label.append(name4)
data_array_list.append(file4)
fig.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data4)[1], float(numpy.bincount(data4)[1]) / len(data4))
fig.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "{:,} ({:.3f})".format(numpy.bincount(data4)[1],
float(numpy.bincount(data4)[1]) / sum(integers4))
fig.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers4[integers4 > 20]),
float(len(integers4[integers4 > 20])) / len(integers4))
fig.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers4[integers4 > 20]),
float(sum(integers4[integers4 > 20])) / sum(integers4))
fig.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data4))
fig.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers4))
fig.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure)
list_to_plot2 = list_to_plot
if rel_freq:
ylab = "Relative Frequency"
else:
ylab = "Absolute Frequency"
# PLOT FSD based on tags
fig.suptitle('Family Size Distribution (FSD) based on families', fontsize=14)
ax = fig.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
ax.set_xticks([], [])
if rel_freq:
w = [numpy.zeros_like(data) + 1. / len(data) for data in list_to_plot2]
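            # Weighting every observation by 1/N turns each dataset's histogram
            # counts into relative frequencies that sum to 1, so differently sized
            # inputs remain comparable within one plot.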
counts = ax.hist(list_to_plot2, weights=w, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", color=colors, linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8)
ax.set_ylim(0, 1.07)
else:
counts = ax.hist(list_to_plot2, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8, color=colors)
ax.set_xticks(numpy.array(ticks))
ax.set_xticklabels(ticks1)
ax.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
ax.set_ylabel(ylab, fontsize=14)
ax.set_xlabel("Family size", fontsize=14)
if log_axis:
ax.set_yscale('log')
ax.grid(b=True, which="major", color="#424242", linestyle=":")
ax.margins(0.01, None)
pdf.savefig(fig)
# PLOT FSD based on PE reads
fig2.suptitle('Family Size Distribution (FSD) based on PE reads', fontsize=14)
ax2 = fig2.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
reads = []
reads_rel = []
barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. / (len(list_to_plot) + 1)
ax2.set_xticks([], [])
for i in range(len(list_to_plot2)):
x = list(numpy.arange(1, 22).astype(float))
unique, c = numpy.unique(list_to_plot2[i], return_counts=True)
y = unique * c
if sum(list_to_plot_original[i] > 20) > 0:
y[len(y) - 1] = sum(list_to_plot_original[i][list_to_plot_original[i] > 20])
y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))]
reads.append(y)
reads_rel.append(list(numpy.float_(y)) / sum(y))
if len(list_to_plot2) == 1:
x = [xi * 0.5 for xi in x]
w = 0.4
else:
x = [xi + barWidth for xi in x]
w = 1. / (len(list_to_plot) + 1)
if rel_freq:
ax2.bar(x, list(numpy.float_(y)) / numpy.sum(y), align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i])
ax2.set_ylim(0, 1.07)
else:
ax2.bar(x, y, align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i])
if i == len(list_to_plot2) - 1:
barWidth += 1. / (len(list_to_plot) + 1) + 1. / (len(list_to_plot) + 1)
else:
barWidth += 1. / (len(list_to_plot) + 1)
ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
if len(list_to_plot2) == 1:
ax2.set_xticks(numpy.array([xi + 0.2 for xi in x]))
else:
ax2.set_xticks(numpy.array(ticks))
ax2.set_xticklabels(ticks1)
ax2.set_xlabel("Family size", fontsize=14)
ax2.set_ylabel(ylab, fontsize=14)
if log_axis:
ax2.set_yscale('log')
ax2.grid(b=True, which="major", color="#424242", linestyle=":")
ax2.margins(0.01, None)
pdf.savefig(fig2)
plt.close()
# write data to CSV file tags
counts = [numpy.bincount(di, minlength=22)[1:] for di in list_to_plot2] # original counts of family sizes
output_file.write("Values from family size distribution with all datasets based on families\n")
output_file.write("\nFamily size")
for i in label:
output_file.write("{}{}".format(sep, i))
output_file.write("\n")
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(len(label)):
output_file.write("{}{}".format(int(counts[n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
for i in counts:
output_file.write("{}{}".format(int(sum(i)), sep))
# write data to CSV file PE reads
output_file.write("\n\nValues from family size distribution with all datasets based on PE reads\n")
output_file.write("\nFamily size")
for i in label:
output_file.write("{}{}".format(sep, i))
output_file.write("\n")
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
if len(label) == 1:
output_file.write("{}{}".format(int(reads[0][j]), sep))
else:
for n in range(len(label)):
output_file.write("{}{}".format(int(reads[n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
if len(label) == 1:
output_file.write("{}{}".format(int(sum(numpy.concatenate(reads))), sep))
else:
for i in reads:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("\n")
# Family size distribution after DCS and SSCS
for dataset, data_o, name_file in zip(list_to_plot, data_array_list, label):
tags = numpy.array(data_o[:, 2])
seq = numpy.array(data_o[:, 1])
data = numpy.array(dataset)
data_o = numpy.array(data_o[:, 0]).astype(int)
# find all unique tags and get the indices for ALL tags, but only once
u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
d = u[c > 1]
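            # d now holds the tag sequences seen more than once, i.e. tags for which
            # both an ab and a ba family exist (duplex / DCS candidates).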
# get family sizes, tag for duplicates
duplTags_double = data[numpy.in1d(seq, d)]
duplTags_double_o = data_o[numpy.in1d(seq, d)]
duplTags = duplTags_double[0::2] # ab of DCS
duplTags_o = duplTags_double_o[0::2] # ab of DCS
duplTagsBA = duplTags_double[1::2] # ba of DCS
duplTagsBA_o = duplTags_double_o[1::2] # ba of DCS
# get family sizes for SSCS with no partner
ab = numpy.where(tags == "ab")[0]
abSeq = seq[ab]
ab_o = data_o[ab]
ab = data[ab]
ba = numpy.where(tags == "ba")[0]
baSeq = seq[ba]
ba_o = data_o[ba]
ba = data[ba]
dataAB = ab[numpy.in1d(abSeq, d, invert=True)]
dataAB_o = ab_o[numpy.in1d(abSeq, d, invert=True)]
dataBA = ba[numpy.in1d(baSeq, d, invert=True)]
dataBA_o = ba_o[numpy.in1d(baSeq, d, invert=True)]
list1 = [duplTags_double, dataAB, dataBA] # list for plotting
list1_o = [duplTags_double_o, dataAB_o, dataBA_o] # list for plotting
# information for family size >= 3
dataAB_FS3 = dataAB[dataAB >= 3]
dataAB_FS3_o = dataAB_o[dataAB_o >= 3]
dataBA_FS3 = dataBA[dataBA >= 3]
dataBA_FS3_o = dataBA_o[dataBA_o >= 3]
duplTags_FS3 = duplTags[(duplTags >= 3) & (duplTagsBA >= 3)] # ab+ba with FS>=3
duplTags_FS3_BA = duplTagsBA[(duplTags >= 3) & (duplTagsBA >= 3)] # ba+ab with FS>=3
duplTags_double_FS3 = len(duplTags_FS3) + len(duplTags_FS3_BA) # both ab and ba strands with FS>=3
# original FS
duplTags_FS3_o = duplTags_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ab+ba with FS>=3
duplTags_FS3_BA_o = duplTagsBA_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ba+ab with FS>=3
duplTags_double_FS3_o = sum(duplTags_FS3_o) + sum(duplTags_FS3_BA_o) # both ab and ba strands with FS>=3
fig = plt.figure()
plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0)
if rel_freq:
w = [numpy.zeros_like(dj) + 1. / len(numpy.concatenate(list1)) for dj in list1]
plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], weights=w, edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8)
plt.ylim(0, 1.07)
else:
plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8)
# tick labels of x axis
ticks = numpy.arange(1, 22, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(numpy.array(ticks), ticks1)
singl = len(data_o[data_o == 1])
last = len(data_o[data_o > 20]) # large families
if log_axis:
plt.yscale('log')
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.title("{}: FSD based on families".format(name_file), fontsize=14)
plt.xlabel("Family size", fontsize=14)
plt.ylabel(ylab, fontsize=14)
plt.margins(0.01, None)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
# extra information beneath the plot
legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags="
plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA),
len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)),
(len(ab) + len(ba)))
plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o),
sum(duplTags_o), sum(duplTags_double_o),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(ab_o) + sum(ba_o)))
plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend = "rel. freq. of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)),
(len(dataAB) + len(dataBA) + len(duplTags)))
plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)),
float(len(dataBA)) / (len(ab) + len(ba)),
float(len(duplTags)) / (len(ab) + len(ba)),
float(len(duplTags_double)) / (len(ab) + len(ba)),
(len(ab) + len(ba)))
plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "\nsingletons:\nfamily size > 20:"
plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,}\n{:,}".format(singl, last)
plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data))
plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20]))
plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o))
plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)))
plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(
float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o)))
plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure)
pdf.savefig(fig)
plt.close()
# PLOT FSD based on PE reads
fig3 = plt.figure()
plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0)
fig3.suptitle("{}: FSD based on PE reads".format(name_file), fontsize=14)
ax2 = fig3.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
reads = []
reads_rel = []
# barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. / (len(list_to_plot) + 1)
ax2.set_xticks([], [])
list_y = []
label = ["duplex", "ab", "ba"]
col = ["#FF0000", "#5FB404", "#FFBF00"]
for i in range(len(list1)):
x = list(numpy.arange(1, 22).astype(float))
unique, c = numpy.unique(list1[i], return_counts=True)
y = unique * c
if sum(list1_o[i] > 20) > 0:
y[len(y) - 1] = sum(list1_o[i][list1_o[i] > 20])
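            # align the per-family-size read counts to the fixed 1..21 bin grid, filling family sizes that never occur with 0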
y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))]
reads.append(y)
reads_rel.append(list(numpy.float_(y)) / sum(numpy.concatenate(list1_o)))
if rel_freq:
y = list(numpy.float_(y)) / sum(numpy.concatenate(list1_o))
ax2.set_ylim(0, 1.07)
list_y.append(y)
if i == 0:
ax2.bar(x, y, align="center", width=0.8, edgecolor="black", label=label[0], linewidth=1, alpha=1, color=col[0])
elif i == 1:
ax2.bar(x, y, bottom=list_y[i - 1], align="center", width=0.8, edgecolor="black", label=label[1], linewidth=1, alpha=1, color=col[1])
elif i == 2:
bars = numpy.add(list_y[0], list_y[1]).tolist()
ax2.bar(x, y, bottom=bars, align="center", width=0.8, edgecolor="black", label=label[2], linewidth=1, alpha=1, color=col[2])
ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
singl = len(data_o[data_o == 1])
last = len(data_o[data_o > 20]) # large families
ax2.set_xticks(numpy.array(ticks))
ax2.set_xticklabels(ticks1)
ax2.set_xlabel("Family size", fontsize=14)
ax2.set_ylabel(ylab, fontsize=14)
if log_axis:
ax2.set_yscale('log')
ax2.grid(b=True, which="major", color="#424242", linestyle=":")
ax2.margins(0.01, None)
# extra information beneath the plot
legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags="
plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA),
len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)),
(len(ab) + len(ba)))
plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o),
sum(duplTags_o), sum(duplTags_double_o),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(ab_o) + sum(ba_o)))
plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend = "rel. freq. of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)),
(len(dataAB) + len(dataBA) + len(duplTags)))
plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)),
float(len(dataBA)) / (len(ab) + len(ba)),
float(len(duplTags)) / (len(ab) + len(ba)),
float(len(duplTags_double)) / (len(ab) + len(ba)),
(len(ab) + len(ba)))
plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "\nsingletons:\nfamily size > 20:"
plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,}\n{:,}".format(singl, last)
plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data))
plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20]))
plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o))
plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)))
plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(
float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o)))
plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure)
pdf.savefig(fig3)
plt.close()
# write same information to a csv file
count = numpy.bincount(data_o) # original counts of family sizes
output_file.write("\nDataset:{}{}\n".format(sep, name_file))
output_file.write("max. family size:{}{}\n".format(sep, max(data_o)))
output_file.write("absolute frequency:{}{}\n".format(sep, count[len(count) - 1]))
output_file.write("relative frequency:{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count)))
output_file.write("median family size:{}{}\n".format(sep, numpy.median(numpy.array(data_o))))
output_file.write("mean family size:{}{}\n\n".format(sep, numpy.mean(numpy.array(data_o))))
output_file.write(
"{}singletons:{}{}{}family size > 20:{}{}{}{}length of dataset:\n".format(sep, sep, sep, sep, sep, sep,
sep, sep))
output_file.write(
"{}nr. of tags{}rel. freq of tags{}rel.freq of PE reads{}nr. of tags{}rel. freq of tags{}nr. of PE reads{}rel. freq of PE reads{}total nr. of tags{}total nr. of PE reads\n".format(
sep, sep, sep, sep, sep, sep, sep, sep, sep))
output_file.write("{}{}{}{}{:.3f}{}{:.3f}{}{}{}{:.3f}{}{}{}{:.3f}{}{}{}{}\n\n".format(
name_file, sep, singl, sep, float(singl) / len(data), sep, float(singl) / sum(data_o), sep,
last, sep, float(last) / len(data), sep, sum(data_o[data_o > 20]), sep, float(sum(data_o[data_o > 20])) / sum(data_o), sep, len(data),
sep, sum(data_o)))
# information for FS >= 1
output_file.write(
"The unique frequencies were calculated from the dataset where the tags occured only once (=ab without DCS, ba without DCS)\n"
"Whereas the total frequencies were calculated from the whole dataset (=including the DCS).\n\n")
output_file.write(
"FS >= 1{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. freq of PE reads:\n".format(sep, sep,
sep, sep,
sep))
output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep))
output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataAB), sep, sum(dataAB_o), sep,
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
sep, float(len(dataAB)) / (len(ab) + len(ba)), sep, float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
sep, float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataBA), sep, sum(dataBA_o), sep,
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
sep, float(len(dataBA)) / (len(ab) + len(ba)), sep,
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
sep, float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write(
"DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format(
sep, len(duplTags), len(duplTags_double), sep, sum(duplTags_o), sum(duplTags_double_o), sep,
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), sep,
float(len(duplTags)) / (len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)), sep,
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep,
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write("total nr. of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format(
sep, (len(dataAB) + len(dataBA) + len(duplTags)), sep,
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep,
(len(dataAB) + len(dataBA) + len(duplTags)), sep, (len(ab) + len(ba)), sep,
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, (sum(ab_o) + sum(ba_o))))
# information for FS >= 3
output_file.write(
"\nFS >= 3{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. freq of PE reads:\n".format(sep,
sep,
sep,
sep,
sep))
output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep))
output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataAB_FS3), sep, sum(dataAB_FS3_o), sep,
float(len(dataAB_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
        float(len(dataAB_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
        sep, float(sum(dataAB_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
        sep, float(sum(dataAB_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataBA_FS3), sep, sum(dataBA_FS3_o), sep,
        float(len(dataBA_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)),
        sep, float(len(dataBA_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
        sep, float(sum(dataBA_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
        sep, float(sum(dataBA_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write(
"DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format(
sep, len(duplTags_FS3), duplTags_double_FS3, sep, sum(duplTags_FS3_o), duplTags_double_FS3_o, sep,
float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
float(duplTags_double_FS3) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep,
float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o),
float(duplTags_double_FS3_o) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write("total nr. of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format(
sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
(sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
(len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep,
(sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
counts = [numpy.bincount(dk, minlength=22)[1:] for dk in list1] # original counts of family sizes
output_file.write("\nValues from family size distribution based on families\n")
output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep))
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(3):
output_file.write("{}{}".format(int(counts[n][j]), sep))
output_file.write("{}\n".format(counts[0][j] + counts[1][j] + counts[2][j]))
j += 1
output_file.write("sum{}".format(sep))
for i in counts:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("{}\n".format(sum(counts[0] + counts[1] + counts[2])))
output_file.write("\nValues from family size distribution based on PE reads\n")
output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep))
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(3):
output_file.write("{}{}".format(int(reads[n][j]), sep))
output_file.write("{}\n".format(reads[0][j] + reads[1][j] + reads[2][j]))
j += 1
output_file.write("sum{}".format(sep))
for i in reads:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("{}\n".format(sum(reads[0] + reads[1] + reads[2])))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families(sys.argv))
| mit |
alex-pirozhenko/sklearn-pmml | sklearn_pmml/test/__init__.py | 5 | 3227 | import os
from unittest import TestCase
from sklearn.base import BaseEstimator
try:
import cPickle as pickle
except:
import pickle
from sklearn_pmml.convert import *
from sklearn_pmml import pmml
class TestSerializationMeta(type):
TEST_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(TEST_DIR, 'data')
ESTIMATOR_FILE_NAME = 'estimator.pkl'
PMML_FILE_NAME = 'document.pmml'
CONTEXT_FILE_NAME = 'context.pkl'
def __new__(mcs, name, bases, d):
"""
        This method overrides the default behaviour for class creation. For every directory abc in the data
        directory it creates a method called test_abc whose body is the load_and_compare function defined below.
"""
def gen_test(suite_name):
def load_and_compare(self):
# load the context.pkl, document.pmml and estimator.pkl
suite_path = os.path.join(mcs.DATA_DIR, suite_name)
content = os.listdir(suite_path)
                assert len(content) == 3, 'There should be exactly three files in the suite directory'
assert mcs.ESTIMATOR_FILE_NAME in content, 'Estimator should be stored in {} file'.format(mcs.ESTIMATOR_FILE_NAME)
assert mcs.PMML_FILE_NAME in content, 'PMML should be stored in {} file'.format(mcs.PMML_FILE_NAME)
assert mcs.CONTEXT_FILE_NAME in content, 'Context should be stored in {} file'.format(mcs.CONTEXT_FILE_NAME)
with open(os.path.join(suite_path, mcs.ESTIMATOR_FILE_NAME), 'r') as est_file:
est = pickle.load(est_file)
assert isinstance(est, BaseEstimator), '{} should be a trained estimator'.format(mcs.ESTIMATOR_FILE_NAME)
with open(os.path.join(suite_path, mcs.CONTEXT_FILE_NAME), 'r') as ctx_file:
ctx = pickle.load(ctx_file)
assert isinstance(ctx, TransformationContext), '{} should be a transformation context'.format(mcs.CONTEXT_FILE_NAME)
converter = find_converter(est)
assert converter is not None, 'Can not find converter for {}'.format(est)
transformed_pmml = converter(est, ctx).pmml()
with open(os.path.join(suite_path, mcs.PMML_FILE_NAME), 'r') as pmml_file:
loaded_pmml = pmml.CreateFromDocument('\n'.join(pmml_file.readlines()))
self.maxDiff = None
# make sure that the expected PMML matches the produced one
self.assertEquals(loaded_pmml.toDOM().toprettyxml(), transformed_pmml.toDOM().toprettyxml())
return load_and_compare
# for every batch in the data dir create a corresponding test method
for case in os.listdir(TestSerializationMeta.DATA_DIR):
test_name = 'test_{}'.format(case)
d[test_name] = gen_test(case)
return type.__new__(mcs, name, bases, d)
class TestSerialization(TestCase):
"""
This is an automated tester for serializers. It uses a custom metaclass to define the test cases based on the
content of the data directory. For the logic behind every check see load_and_compare method above.
"""
__metaclass__ = TestSerializationMeta
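# A new serialization suite can be added by creating another directory under
# data/ holding the three files named by the metaclass constants above, e.g.
# (layout is illustrative; the directory name is arbitrary):
#
#   data/my_suite/estimator.pkl  - pickled, fitted sklearn estimator
#   data/my_suite/context.pkl    - pickled TransformationContext
#   data/my_suite/document.pmml  - the expected PMML output
#
# The metaclass then generates TestSerialization.test_my_suite automatically.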
| mit |
subutai/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
    # add attributes to "show". This is something ipython does.
_macosx.show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
    methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
| agpl-3.0 |
ocefpaf/ulmo | ulmo/ncdc/cirs/core.py | 1 | 13471 | """
ulmo.ncdc.cirs.core
~~~~~~~~~~~~~~~~~~~
This module provides direct access to the `National Climatic Data Center`_
`Climate Index Reference Sequential (CIRS)`_ drought dataset.
.. _National Climatic Data Center: http://www.ncdc.noaa.gov
.. _Climate Index Reference Sequential (CIRS): http://www1.ncdc.noaa.gov/pub/data/cirs/
"""
from builtins import str
from builtins import range
from past.builtins import basestring
import distutils
import os.path
import pandas
from ulmo import util
CIRS_DIR = util.get_ulmo_dir('ncdc/cirs')
NO_DATA_VALUES = {
'cddc': '-9999.',
'hddc': '-9999.',
'pcpn': '-9.99',
'pdsi': '-99.99',
'phdi': '-99.99',
'pmdi': '-99.99',
'sp01': '-99.99',
'sp02': '-99.99',
'sp03': '-99.99',
'sp06': '-99.99',
'sp09': '-99.99',
'sp12': '-99.99',
'sp24': '-99.99',
'tmpc': '-99.90',
'zndx': '-99.99',
}
def get_data(elements=None, by_state=False, location_names='abbr', as_dataframe=False, use_file=None):
"""Retrieves data.
Parameters
----------
    elements : ``None``, str or list
        The element(s) for which to get data. If ``None`` (default), then
all elements are used. An individual element is a string, but a list or
tuple of them can be used to specify a set of elements. Elements are:
* 'cddc': Cooling Degree Days
* 'hddc': Heating Degree Days
* 'pcpn': Precipitation
* 'pdsi': Palmer Drought Severity Index
* 'phdi': Palmer Hydrological Drought Index
* 'pmdi': Modified Palmer Drought Severity Index
* 'sp01': 1-month Standardized Precipitation Index
* 'sp02': 2-month Standardized Precipitation Index
* 'sp03': 3-month Standardized Precipitation Index
* 'sp06': 6-month Standardized Precipitation Index
* 'sp09': 9-month Standardized Precipitation Index
* 'sp12': 12-month Standardized Precipitation Index
* 'sp24': 24-month Standardized Precipitation Index
* 'tmpc': Temperature
* 'zndx': ZNDX
by_state : bool
If False (default), divisional data will be retrieved. If True, then
        state-level data will be retrieved.
location_names : str or ``None``
This parameter defines what (if any) type of names will be added to the
values. If set to 'abbr' (default), then abbreviated location names
will be used. If 'full', then full location names will be used. If set
to None, then no location name will be added and the only identifier
will be the location_codes (this is the most memory-conservative
option).
as_dataframe : bool
If ``False`` (default), a list of values dicts is returned. If ``True``,
a dict with element codes mapped to equivalent pandas.DataFrame objects
will be returned. The pandas dataframe is used internally, so setting
this to ``True`` is faster as it skips a somewhat expensive
serialization step.
use_file : ``None``, file-like object or str
If ``None`` (default), then data will be automatically retrieved from
the web. If a file-like object or a file path string, then the file will
be used to read data from. This is intended to be used for reading in
previously-downloaded versions of the dataset.
Returns
-------
data : list or pandas.DataFrame
A list of value dicts or a pandas.DataFrame containing data. See
the ``as_dataframe`` parameter for more.
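    Examples
    --------
    A minimal sketch (illustrative only; it downloads data from the NCDC
    FTP server and assumes the default ``location_names='abbr'``):
    >>> from ulmo.ncdc import cirs
    >>> df = cirs.get_data(elements='pdsi', as_dataframe=True)  # doctest: +SKIP
    >>> df[['state', 'division', 'year', 'month', 'pdsi']].head()  # doctest: +SKIP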
"""
if isinstance(elements, basestring):
elements = [elements]
elif elements is None:
elements = [
'cddc',
'hddc',
'pcpn',
'pdsi',
'phdi',
'pmdi',
'sp01',
'sp02',
'sp03',
'sp06',
'sp09',
'sp12',
'sp24',
'tmpc',
'zndx',
]
df = None
for element in elements:
element_file = _get_element_file(use_file, element, elements, by_state)
element_df = _get_element_data(element, by_state, element_file, location_names)
keys = ['location_code', 'year', 'month']
for append_key in ['division', 'state', 'state_code']:
if append_key in element_df.columns:
keys.append(append_key)
element_df.set_index(keys, inplace=True)
if df is None:
df = element_df
else:
df = df.join(element_df, how='outer')
df = df.reset_index()
df = _resolve_location_names(df, location_names, by_state)
if as_dataframe:
return df
else:
return list(df.T.to_dict().values())
def _get_element_data(element, by_state, use_file, location_names):
if use_file:
url = None
path = None
else:
url = _get_url(element, by_state)
filename = url.rsplit('/', 1)[-1]
path = os.path.join(CIRS_DIR, filename)
with util.open_file_for_url(url, path, use_file=use_file) as f:
element_df = _parse_values(f, by_state, location_names, element)
return element_df
def _get_element_file(use_file, element, elements, by_state):
if isinstance(use_file, basestring):
if os.path.basename(use_file) == '':
if len(elements) > 1:
                raise ValueError(
"'use_file' must be a path to a directory if using "
"'use_file' with multiple elements")
return use_file + _get_filename(element, by_state, os.path.dirname(use_file))
return use_file
def _get_filename(element, by_state, dir_path):
files = os.listdir(dir_path)
return _most_recent(files, element, by_state)
def _get_url(element, by_state):
ftp_dir = "ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/"
files = util.dir_list(ftp_dir)
most_recent = _most_recent(files, element, by_state)
return ftp_dir + most_recent
def _most_recent(files, element, by_state):
geographic_extent = 'st' if by_state else 'dv'
match_str = 'climdiv-{element}{geographic_extent}'.format(
element=element,
geographic_extent=geographic_extent,
)
matches = [s for s in files if s.startswith(match_str)]
return sorted(matches, key=_file_key)[0]
def _file_key(filename):
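    # filenames look roughly like 'climdiv-pcpndv-v1.0.0-YYYYMMDD' (the pattern
    # is illustrative); grab the 'X.Y.Z' portion so files can be ordered by version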
version_str = filename.split('-')[2][1:]
return distutils.version.StrictVersion(version_str)
def _parse_values(file_handle, by_state, location_names, element):
if by_state:
id_columns = [
('location_code', 0, 3, None),
#('division', 3, 3, None), # ignored in state files
#('element', 4, 6, None), # element is redundant
('year', 6, 10, None),
]
else:
id_columns = [
('location_code', 0, 2, None),
('division', 2, 4, None),
#('element', 4, 6, None), # element is redundant
('year', 6, 10, None),
]
year_col_end = id_columns[-1][2]
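    # the twelve monthly values are laid out in consecutive 7-character-wide
    # slots immediately after the year field, hence the 7 * n offsets below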
month_columns = [
(str(n), year_col_end - 6 + (7 * n), year_col_end + (7 * n), None)
for n in range(1, 13)
]
columns = id_columns + month_columns
na_values = [NO_DATA_VALUES.get(element)]
parsed = util.parse_fwf(file_handle, columns, na_values=na_values)
month_columns = [id_column[0] for id_column in id_columns]
melted = pandas.melt(parsed, id_vars=month_columns)\
.rename(columns={'variable': 'month'})
melted.month = melted.month.astype(int)
# throw away NaNs
melted = melted[melted['value'].notnull()]
data = melted.rename(columns={
'value': element,
})
return data
def _resolve_location_names(df, location_names, by_state):
if location_names is None:
return df
elif location_names not in ('abbr', 'full'):
raise ValueError("location_names should be set to either None, 'abbr' or 'full'")
else:
locations = _states_regions_dataframe()[location_names]
with_locations = df.join(locations, on='location_code')
if by_state:
return with_locations.rename(columns={
location_names: 'location',
})
else:
return with_locations.rename(columns={
location_names: 'state',
'location_code': 'state_code',
})
def _states_regions_dataframe():
"""returns a dataframe indexed by state/region code with columns for the
    name and abbreviation (abbr) to use
"""
STATES_REGIONS = {
        # code: (full name, abbreviation)
1: ("Alabama", "AL"),
2: ("Arizona", "AZ"),
3: ("Arkansas", "AR"),
4: ("California", "CA"),
5: ("Colorado", "CO"),
6: ("Connecticut", "CT"),
7: ("Delaware", "DE"),
8: ("Florida", "FL"),
9: ("Georgia", "GA"),
10: ("Idaho", "ID"),
11: ("Illinois", "IL"),
12: ("Indiana", "IN"),
13: ("Iowa", "IA"),
14: ("Kansas", "KS"),
15: ("Kentucky", "KY"),
16: ("Louisiana", "LA"),
17: ("Maine", "ME"),
18: ("Maryland", "MD"),
19: ("Massachusetts", "MA"),
20: ("Michigan", "MI"),
21: ("Minnesota", "MN"),
22: ("Mississippi", "MS"),
23: ("Missouri", "MO"),
24: ("Montana", "MT"),
25: ("Nebraska", "NE"),
26: ("Nevada", "NV"),
27: ("New Hampshire", "NH"),
28: ("New Jersey", "NJ"),
29: ("New Mexico", "NM"),
30: ("New York", "NY"),
31: ("North Carolina", "NC"),
32: ("North Dakota", "ND"),
33: ("Ohio", "OH"),
34: ("Oklahoma", "OK"),
35: ("Oregon", "OR"),
36: ("Pennsylvania", "PA"),
37: ("Rhode Island", "RI"),
38: ("South Carolina", "SC"),
39: ("South Dakota", "SD"),
40: ("Tennessee", "TN"),
41: ("Texas", "TX"),
42: ("Utah", "UT"),
43: ("Vermont", "VT"),
44: ("Virginia", "VA"),
45: ("Washington", "WA"),
46: ("West Virginia", "WV"),
47: ("Wisconsin", "WI"),
48: ("Wyoming", "WY"),
101: ("Northeast Region", "ner"),
102: ("East North Central Region", "encr"),
103: ("Central Region", "cr"),
104: ("Southeast Region", "ser"),
105: ("West North Central Region", "wncr"),
106: ("South Region", "sr"),
107: ("Southwest Region", "swr"),
108: ("Northwest Region", "nwr"),
109: ("West Region", "wr"),
110: ("National (contiguous 48 States)", "national"),
# The following are the range of code values for the National Weather Service Regions, river basins, and agricultural regions.
111: ("NWS: Great Plains", "nws:gp"),
115: ("NWS: Southern Plains and Gulf Coast", "nws:spgc"),
120: ("NWS: US Rockies and Westward", "nws:usrw"),
121: ("NWS: Eastern Region", "nws:er"),
122: ("NWS: Southern Region", "nws:sr"),
123: ("NWS: Central Region", "nws:cr"),
124: ("NWS: Western Region", "nws:wr"),
201: ("NWS: Pacific Northwest Basin", "nws:pnwb"),
202: ("NWS: California River Basin", "nws:crb"),
203: ("NWS: Great Basin", "nws:gb"),
204: ("NWS: Lower Colorado River Basin", "nws:lcrb"),
205: ("NWS: Upper Colorado River Basin", "nws:urcb"),
206: ("NWS: Rio Grande River Basin", "nws:rgrb"),
207: ("NWS: Texas Gulf Coast River Basin", "nws:tgcrb"),
208: ("NWS: Arkansas-White-Red Basin", "nws:awrb"),
209: ("NWS: Lower Mississippi River Basin", "nws:lmrb"),
210: ("NWS: Missouri River Basin", "nws:mrb"),
211: ("NWS: Souris-Red-Rainy Basin", "nws:srrb"),
212: ("NWS: Upper Mississippi River Basin", "nws:umrb"),
213: ("NWS: Great Lakes Basin", "nws:glb"),
214: ("NWS: Tennessee River Basin", "nws:trb"),
215: ("NWS: Ohio River Basin", "nws:ohrb"),
216: ("NWS: South Atlantic-Gulf Basin", "nws:sagb"),
217: ("NWS: Mid-Atlantic Basin", "nws:mab"),
218: ("NWS: New England Basin", "nws:neb"),
220: ("NWS: Mississippi River Basin & Tributaties (N. of Memphis, TN",
"nws:mrbt"),
        # the codes below are weighted by area
250: ("Area: Spring Wheat Belt", "area:swb"),
255: ("Area: Primary Hard Red Winter Wheat Belt", "area:phrwwb"),
256: ("Area: Winter Wheat Belt", "area:wwb"),
260: ("Area: Primary Corn and Soybean Belt", "area:pcsb"),
261: ("Area: Corn Belt", "area:cb"),
262: ("Area: Soybean Belt", "area:sb"),
265: ("Area: Cotton Belt", "area:cb"),
        # the codes below are weighted by productivity
350: ("Prod: Spring Wheat Belt", "prod:swb"),
356: ("Prod: Winter Wheat Belt", "prod:wwb"),
361: ("Prod: Corn Belt", "prod:cb"),
362: ("Prod: Soybean Belt", "prod:sb"),
365: ("Prod: Cotton Belt", "prod:cb"),
        # the codes below are for percent productivity in the Palmer Z Index categories
450: ("% Prod: Spring Wheat Belt", "%prod:swb"),
456: ("% Prod: Winter Wheat Belt", "%prod:wwb"),
461: ("% Prod: Corn Belt", "%prod:cb"),
462: ("% Prod: Soybean Belt", "%prod:sb"),
465: ("% Prod: Cotton Belt", "%prod:cb"),
}
return pandas.DataFrame(STATES_REGIONS).T.rename(columns={0: 'full', 1: 'abbr'})
| bsd-3-clause |
ppham27/MLaPP-solutions | chap07/linreg.py | 1 | 1795 | import numpy as np
import matplotlib.pyplot as plt
def plot_xy(x, y, ax=None):
if ax == None:
ax = plt.gca()
ax.scatter(x, y)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_title("Training data")
ax.grid(True)
def plot_abline(slope, intercept, xmin, xmax, ax=None):
if ax == None:
ax = plt.gca()
ax.plot([xmin, xmax], [xmin*slope + intercept, xmax*slope + intercept],
linewidth=3, color='red')
class SimpleOnlineLinearRegressor:
## keep track of sufficient statistics
def __init__(self):
self.N = 0
self.x_sum = 0
self.y_sum = 0
self.x_squared_sum = 0
self.y_squared_sum = 0
self.xy_sum = 0
self.w0 = 0
self.w1 = 0
self.sigma2 = 0
def predict(self, X):
return self.w0 + self.w1*X
def fit(self, X, y):
cov = np.cov(X,y,bias=True)
self.N = len(y)
self.w1 = cov[0,1]/cov[0,0]
self.w0 = np.mean(y) - self.w1*np.mean(X)
self.sigma2 = np.dot(y - self.w0 - self.w1*X, y - self.w0 - self.w1*X)/self.N
def partial_fit(self, x, y):
self.N += 1
self.x_sum += x
self.y_sum += y
self.x_squared_sum += x*x
self.y_squared_sum += y*y
self.xy_sum += x*y
if self.N > 1:
self.w1 = (self.xy_sum - self.x_sum*self.y_sum/self.N)/(self.x_squared_sum - self.x_sum*self.x_sum/self.N)
self.w0 = (self.y_sum - self.w1*self.x_sum)/self.N
self.sigma2 = self.w0*self.w0 + (self.y_squared_sum - 2*self.w0*self.y_sum - 2*self.w1*self.xy_sum + 2*self.w0*self.w1*self.x_sum + self.w1*self.w1*self.x_squared_sum)/self.N
def get_params(self):
return {'intercept': self.w0, 'slope': self.w1, 'variance': self.sigma2}
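# A minimal usage sketch (added for illustration, not part of the original
# solution): fit the same synthetic line with the batch fit() and with the
# online partial_fit() updates and check that both recover similar parameters.
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.uniform(-3, 3, size=200)
    y = 1.5 + 2.0 * x + np.random.normal(scale=0.5, size=200)
    batch = SimpleOnlineLinearRegressor()
    batch.fit(x, y)
    online = SimpleOnlineLinearRegressor()
    for xi, yi in zip(x, y):
        online.partial_fit(xi, yi)
    print(batch.get_params())   # intercept ~1.5, slope ~2.0
    print(online.get_params())  # should closely match the batch estimates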
| mit |
postvakje/sympy | examples/intermediate/mplot3d.py | 93 | 1252 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
    warnings.filterwarnings("ignore", r"Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=2, cstride=2)
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
larsmans/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
aburrell/davitpy | davitpy/pydarn/plotting/rti.py | 1 | 46845 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Range-time-intensity plotting
A module for generating rti plots.
Module author: AJ, 20130123
Functions
--------------------------------------------------
plot_rti range-time-intensity plot
plot_freq TX frequency data
plot_searchnoise noise panel
plot_skynoise sky noise panel
plot_cpid control program ID panel
plot_nave            number of averages panel
rti_title title an rti plot
draw_axes draw empty axes
read_data read data in
rti_panel plot the main rti data
daynight_terminator calculate day/night terminator
--------------------------------------------------
"""
import logging
def plot_rti(sTime, rad, eTime=None, bmnum=7, fileType='fitacf',
params=['power', 'velocity', 'width'], scales=[], channel=None,
coords='gate', colors='lasse', yrng=-1, gsct=False,
low_gray=False, show=True, filtered=False,
fileName=None, txfreq_lims=None, myFile=None,
xtick_size=9, ytick_size=9,
xticks=None, axvlines=None,
plot_terminator=False, cpidchange_lims=20):
""" Create an rti plot for a specified radar and time period.
Parameters
----------
sTime : datetime
a datetime object indicating the start time which you would like
to plot
rad : str
the 3 letter radar code, e.g. 'bks'
eTime : Optional[datetime]
a datetime object indicating th end time you would like plotted.
If this is None, 24 hours will be plotted. default = None.
bmnum : Optional[int]
The beam to plot. default: 7
fileType : Optional[str]
The file type to be plotted, one of ['fitex', 'fitacf', 'lmfit'].
        default = 'fitacf'.
params : Optional[list]
a list of the fit parameters to plot, allowable values are:
        ['velocity', 'power', 'width', 'elevation', 'phi0',
        'velocity_error']. default:
['velocity', 'power', 'width']
scales : Optional[list]
a list of the min/max values for the color scale for each param.
If omitted, default scales will be used. If present, the list
should be n x 2 where n is the number of elements in the params
list. Use an empty list for default range, e.g. [[-250,300],[],[]].
default: [[-200,200], [0,30],[0,150]]
channel : Optional[char]
        the channel you wish to plot, e.g. 'a', 'b', 'c', ... default: None
coords : Optional[str]
the coordinates to use for the y axis. The allowable values are
'gate', 'rng', 'geo', 'mag' default: 'gate'
colors : Optional[str]
a string indicating what color bar to use, valid inputs are
'lasse' or 'aj'. Also can be a list of matplotlib colormaps names,
for example ['jet','jet','jet'] if len(param)==3. default: 'lasse'.
yrng : Optional[list or -1]
a list indicating the min and max values for the y axis in the
chosen coordinate system, or a -1 indicating to plot everything.
default: -1.
gsct : Optional[boolean]
a flag indicating whether to plot ground scatter as gray. default:
False (ground scatter plotted normally)
low_gray : Optional[boolean]
a flag indicating whether to plot low velocity scatter as gray.
default: False (low velocity scatter plotted normally)
show : Optional[boolean]
a flag indicating whether to display the figure on the screen.
This can cause problems over ssh. default = True
filtered : Optional[boolean]
a flag indicating whether to boxcar filter the data. default:
False (no filter)
fileName : Optional[string]
If you want to plot for a specific file, indicate the name of the
file as fileName. Include the type of the file in custType.
txfreq_lims : Optional[list]
        a list giving the min/max transmitter frequencies in kHz to
        include. If omitted, the default band of 8000 to 20000 kHz will
        be used. default: None
myFile : Optional[pydarn.sdio.radDataTypes.radDataPtr]
contains the pipeline to the data we want to plot. If specified,
data will be plotted from the file pointed to by myFile. default: None
xtick_size : Optional[int]
fontsize of xtick labels
ytick_size : Optional[int]
fontsize of ytick labels
xticks : Optional[list]
datetime.datetime objects indicating the location of xticks
axvlines : Optoinal[list]
datetime.datetime objects indicating the location vertical lines
marking the plot
plot_terminator : Optional[boolean]
Overlay the day/night terminator.
cpidchange_lims : Optional[int]
Input the limit on the amount of CPID changes for the CPID
panel. Default is 20.
Returns
-------
    rti_fig : matplotlib figure or None
        The figure containing the plot, or None if no data are available
        for the requested time, radar, and frequency band.
Example
-------
    from davitpy import pydarn
    import datetime as dt
pydarn.plotting.rti.plot_rti(dt.datetime(2013,3,16), 'bks',
eTime=dt.datetime(2013,3,16,14,30),
bmnum=12, fileType='fitacf',
scales=[[-500,500],[],[]], coords='geo',
colors='aj', filtered=True, show=True,
cpidchange_lims=2)
Written by AJ 20121002
Modified by Matt W. 20130715
Modified by Nathaniel F. 20131031 (added plot_terminator)
Modified by ASR 20150917 (refactored)
"""
import os
from davitpy import pydarn
from davitpy import utils
import numpy as np
from datetime import datetime, timedelta
from matplotlib import pyplot
from matplotlib.dates import DateFormatter
import matplotlib.cm as cm
# Time how long this is going to take
timing_start = datetime.now()
# NOTE TO DEVS: List of available params. Can be simply expanded
# as more parameters are added to SuperDARN data set (like index
# of refraction)
available_params = ['power', 'velocity', 'width', 'elevation', 'phi0',
'velocity_error']
default_scales = [[0, 30], [-200, 200], [0, 150], [0, 50],
[-np.pi, np.pi], [0, 200]]
available_text = 'Allowable parameters are '
for p in available_params:
available_text = available_text + p + ', '
available_text = available_text[:-2]
# Check the inputs
assert(isinstance(sTime, datetime)), logging.error(
'sTime must be a datetime object')
assert(isinstance(rad, str) and len(rad) == 3), logging.error(
'rad must be a string 3 chars long')
assert(isinstance(eTime, datetime) or
eTime is None), (
logging.error('eTime must be a datetime object or None'))
if eTime is None:
eTime = sTime + timedelta(days=1)
assert(sTime < eTime), logging.error("eTime must be greater than sTime!")
assert(coords == 'gate' or coords == 'rng' or coords == 'geo' or
coords == 'mag'), logging.error("coords must be one of 'gate', "
"'rng', 'geo', 'mag'")
assert(isinstance(bmnum, int)), logging.error('beam must be integer')
assert(0 < len(params) < 6), (
logging.error('must input between 1 and 5 params in LIST form'))
for i in range(0, len(params)):
assert(params[i] in available_params), (
logging.error(available_text))
for i in range(0, len(scales)):
assert(isinstance(scales[i], list)), (
logging.error('each item in scales must be a list of upper and '
'lower bounds on paramaters.'))
assert(scales == [] or
len(scales) == len(params)), (
logging.error('if present, scales must have same number of elements '
'as params'))
assert(yrng == -1 or
(isinstance(yrng, list) and
yrng[0] <= yrng[1])), (
logging.error('yrng must equal -1 or be a list with the 2nd element '
'larger than the first'))
assert((colors == 'lasse' or
colors == 'aj')) or isinstance(colors, list), (
logging.error("Valid inputs for color are 'lasse' and 'aj' or a list "
"of matplotlib colormaps"))
assert((isinstance(txfreq_lims, list) and len(txfreq_lims) == 2) or
isinstance(txfreq_lims, type(None))), (
logging.error("txfreq_lims must be a list with the start and "
"end frequencies"))
assert((isinstance(cpidchange_lims, int) and cpidchange_lims > 0)), (
logging.error("cpidchange_lims must be an integer and greater "
"than zero"))
# Assign any default color scale parameter limits.
tscales = []
for i in range(0, len(params)):
if(scales == [] or scales[i] == []):
if(params[i] in available_params):
ind = available_params.index(params[i])
tscales.append(default_scales[ind])
else: tscales.append(scales[i])
scales = tscales
# Assign default frequency band.
if txfreq_lims is None:
tband = [8000, 20000]
else:
assert(txfreq_lims[0] < txfreq_lims[1]), (
logging.error("Starting frequency must be less "
"than ending frequency!"))
tband = txfreq_lims
# Open the file if a pointer was not given to us
# if fileName is specified then it will be read.
if not myFile:
from davitpy.pydarn.sdio import radDataOpen
myFile = radDataOpen(sTime, rad, eTime, channel=channel, bmnum=bmnum,
fileType=fileType, filtered=filtered,
fileName=fileName)
# Check that we have data available now that we may have tried
# to read it using radDataOpen.
if myFile is None:
logging.error('No files available for the requested '
'time/radar/filetype combination')
return None
# Make sure that we will only plot data for the time range specified
# by sTime and eTime.
if (myFile.sTime <= sTime and myFile.eTime > sTime and
myFile.eTime >= eTime):
myFile.sTime = sTime
myFile.eTime = eTime
else:
        # If the time range is not fully covered by the file, warn the user.
logging.warning('Data not available in myFile for the whole of '
'sTime to eTime!')
# Finally we can start reading the data file
myBeam = myFile.readRec()
if myBeam is None:
logging.error('Problem reading the data.')
return None
# Now read the data that we need to make the plots
data_dict = read_data(myFile, bmnum, params, tband)
    # Check to ensure that data exists for the requested frequency
    # band; if not, log an error and return None.
if len(data_dict['freq']) == 0:
logging.error('No data found in frequency range ' +
str(tband[0]) + ' kHz to ' +
str(tband[1]) + ' kHz')
return None
# Create a figure.
rti_fig = pyplot.figure(figsize=(11, 8.5))
# Create the axes for noise, tx freq, and cpid.
noise_pos = [.1, .88, .76, .06]
freq_pos = [.1, .82, .76, .06]
cpid_pos = [.1, .77, .76, .05]
skynoise_ax = rti_fig.add_axes(noise_pos, label='sky')
searchnoise_ax = rti_fig.add_axes(noise_pos, label='search',
frameon=False)
freq_ax = rti_fig.add_axes(freq_pos, label='freq')
nave_ax = rti_fig.add_axes(freq_pos, label='nave', frameon=False)
cpid_ax = rti_fig.add_axes(cpid_pos)
# Give the plot a title.
rti_title(rti_fig, sTime, rad, fileType, bmnum, eTime=eTime)
# Plot the sky noise.
plot_skynoise(skynoise_ax, data_dict['times'],
data_dict['nsky'])
# Plot the search noise.
plot_searchnoise(searchnoise_ax, data_dict['times'],
data_dict['nsch'])
# plot the frequency bar.
plot_freq(freq_ax, data_dict['times'],
data_dict['freq'])
# Plot the nave data.
plot_nave(nave_ax, data_dict['times'],
data_dict['nave'])
# Plot the cpid bar
plot_cpid(cpid_ax, data_dict['times'],
data_dict['cpid'], data_dict['mode'],
cpidchange_lims)
# Plot each of the parameter panels.
figtop = .77
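    # Parameter panels are slightly shorter when the plot spans more than one
    # day, leaving room for the two-line date/time tick labels.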
if ((eTime - sTime) <= timedelta(days=1)) and \
(eTime.day == sTime.day):
figheight = .72 / len(params)
elif ((eTime - sTime) > timedelta(days=1)) or \
(eTime.day != sTime.day):
figheight = .70 / len(params)
for p in range(len(params)):
# Use draw_axes to create and set formatting of the axes to
# plot to.
pos = [.1, figtop - figheight * (p + 1) + .02, .76,
figheight - .02]
ax = draw_axes(rti_fig, data_dict['times'], rad,
data_dict['cpid'], bmnum,
data_dict['nrang'],
data_dict['frang'], data_dict['rsep'],
p == len(params) - 1, yrng=yrng, coords=coords,
pos=pos, xtick_size=xtick_size,
ytick_size=ytick_size, xticks=xticks,
axvlines=axvlines)
if(params[p] == 'velocity'): pArr = data_dict['vel']
elif(params[p] == 'power'): pArr = data_dict['pow']
elif(params[p] == 'width'): pArr = data_dict['wid']
elif(params[p] == 'elevation'): pArr = data_dict['elev']
elif(params[p] == 'phi0'): pArr = data_dict['phi0']
elif(params[p] == 'velocity_error'):
pArr = data_dict['velocity_error']
if(pArr == []): continue
# Generate the color map.
if colors in ['aj', 'lasse']:
cmap, norm, bounds = utils.plotUtils.genCmap(params[p], scales[p],
colors=colors,
lowGray=low_gray)
else:
from matplotlib import colors as mpl_colors
norm = mpl_colors.Normalize(vmin=scales[p][0], vmax=scales[p][1])
cmap = cm.get_cmap(colors[p])
# Plot the data to the axis object.
pcoll = rti_panel(ax, data_dict, pArr, gsct, rad, bmnum, coords, cmap,
norm, plot_terminator=plot_terminator)
# Set xaxis formatting depending on amount of data plotted.
if ((eTime - sTime) <= timedelta(days=1)) and \
(eTime.day == sTime.day):
ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
elif ((eTime - sTime) > timedelta(days=1)) or \
(eTime.day != sTime.day):
ax.xaxis.set_major_formatter(DateFormatter('%d/%m/%y \n%H:%M'))
ax.set_xlabel('UT')
# Draw the colorbar.
cb = utils.drawCB(rti_fig, pcoll, cmap, norm, map_plot=0,
pos=[pos[0] + pos[2] + .02, pos[1], 0.02,
pos[3]])
if colors in ['aj', 'lasse']:
# Label the colorbar.
l = []
# Define the colorbar labels.
for i in range(0, len(bounds)):
if(params[p] == 'phi0'):
ln = 4
if(bounds[i] == 0): ln = 3
elif(bounds[i] < 0): ln = 5
l.append(str(bounds[i])[:ln])
continue
if((i == 0 and
(params[p] == 'velocity' or
params[p] == 'velocity_error')) or i == len(bounds) - 1):
l.append(' ')
continue
l.append(str(int(bounds[i])))
cb.ax.set_yticklabels(l)
else:
# Turn off the edges that are drawn by drawCB unless we are
# doing 'aj' or 'lasse' colors
cb.dividers.set_visible(False)
# Set colorbar ticklabel size.
for t in cb.ax.get_yticklabels():
t.set_fontsize(9)
# Set colorbar label.
if(params[p] == 'velocity'):
cb.set_label('Velocity [m/s]', size=10)
if(params[p] == 'grid'): cb.set_label('Velocity [m/s]', size=10)
if(params[p] == 'power'): cb.set_label('SNR [dB]', size=10)
if(params[p] == 'width'): cb.set_label('Spec Wid [m/s]', size=10)
if(params[p] == 'elevation'): cb.set_label('Elev [deg]', size=10)
if(params[p] == 'phi0'): cb.set_label('Phi0 [rad]', size=10)
if(params[p] == 'velocity_error'):
cb.set_label('Velocity Error [m/s]', size=10)
if show:
rti_fig.show()
logging.info('plotting took:' + str(datetime.now() - timing_start))
return rti_fig
def draw_axes(myFig, times, rad, cpid, bmnum, nrang, frang, rsep, bottom,
yrng=-1, coords='gate', pos=[.1, .05, .76, .72], xtick_size=9,
ytick_size=9, xticks=None, axvlines=None):
""" Draws empty axes for an rti plot.
Parameters
----------
myFig :
the MPL figure we are plotting to
times : list
a list of datetime objects referencing the beam soundings
rad : str
3 letter radar code
cpid : list
        list of the cpids of the beam soundings
bmnum : int
beam number being plotted
nrang : list
list of nrang for the beam soundings
frang : list
list of frang of the beam soundings
rsep : list
list of rsep of the beam soundings
bottom : bool
flag indicating if we are at the bottom of the figure
yrng : Optional[list]
range of y axis, -1=autoscale (default)
coords : Optional[ ]
y axis coordinate system, acceptable values are 'geo',
'mag', 'gate', 'rng'
pos : Optional[ ]
position of the plot
xtick_size : Optional[ ]
fontsize of xtick labels
ytick_size : Optional[ ]
fontsize of ytick labels
xticks : Optional[list]
datetime.datetime objects indicating the location of xticks
axvlines : Optional[list]
        datetime.datetime objects indicating the locations of vertical
        lines to mark on the plot
Returns
-------
ax :
an axes object
Example
-------
ax = draw_axes(myFig,times,rad,cpid,beam,nrang,frang,rsep,0)
Written by AJ 20121002
Modified by ASR 20150917 (refactored)
"""
from davitpy import pydarn
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.dates import SecondLocator, DateFormatter, date2num
from matplotlib.lines import Line2D
import numpy as np
nrecs = len(times)
# Add an axes object to the figure.
ax = myFig.add_axes(pos)
ax.yaxis.set_tick_params(direction='out')
ax.xaxis.set_tick_params(direction='out')
ax.yaxis.set_tick_params(direction='out', which='minor')
ax.xaxis.set_tick_params(direction='out', which='minor')
# Draw the axes.
ax.plot_date(date2num(times), np.arange(len(times)),
fmt='w', tz=None, xdate=True, ydate=False, alpha=0.0)
# Determine the yaxis min/max unless it's been specified
if(yrng == -1):
ymin, ymax = 99999999, -999999999
if(coords != 'gate'):
oldCpid = -99999999
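            # Recompute the y-limits only when the control program changes,
            # since nrang/rsep (and hence the field of view) may change with it.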
for i in range(len(cpid)):
if(cpid[i] == oldCpid): continue
oldCpid = cpid[i]
if(coords == 'geo' or coords == 'mag'):
# HACK NOT SURE IF YOU CAN DO THIS(Formatting)!
site = pydarn.radar.network().getRadarByCode(rad) \
.getSiteByDate(times[i])
myFov = pydarn.radar.radFov.fov(site=site, ngates=nrang[i],
nbeams=site.maxbeam,
rsep=rsep[i],
coords=coords,
date_time=times[i])
if(myFov.latFull[bmnum].max() > ymax):
ymax = myFov.latFull[bmnum].max()
if(myFov.latFull[bmnum].min() < ymin):
ymin = myFov.latFull[bmnum].min()
else:
ymin = 0
if(nrang[i] * rsep[i] + frang[i] > ymax):
ymax = nrang[i] * rsep[i] + frang[i]
else:
ymin, ymax = 0, max(nrang)
else:
ymin, ymax = yrng[0], yrng[1]
# Format the xaxis.
xmin = date2num(times[0])
xmax = date2num(times[len(times) - 1])
xrng = (xmax - xmin)
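    # Major ticks roughly every 1/6 and minor ticks every 1/24 of the plotted
    # span; xrng is in days, so the factor 86400 converts the interval to
    # seconds for SecondLocator.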
inter = int(round(xrng / 6. * 86400.))
inter2 = int(round(xrng / 24. * 86400.))
ax.xaxis.set_minor_locator(SecondLocator(interval=inter2))
ax.xaxis.set_major_locator(SecondLocator(interval=inter))
    # Only draw xtick labels if this axis is at the bottom of the figure.
if(not bottom):
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(0)
else:
if xticks is not None:
ax.xaxis.set_ticks(xticks)
if axvlines is not None:
for line in axvlines:
ax.axvline(line, color='0.25', ls='--')
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(xtick_size)
ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
ax.xaxis.set_label_text('UT')
# Set ytick size.
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(ytick_size)
# Format yaxis depending on coords.
if(coords == 'gate'):
ax.yaxis.set_label_text('Range gate', size=10)
ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.yaxis.set_major_locator(MultipleLocator((ymax - ymin) / 5.))
ax.yaxis.set_minor_locator(MultipleLocator((ymax - ymin) / 25.))
elif(coords == 'geo' or coords == 'mag'):
if(coords == 'mag'):
ax.yaxis.set_label_text('Mag Lat [deg]', size=10)
else:
ax.yaxis.set_label_text('Geo Lat [deg]', size=10)
elif(coords == 'rng'):
ax.yaxis.set_label_text('Slant Range [km]', size=10)
ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.yaxis.set_major_locator(MultipleLocator(1000))
ax.yaxis.set_minor_locator(MultipleLocator(250))
ax.set_ylim(bottom=ymin, top=ymax)
return ax
def rti_title(fig, sTime, rad, fileType, beam, eTime=None, xmin=.1, xmax=.86):
"""Draws title for an rti plot.
Parameters
----------
fig : matplotlib.figure.Figure
a matplotlib.figure.Figure object
sTime : datetime
the start time for the data being plotted as a datetime object
rad : str
the 3 letter radar code
fileType : str
the file type being plotted
beam : int
the beam number being plotted
eTime : Optional[datetime]
the end time for the data being plotted as a datetime object
xmin : Optional[ ]
        minimum x value of the plot in page coords
    xmax : Optional[ ]
        maximum x value of the plot in page coords
Returns
-------
Nothing.
Example
-------
import datetime as dt
from matplotlib import pyplot
fig = pyplot.figure()
rti_title(fig,dt.datetime(2011,1,1),'bks','fitex',7)
Written by AJ 20121002
Modified by ASR 20150916
"""
from davitpy import pydarn
from datetime import timedelta
import calendar
# Obtain the davitpy.pydarn.radar.radStruct.radar object for rad.
r = pydarn.radar.network().getRadarByCode(rad)
# Plot the main title
fig.text(xmin, .95, r.name + ' (' + fileType + ')', ha='left', weight=550)
# Determine what time information should be plotted in the secondary title
if ((eTime is not None) and
(((eTime - sTime) > timedelta(days=1)) or
(eTime.day != sTime.day))):
title_text = str(sTime.day) + ' ' \
+ calendar.month_name[sTime.month][:3] + ' ' \
+ str(sTime.year) + ' - ' + str(eTime.day) + ' ' \
+ calendar.month_name[eTime.month][:3] + ' ' \
+ str(eTime.year)
else:
title_text = str(sTime.day) + ' ' \
+ calendar.month_name[sTime.month][:3] + ' ' \
+ str(sTime.year)
# Plot the secondary title.
fig.text((xmin + xmax) / 2., .95, title_text, weight=550,
size='large', ha='center')
fig.text(xmax, .95, 'Beam ' + str(beam), weight=550, ha='right')
def plot_cpid(ax, times, cpid, mode, cpidchange_lims):
"""Plots control program ID (cpid) panel at position pos.
Parameters
----------
ax :
a MPL axis object to plot to
times : list
a list of the times of the beam soundings
cpid : list
a list of the cpids of the beam soundings.
mode : list
a list of the ifmode param
cpidchange_lims : int
Limit on the number of times the cpid can change
Returns
-------
Nothing.
Example
-------
plot_cpid(ax,times,cpid,mode, cpidchange_lims=10)
Written by AJ 20121002
Modified by ASR 20150916
"""
from davitpy import pydarn
from matplotlib.ticker import MultipleLocator
from matplotlib.dates import SecondLocator
from matplotlib.dates import date2num
from datetime import timedelta
import numpy as np
oldCpid = -9999999
# Format the yaxis.
ax.yaxis.tick_left()
ax.yaxis.set_tick_params(direction='out')
ax.set_ylim(bottom=0, top=1)
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_tick_params(direction='out', which='minor')
# Draw the axes.
ax.plot_date(date2num(times), np.arange(len(times)),
fmt='w', tz=None, xdate=True, ydate=False, alpha=0.0)
# Initialize CPID change counter
cpid_change = 0
# Label the CPIDs.
for i in range(0, len(times)):
if(cpid[i] != oldCpid):
cpid_change += 1
            # If the CPID changes too often, the labels won't be legible
if (cpid_change >= cpidchange_lims):
# Clear the current axis
ax.cla()
# Kick out error messages
diff_time = (times[-1] - times[0]).total_seconds() / 2.
cpid_time = times[0] + timedelta(seconds=diff_time)
temp = ', '.join([str(x) for x in list(set(cpid))])
cpid_text = 'CPIDs: ' + temp
ax.text(cpid_time, .5, cpid_text,
ha='center', va='center', size=10)
logging.error('CPID is changing too frequently to be '
'legibly printed. Please consider using '
'radDataOpen cp param. CPIDs found: ' +
str(list(set(cpid))))
break
ax.plot_date([date2num(times[i]), date2num(times[i])],
[0, 1], fmt='k-', tz=None, xdate=True, ydate=False)
oldCpid = cpid[i]
s = ' ' + pydarn.radar.radUtils.getCpName(oldCpid)
istr = ' '
if(mode[i] == 1): istr = ' IF'
            if(mode[i] == 0): istr = ' RF'
ax.text(times[i], .5, ' ' + str(oldCpid) + s + istr, ha='left',
va='center', size=10)
# Format the xaxis.
xmin = date2num(times[0])
xmax = date2num(times[len(times) - 1])
xrng = (xmax - xmin)
inter = int(round(xrng / 6. * 86400.))
inter2 = int(round(xrng / 24. * 86400.))
ax.xaxis.set_minor_locator(SecondLocator(interval=inter2))
ax.xaxis.set_major_locator(SecondLocator(interval=inter))
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(0)
# Identify the CPID axis with a label.
fig = ax.get_figure()
bb = ax.get_position()
x0 = bb.x0
y0 = bb.y0
height = bb.height
width = bb.width
pos = [x0, y0, width, height]
fig.text(pos[0] - .07, pos[1] + pos[3] / 2., 'CPID', ha='center',
va='center', size=8.5, rotation='vertical')
ax.set_yticks([])
def plot_skynoise(ax, times, sky, xlim=None, xticks=None):
"""Plots a noise panel at position pos.
Parameters
----------
ax :
a MPL axis object to plot to
times : list
a list of the times of the beam soundings
    sky : list
        a list of the noise.sky of the beam soundings
xlim : Optional[list]
2-element limits of the x-axis. None for default.
xticks : Optional[list]
        List of xtick positions. None for default.
Returns
-------
Nothing
Example
-------
plot_skynoise(ax,times,sky)
Written by AJ 20121002
Modified by NAF 20131101
Modified by ASR 20150916
"""
from matplotlib.ticker import MultipleLocator
from matplotlib.dates import date2num
from matplotlib.lines import Line2D
import numpy as np
# Format the yaxis.
ax.yaxis.tick_left()
ax.yaxis.set_tick_params(direction='out')
ax.set_ylim(bottom=0, top=6)
ax.yaxis.set_minor_locator(MultipleLocator())
ax.yaxis.set_tick_params(direction='out', which='minor')
# Plot the sky noise data.
ax.plot_date(date2num(times), np.log10(sky), fmt='k-',
tz=None, xdate=True, ydate=False)
# Format the xaxis.
if xlim is not None: ax.set_xlim(xlim)
if xticks is not None: ax.set_xticks(xticks)
# Add labels to identify the noise axis.
fig = ax.get_figure()
bb = ax.get_position()
x0 = bb.x0
y0 = bb.y0
height = bb.height
width = bb.width
pos = [x0, y0, width, height]
fig.text(pos[0] - .01, pos[1] + .004, '10^0', ha='right', va='bottom',
size=8)
fig.text(pos[0] - .01, pos[1] + pos[3], '10^6', ha='right', va='top',
size=8)
fig.text(pos[0] - .07, pos[1] + pos[3] / 2., 'N.Sky', ha='center',
va='center', size=8.5, rotation='vertical')
l = Line2D([pos[0] - .06, pos[0] - .06], [pos[1] + .01,
pos[1] + pos[3] - .01], transform=fig.transFigure,
clip_on=False, ls='-', color='k', lw=1.5)
ax.add_line(l)
ax.set_xticklabels([' '])
# Only use 2 major yticks.
ax.set_yticks([0, 6])
ax.set_yticklabels([' ', ' '])
def plot_searchnoise(ax, times, search, xlim=None, xticks=None,
ytickside='right'):
"""Plots a noise panel at position pos.
Parameters
----------
ax :
a MPL axis object to plot to
times : list
a list of the times of the beam soundings
    search : list
        a list of the noise.search of the beam soundings
xlim : Optional[list]
2-element limits of the x-axis. None for default.
xticks : Optional[list]
        List of xtick positions. None for default.
ytickside : Optional[string]
Default is right.
Returns
-------
Nothing
Example
-------
plot_searchnoise(ax,times,search)
Written by AJ 20121002
Modified by NAF 20131101
Modified by ASR 20150916
"""
from matplotlib.ticker import MultipleLocator
from matplotlib.dates import date2num
from matplotlib.lines import Line2D
import numpy as np
# Format the yaxis.
ax.yaxis.tick_left()
ax.yaxis.set_tick_params(direction='out')
ax.set_ylim(bottom=0, top=6)
ax.yaxis.set_minor_locator(MultipleLocator())
ax.yaxis.set_tick_params(direction='out', which='minor')
# Plot the search noise data.
ax.plot_date(date2num(times), np.log10(search),
fmt='k:', tz=None, xdate=True, ydate=False, lw=1.5)
# Format the xaxis.
if xlim is not None: ax.set_xlim(xlim)
if xticks is not None: ax.set_xticks(xticks)
# Add labels to identify the noise axis.
fig = ax.get_figure()
bb = ax.get_position()
x0 = bb.x0
y0 = bb.y0
height = bb.height
width = bb.width
pos = [x0, y0, width, height]
fig.text(pos[0] + pos[2] + .01, pos[1] + .004, '10^0', ha='left',
va='bottom', size=8)
fig.text(pos[0] + pos[2] + .01, pos[1] + pos[3], '10^6', ha='left',
va='top', size=8)
fig.text(pos[0] + pos[2] + .06, pos[1] + pos[3] / 2., 'N.Sch', ha='center',
va='center', size=8.5, rotation='vertical')
l = Line2D([pos[0] + pos[2] + .07, pos[0] + pos[2] + .07],
[pos[1] + .01, pos[1] + pos[3] - .01],
transform=fig.transFigure, clip_on=False, ls=':',
color='k', lw=1.5)
ax.add_line(l)
ax.set_xticklabels([' '])
# use only 2 major yticks
ax.set_yticks([0, 6])
ax.set_yticklabels([' ', ' '])
if ytickside == 'right':
ax.yaxis.tick_right()
def plot_freq(ax, times, freq, xlim=None, xticks=None):
"""Plots the tx frequency data to an axis object.
Parameters
----------
ax :
a MPL axis object to plot to
times : list
a list of the times of the beam soundings
freq : list
a list of the tfreq of the beam soundings
xlim : Optional[list]
2-element limits of the x-axis. None for default.
xticks : Optional[list]
        List of xtick positions. None for default.
Returns
-------
Nothing.
Example
-------
plot_freq(ax, times, tfreq)
Written by AJ 20121002
Modified by NAF 20131101
Modified by ASR 20150916
"""
from matplotlib.ticker import MultipleLocator
from matplotlib.dates import date2num
from matplotlib.lines import Line2D
# Format the yaxis.
ax.yaxis.tick_left()
ax.yaxis.set_tick_params(direction='out')
ax.set_ylim(bottom=8, top=20)
ax.yaxis.set_minor_locator(MultipleLocator())
ax.yaxis.set_tick_params(direction='out', which='minor')
# Plot the TX frequency.
ax.plot_date(date2num(times), freq, fmt='k-',
tz=None, xdate=True, ydate=False, markersize=2)
# Format the xaxis.
if xlim is not None: ax.set_xlim(xlim)
if xticks is not None: ax.set_xticks(xticks)
# Add labels to identify the frequency axis.
fig = ax.get_figure()
bb = ax.get_position()
x0 = bb.x0
y0 = bb.y0
height = bb.height
width = bb.width
pos = [x0, y0, width, height]
fig.text(pos[0] - .01, pos[1] + .005, '10', ha='right', va='bottom',
size=8)
fig.text(pos[0] - .01, pos[1] + pos[3] - .015, '16', ha='right', va='top',
size=8)
fig.text(pos[0] - .07, pos[1] + pos[3] / 2., 'Freq', ha='center',
va='center', size=9, rotation='vertical')
fig.text(pos[0] - .05, pos[1] + pos[3] / 2., '[MHz]', ha='center',
va='center', size=7, rotation='vertical')
l = Line2D([pos[0] - .04, pos[0] - .04], [pos[1] + .01,
pos[1] + pos[3] - .01], transform=fig.transFigure,
clip_on=False, ls='-', color='k', lw=1.5)
ax.add_line(l)
ax.set_xticklabels([' '])
# use only 2 major yticks
ax.set_yticks([10, 16])
ax.set_yticklabels([' ', ' '])
def plot_nave(ax, times, nave, xlim=None, xticks=None, ytickside='right'):
"""Plots the number of averages (nave) data to an axis object.
Parameters
----------
ax :
a MPL axis object to plot to
times : list
a list of the times of the beam soundings
nave : list
a list of the nave of the beam soundings
xlim : Optional[list]
2-element limits of the x-axis. None for default.
xticks : Optional[list]
        List of xtick positions. None for default.
ytickside : Optional[str]
Default is right.
Returns
-------
Nothing.
Example
-------
plot_nave(ax, times, nave)
Written by AJ 20121002
Modified by NAF 20131101
Modified by ASR 20150916
"""
from matplotlib.ticker import MultipleLocator
from matplotlib.dates import date2num
from matplotlib.lines import Line2D
# Format the yaxis
ax.yaxis.tick_left()
ax.yaxis.set_tick_params(direction='out')
ax.set_ylim(bottom=0, top=80)
ax.yaxis.set_minor_locator(MultipleLocator(base=5))
ax.yaxis.set_tick_params(direction='out', which='minor')
# Plot the number of averages.
ax.plot_date(date2num(times), nave, fmt='k:',
tz=None, xdate=True, ydate=False, markersize=2)
# Format the xaxis.
if xlim is not None: ax.set_xlim(xlim)
if xticks is not None: ax.set_xticks(xticks)
# Add labels to identify the nave axis.
fig = ax.get_figure()
bb = ax.get_position()
x0 = bb.x0
y0 = bb.y0
height = bb.height
width = bb.width
pos = [x0, y0, width, height]
fig.text(pos[0] + pos[2] + .01, pos[1] - .004, '0', ha='left', va='bottom',
size=8)
fig.text(pos[0] + pos[2] + .01, pos[1] + pos[3], '80', ha='left', va='top',
size=8)
fig.text(pos[0] + pos[2] + .06, pos[1] + pos[3] / 2., 'Nave', ha='center',
va='center', size=8.5, rotation='vertical')
l = Line2D([pos[0] + pos[2] + .07, pos[0] + pos[2] + .07],
[pos[1] + .01, pos[1] + pos[3] - .01],
transform=fig.transFigure, clip_on=False, ls=':',
color='k', lw=1.5)
ax.add_line(l)
ax.set_xticklabels([' '])
# use only 2 major yticks
ax.set_yticks([0, 80])
ax.set_yticklabels([' ', ' '])
if ytickside == 'right':
ax.yaxis.tick_right()
def read_data(myPtr, bmnum, params, tbands):
"""Reads data from the file pointed to by myPtr
    Parameters
    ----------
myPtr :
a davitpy file pointer object
bmnum : int
beam number of data to read in
params : list
a list of the parameters to read
tbands : list
a list of the frequency bands to separate data into
Returns
-------
    A dictionary of the data. Data is stored in lists and separated
    into tbands.
Example
-------
from davitpy import pydarn
from datetime import datetime
myPtr = pydarn.sdio.radDataOpen(datetime(2012,11,24),'sas')
myBeam = myPtr.readRec()
    data_dict = read_data(myPtr, 7, ['velocity'], [8000, 20000])
Written by ASR 20150914
"""
import numpy as np
# Initialize some things.
data = dict()
data_keys = ['vel', 'pow', 'wid', 'elev', 'phi0', 'times', 'freq', 'cpid',
'nave', 'nsky', 'nsch', 'slist', 'mode', 'rsep', 'nrang',
'frang', 'gsflg', 'velocity_error']
for d in data_keys:
data[d] = []
# Read the parameters of interest.
myPtr.rewind()
myBeam = myPtr.readRec()
while(myBeam is not None):
if(myBeam.time > myPtr.eTime): break
if(myBeam.bmnum == bmnum and (myPtr.sTime <= myBeam.time)):
if (myBeam.prm.tfreq >= tbands[0] and
myBeam.prm.tfreq <= tbands[1]):
data['times'].append(myBeam.time)
data['cpid'].append(myBeam.cp)
data['nave'].append(myBeam.prm.nave)
data['nsky'].append(myBeam.prm.noisesky)
data['rsep'].append(myBeam.prm.rsep)
data['nrang'].append(myBeam.prm.nrang)
data['frang'].append(myBeam.prm.frang)
data['nsch'].append(myBeam.prm.noisesearch)
data['freq'].append(myBeam.prm.tfreq / 1e3)
data['slist'].append(myBeam.fit.slist)
data['mode'].append(myBeam.prm.ifmode)
data['gsflg'].append(myBeam.fit.gflg)
# To save time and RAM, only keep the data specified
# in params.
if('velocity' in params):
data['vel'].append(myBeam.fit.v)
if('power' in params):
data['pow'].append(myBeam.fit.p_l)
if('width' in params):
data['wid'].append(myBeam.fit.w_l)
if('elevation' in params):
data['elev'].append(myBeam.fit.elv)
if('phi0' in params):
data['phi0'].append(myBeam.fit.phi0)
if('velocity_error' in params):
data['velocity_error'].append(myBeam.fit.v_e)
myBeam = myPtr.readRec()
return data
def rti_panel(ax, data_dict, pArr, gsct, rad, bmnum, coords, cmap,
norm, plot_terminator=True):
"""Plots the data given by pArr to an axis object.
Parameters
----------
ax :
a MPL axis object to plot to
data_dict :
the data dictionary returned by pydarn.plotting.read_data
pArr : list
the list of data to be plotted (e.g. data_dict['vel'] for
velocity)
gsct : bool
a boolean stating whether to flag ground scatter data or not
rad : str
the 3 letter radar code
bmnum : int
The beam number of the data to plot
coords : str
plotting coordinates ('gate', 'range', 'geo', 'mag')
cmap :
a matplotlib.colors.ListedColormap (such as that returned
by utils.plotUtils.genCmap)
norm :
a matplotlib.colors.BoundaryNorm (such as that returned by
utils.plotUtils.genCmap)
plot_terminator : Optional[bool]
A boolean stating whether or not to plot the terminator; default
is true.
Returns
-------
pcoll
        the polygon collection returned by matplotlib.pyplot.pcolormesh.
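    Example
    -------
    (illustrative; assumes data_dict from read_data and cmap/norm from
    utils.plotUtils.genCmap)
    pcoll = rti_panel(ax, data_dict, data_dict['vel'], True, 'bks', 7,
                      'gate', cmap, norm)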
Written by ASR 20150916
"""
from davitpy import pydarn
import matplotlib
from matplotlib.dates import date2num, num2date
import numpy as np
# Initialize things.
rmax = max(data_dict['nrang'])
tmax = (len(data_dict['times'])) * 2
data = np.zeros((tmax, rmax)) * np.nan
x = np.zeros(tmax)
tcnt = 0
# Build a list of datetimes to plot each data point at.
dt_list = []
for i in range(len(data_dict['times'])):
x[tcnt] = date2num(data_dict['times'][i])
dt_list.append(data_dict['times'][i])
if(i < len(data_dict['times']) - 1):
if(date2num(data_dict['times'][i + 1]) - x[tcnt] > 4. / 1440.):
tcnt += 1
# 1440 minutes in a day, hardcoded 1 minute step per data point
# but only if time between data points is > 4 minutes
x[tcnt] = x[tcnt - 1] + 1. / 1440.
dt_list.append(num2date(x[tcnt]))
tcnt += 1
if(pArr[i] == [] or pArr[i] is None): continue
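        # Fill the data grid at each recorded range gate; when ground-scatter
        # flagging (gsct) is enabled, flagged points are set to a -100000.
        # sentinel so they can be shaded separately from real data.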
if data_dict['slist'][i] is not None:
for j in range(len(data_dict['slist'][i])):
if(not gsct or data_dict['gsflg'][i][j] == 0):
data[tcnt][data_dict['slist'][i][j]] = pArr[i][j]
elif gsct and data_dict['gsflg'][i][j] == 1:
data[tcnt][data_dict['slist'][i][j]] = -100000.
# For geo or mag coords, get radar FOV lats/lons.
if (coords != 'gate' and coords != 'rng') or plot_terminator is True:
site = pydarn.radar.network().getRadarByCode(rad) \
.getSiteByDate(data_dict['times'][0])
myFov = pydarn.radar.radFov.fov(site=site, ngates=rmax,
nbeams=site.maxbeam,
rsep=data_dict['rsep'][0],
coords=coords,
date_time=data_dict['times'][0])
myLat = myFov.latCenter[bmnum]
myLon = myFov.lonCenter[bmnum]
# Determine the yaxis range limits to plot data to.
if(coords == 'gate'):
y = np.linspace(0, rmax, rmax + 1)
elif(coords == 'rng'):
y = np.linspace(data_dict['frang'][0],
rmax * data_dict['rsep'][0],
rmax + 1)
else:
y = myFov.latFull[bmnum]
# Generate a mesh of x and y coords to plot data to.
X, Y = np.meshgrid(x[:tcnt], y)
# Calculate terminator as required.
if plot_terminator:
daylight = np.ones([len(dt_list), len(myLat)], np.bool)
for tm_inx in range(len(dt_list)):
tm = dt_list[tm_inx]
term_lats, tau, dec = daynight_terminator(tm, myLon)
if dec > 0:
# NH Summer
day_inx = np.where(myLat < term_lats)[0]
else:
day_inx = np.where(myLat > term_lats)[0]
if day_inx.size != 0:
daylight[tm_inx, day_inx] = False
daylight = np.ma.array(daylight, mask=daylight)
ax.pcolormesh(X, Y, daylight.T, lw=0, alpha=0.10,
cmap=matplotlib.cm.binary_r, zorder=99)
# Mask the nan's in the data array so they aren't plotted.
Zm = np.ma.masked_where(np.isnan(data[:tcnt][:].T), data[:tcnt][:].T)
# Set colormap so that masked data (bad) is transparent.
cmap.set_bad('w', alpha=0.0)
# Now let's plot all data.
pcoll = ax.pcolormesh(X, Y, Zm, lw=0.01, edgecolors='None',
cmap=cmap, norm=norm)
return pcoll
def daynight_terminator(date, lons):
""" Return the coordinates of day/night terminator for RTI plotting.
Parameters
----------
date : datetime.datetime
a datetime.datetime object (assumed UTC)
lons : list
a numpy array of lons
Returns
-------
lat
the latitude of the day night terminator
tau
        Greenwich hour angle
dec
solar declination
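    Example
    -------
    (illustrative)
    import datetime as dt
    import numpy as np
    lats, tau, dec = daynight_terminator(dt.datetime(2013, 3, 16),
                                         np.arange(-180., 180., 1.))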
"""
import mpl_toolkits.basemap.solar as solar
import numpy as np
dg2rad = np.pi / 180.
# compute greenwich hour angle and solar declination
# from datetime object (assumed UTC).
tau, dec = solar.epem(date)
# compute day/night terminator from hour angle, declination.
longitude = lons + tau
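    # The terminator lies where the solar elevation angle is zero, i.e.
    # tan(lat) = -cos(longitude) / tan(dec).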
lats = np.arctan(-np.cos(longitude * dg2rad) /
np.tan(dec * dg2rad)) / dg2rad
return lats, tau, dec
| gpl-3.0 |
rohanp/scikit-learn | sklearn/tests/test_calibration.py | 62 | 12288 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
ignore_warnings,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@ignore_warnings
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
fallen/artiq | artiq/frontend/pdq2_client.py | 2 | 5086 | #!/usr/bin/python
# Copyright (C) 2012-2015 Robert Jordens <[email protected]>
import argparse
import time
from scipy import interpolate
import numpy as np
from artiq.protocols.pc_rpc import Client
from artiq.tools import verbosity_args, init_logger
def get_argparser():
parser = argparse.ArgumentParser(description="""PDQ2 client.
Evaluates times and voltages, interpolates and uploads
them to the controller.""")
parser.add_argument("-s", "--server", default="::1",
help="hostname or IP of the controller to connect to")
parser.add_argument("--port", default=3252, type=int,
help="TCP port to use to connect to the controller")
parser.add_argument("-c", "--channel", default=0, type=int,
help="channel: 3*board_num+dac_num [%(default)s]")
parser.add_argument("-f", "--frame", default=0, type=int,
help="frame [%(default)s]")
parser.add_argument("-t", "--times", default="np.arange(5)*1e-6",
help="sample times (s) [%(default)s]")
parser.add_argument("-u", "--voltages",
default="(1-np.cos(t/t[-1]*2*np.pi))/2",
help="sample voltages (V) [%(default)s]")
parser.add_argument("-a", "--aux", default=False, action="store_true",
                        help="auxiliary digital output [%(default)s]")
parser.add_argument("-o", "--order", default=3, type=int,
help="interpolation (0: const, 1: lin, 2: quad,"
" 3: cubic) [%(default)s]")
parser.add_argument("-p", "--plot", help="plot to file [%(default)s]")
parser.add_argument("-r", "--reset", default=False,
action="store_true", help="do reset before")
parser.add_argument("-m", "--dcm", default=False, action="store_true",
help="100MHz clock [%(default)s]")
parser.add_argument("-n", "--disarm", default=False, action="store_true",
help="disarm group [%(default)s]")
parser.add_argument("-e", "--free", default=False, action="store_true",
help="software trigger [%(default)s]")
parser.add_argument("-x", "--demo", default=False, action="store_true",
help="demo mode: pulse and chirp, 1V*ch+0.1V*frame"
" [%(default)s]")
parser.add_argument("-b", "--bit", default=False,
action="store_true", help="do bit test")
verbosity_args(parser)
return parser
def main():
args = get_argparser().parse_args()
init_logger(args)
dev = Client(args.server, args.port, "pdq2")
dev.init()
if args.reset:
dev.write(b"\x00\x00") # flush any escape
dev.cmd("RESET", True)
time.sleep(.1)
dev.cmd("START", False)
dev.cmd("ARM", False)
dev.cmd("DCM", args.dcm)
freq = 100e6 if args.dcm else 50e6
dev.set_freq(freq)
num_channels = dev.get_num_channels()
num_frames = dev.get_num_frames()
times = eval(args.times, globals(), {})
voltages = eval(args.voltages, globals(), dict(t=times))
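    # Demo mode: each frame gets the sample waveform offset by 1 V per channel
    # and 0.1 V per frame, followed by a segment with a chirp up to 10 MHz
    # (matching the "-x" help text above).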
if args.demo:
for ch, channel in enumerate(dev.channels):
entry = []
for fr in range(dev.channels[0].num_frames):
vi = .1*fr + ch + voltages
entry.append(channel.segment(times, vi, order=args.order,
end=False, aux=args.aux))
pi = 2*np.pi*(-.5 + .01*fr + .1*ch + 0*voltages)
fi = 10e6*times/times[-1]
channel.segment(2*times, voltages, pi, fi, trigger=False,
silence=True, aux=args.aux)
dev.write_channel(channel, entry)
elif args.bit:
v = [-1, 0, -1]
# for i in range(15):
# v.extend([(1 << i) - 1, 1 << i])
v = np.array(v)*dev.channels[0].max_out/dev.channels[0].max_val
t = np.arange(len(v))
for channel in dev.channels:
s = channel.segment(t, v, order=0, shift=15, stop=False,
trigger=False)
dev.write_channel(channel, [s for i in range(channel.num_frames)])
else:
c = dev.channels[args.channel]
map = [None] * c.num_frames
map[args.frame] = c.segment(times, voltages, order=args.order,
aux=args.aux)
dev.write_channel(c, map)
dev.cmd("START", True)
dev.cmd("ARM", not args.disarm)
dev.cmd("TRIGGER", args.free)
if args.plot:
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.plot(times, voltages, "xk", label="points")
if args.order > 0:
spline = interpolate.splrep(times, voltages, k=args.order)
ttimes = np.arange(0, times[-1], 1/freq)
vvoltages = interpolate.splev(ttimes, spline)
ax.plot(ttimes, vvoltages, ",b", label="interpolation")
fig.savefig(args.plot)
if __name__ == "__main__":
main()
| gpl-3.0 |
kubeflow/examples | github_issue_summarization/Pachyderm_Example/code/preprocess_data_for_deep_learning.py | 1 | 1834 | import argparse
import dill as dpickle
import numpy as np
from ktext.preprocess import processor
import pandas as pd
# Parsing flags.
parser = argparse.ArgumentParser()
parser.add_argument("--input_traindf_csv")
parser.add_argument("--output_body_preprocessor_dpkl")
parser.add_argument("--output_title_preprocessor_dpkl")
parser.add_argument("--output_train_title_vecs_npy")
parser.add_argument("--output_train_body_vecs_npy")
args = parser.parse_args()
print(args)
# Read data.
traindf = pd.read_csv(args.input_traindf_csv)
train_body_raw = traindf.body.tolist()
train_title_raw = traindf.issue_title.tolist()
# Clean, tokenize, and apply padding / truncating such that each document
# length = 70. Also, retain only the top 8,000 words in the vocabulary and set
# the remaining words to 1 which will become common index for rare words.
body_pp = processor(keep_n=8000, padding_maxlen=70)
train_body_vecs = body_pp.fit_transform(train_body_raw)
print('Example original body:', train_body_raw[0])
print('Example body after pre-processing:', train_body_vecs[0])
# Instantiate a text processor for the titles, with some different parameters.
title_pp = processor(append_indicators=True, keep_n=4500,
padding_maxlen=12, padding='post')
# process the title data
train_title_vecs = title_pp.fit_transform(train_title_raw)
print('Example original title:', train_title_raw[0])
print('Example title after pre-processing:', train_title_vecs[0])
# Save the preprocessor.
with open(args.output_body_preprocessor_dpkl, 'wb') as f:
dpickle.dump(body_pp, f, protocol=2)
with open(args.output_title_preprocessor_dpkl, 'wb') as f:
dpickle.dump(title_pp, f, protocol=2)
# Save the processed data.
np.save(args.output_train_title_vecs_npy, train_title_vecs)
np.save(args.output_train_body_vecs_npy, train_body_vecs)
| apache-2.0 |
ankurankan/scikit-learn | sklearn/metrics/regression.py | 27 | 9558 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array-like of shape = [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples, n_outputs]
Estimated target values.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of outputs "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
y_type = 'continuous' if y_true.shape[1] == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred
def _average_and_variance(values, sample_weight=None):
"""
Compute the (weighted) average and variance.
Parameters
----------
values : array-like of shape = [n_samples] or [n_samples, n_outputs]
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average : float
The weighted average
variance : float
The weighted variance
"""
values = np.asarray(values)
if values.ndim == 1:
values = values.reshape((-1, 1))
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 1:
sample_weight = sample_weight.reshape((-1, 1))
average = np.average(values, weights=sample_weight)
variance = np.average((values - average)**2, weights=sample_weight)
return average, variance
def mean_absolute_error(y_true, y_pred, sample_weight=None):
"""Mean absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(np.abs(y_pred - y_true).mean(axis=1),
weights=sample_weight)
def mean_squared_error(y_true, y_pred, sample_weight=None):
"""Mean squared error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(((y_pred - y_true) ** 2).mean(axis=1),
weights=sample_weight)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred, sample_weight=None):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like
Ground truth (correct) target values.
y_pred : array-like
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
The explained variance.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type != "continuous":
raise ValueError("{0} is not supported".format(y_type))
_, numerator = _average_and_variance(y_true - y_pred, sample_weight)
_, denominator = _average_and_variance(y_true, sample_weight)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def r2_score(y_true, y_pred, sample_weight=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
The R^2 score.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(dtype=np.float64)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
| bsd-3-clause |
jaeilepp/mne-python | mne/preprocessing/tests/test_ica.py | 1 | 32127 | from __future__ import print_function
# Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import warnings
from nose.tools import (assert_true, assert_raises, assert_equal, assert_false,
assert_not_equal, assert_is_none)
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from scipy import stats
from itertools import product
from mne import (Epochs, read_events, pick_types, create_info, EpochsArray,
EvokedArray, Annotations)
from mne.cov import read_cov
from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
read_ica, run_ica)
from mne.preprocessing.ica import (get_score_funcs, corrmap, _sort_components,
_ica_explained_variance)
from mne.io import read_raw_fif, Info, RawArray
from mne.io.meas_info import _kind_dict
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.tests.common import assert_naming
from mne.utils import (catch_logging, _TempDir, requires_sklearn, slow_test,
run_tests_if_main)
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 6
score_funcs_unsuited = ['pointbiserialr', 'ansari']
try:
from sklearn.utils.validation import NonBLASDotWarning
warnings.simplefilter('error', NonBLASDotWarning)
except Exception:
pass
@requires_sklearn
def test_ica_full_data_recovery():
"""Test recovery of full data when no source is rejected."""
# Most basic recovery
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
with warnings.catch_warnings(record=True): # bad proj
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
evoked = epochs.average()
n_channels = 5
data = raw._data[:n_channels].copy()
data_epochs = epochs.get_data()
data_evoked = evoked.data
raw.annotations = Annotations([0.5], [0.5], ['BAD'])
for method in ['fastica']:
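        # Each tuple is (n_components, n_pca_components, expect exact recovery).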
stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
for n_components, n_pca_components, ok in stuff:
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components,
method=method, max_iter=1)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=list(range(n_channels)))
raw2 = ica.apply(raw.copy(), exclude=[])
if ok:
assert_allclose(data[:n_channels], raw2._data[:n_channels],
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
assert_true(np.max(diff) > 1e-14)
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=list(range(n_channels)))
epochs2 = ica.apply(epochs.copy(), exclude=[])
data2 = epochs2.get_data()[:, :n_channels]
if ok:
assert_allclose(data_epochs[:, :n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data_epochs[:, :n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
evoked2 = ica.apply(evoked.copy(), exclude=[])
data2 = evoked2.data[:n_channels]
if ok:
assert_allclose(data_evoked[:n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(evoked.data[:n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
assert_raises(ValueError, ICA, method='pizza-decomposision')
@requires_sklearn
def test_ica_rank_reduction():
"""Test recovery ICA rank reduction."""
# Most basic recovery
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
n_components = 5
max_pca_components = len(picks)
for n_pca_components in [6, 10]:
with warnings.catch_warnings(record=True): # non-convergence
warnings.simplefilter('always')
ica = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components,
method='fastica', max_iter=1).fit(raw, picks=picks)
rank_before = raw.estimate_rank(picks=picks)
assert_equal(rank_before, len(picks))
raw_clean = ica.apply(raw.copy())
rank_after = raw_clean.estimate_rank(picks=picks)
# interaction between ICA rejection and PCA components difficult
# to predict. rank_after often seems to be 1 higher than
# n_pca_components
assert_true(n_components < n_pca_components <= rank_after <=
rank_before)
@requires_sklearn
def test_ica_reset():
"""Test ICA resetting."""
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
run_time_attrs = (
'_pre_whitener',
'unmixing_matrix_',
'mixing_matrix_',
'n_components_',
'n_samples_',
'pca_components_',
'pca_explained_variance_',
'pca_mean_'
)
with warnings.catch_warnings(record=True): # convergence
ica = ICA(
n_components=3, max_pca_components=3, n_pca_components=3,
method='fastica', max_iter=1).fit(raw, picks=picks)
assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
assert_not_equal(ica.labels_, None)
ica._reset()
assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
assert_not_equal(ica.labels_, None)
@requires_sklearn
def test_ica_core():
"""Test ICA on raw and epochs."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
# XXX. The None cases helped revealing bugs but are time consuming.
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
noise_cov = [None, test_cov]
# removed None cases to speed up...
n_components = [2, 1.0] # for future dbg add cases
max_pca_components = [3]
picks_ = [picks]
methods = ['fastica']
iter_ica_params = product(noise_cov, n_components, max_pca_components,
picks_, methods)
# test init catchers
assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
# test essential core functionality
for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
# Test ICA raw
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0, method=method, max_iter=1)
assert_raises(ValueError, ica.__contains__, 'mag')
print(ica) # to test repr
# test fit checker
assert_raises(RuntimeError, ica.get_sources, raw)
assert_raises(RuntimeError, ica.get_sources, epochs)
# test decomposition
with warnings.catch_warnings(record=True): # convergence
ica.fit(raw, picks=pcks, start=start, stop=stop)
repr(ica) # to test repr
assert_true('mag' in ica) # should now work without error
# test re-fit
unmixing1 = ica.unmixing_matrix_
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=pcks, start=start, stop=stop)
assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
raw_sources = ica.get_sources(raw)
# test for #3804
assert_equal(raw_sources._filenames, [None])
print(raw_sources)
sources = raw_sources[:, :][0]
assert_true(sources.shape[0] == ica.n_components_)
# test preload filter
raw3 = raw.copy()
raw3.preload = False
assert_raises(ValueError, ica.apply, raw3,
include=[1, 2])
#######################################################################
# test epochs decomposition
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=picks)
data = epochs.get_data()[:, 0, :]
n_samples = np.prod(data.shape)
assert_equal(ica.n_samples_, n_samples)
print(ica) # to test repr
sources = ica.get_sources(epochs).get_data()
assert_true(sources.shape[1] == ica.n_components_)
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# test preload filter
epochs3 = epochs.copy()
epochs3.preload = False
assert_raises(ValueError, ica.apply, epochs3,
include=[1, 2])
# test for bug with whitener updating
_pre_whitener = ica._pre_whitener.copy()
epochs._data[:, 0, 10:15] *= 1e12
ica.apply(epochs.copy())
assert_array_equal(_pre_whitener, ica._pre_whitener)
# test expl. var threshold leading to empty sel
ica.n_components = 0.1
assert_raises(RuntimeError, ica.fit, epochs)
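# passing a plain tuple instead of Raw/Epochs/Evoked should be rejected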
offender = 1, 2, 3,
assert_raises(ValueError, ica.get_sources, offender)
assert_raises(ValueError, ica.fit, offender)
assert_raises(ValueError, ica.apply, offender)
@slow_test
@requires_sklearn
def test_ica_additional():
"""Test additional ICA functionality."""
import matplotlib.pyplot as plt
tempdir = _TempDir()
stop2 = 500
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
raw.annotations = Annotations([0.5], [0.5], ['BAD'])
# XXX This breaks the tests :(
# raw.info['bads'] = [raw.ch_names[1]]
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
# test if n_components=None works
with warnings.catch_warnings(record=True):
ica = ICA(n_components=None,
max_pca_components=None,
n_pca_components=None, random_state=0)
ica.fit(epochs, picks=picks, decim=3)
# for testing eog functionality
picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=True, exclude='bads')
epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
baseline=(None, 0), preload=True)
test_cov2 = test_cov.copy()
ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
n_pca_components=4)
assert_true(ica.info is None)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5])
assert_true(isinstance(ica.info, Info))
assert_true(ica.n_components_ < 5)
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
assert_raises(RuntimeError, ica.save, '')
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)
# check passing a ch_name to find_bads_ecg
with warnings.catch_warnings(record=True): # filter length
_, scores_1 = ica.find_bads_ecg(raw)
_, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1])
assert_false(scores_1[0] == scores_2[0])
# test corrmap
ica2 = ica.copy()
ica3 = ica.copy()
corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
ch_type="mag")
corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
assert_true(0 in ica.labels_["blinks"])
# test retrieval of component maps as arrays
components = ica.get_components()
template = components[:, 0]
EvokedArray(components, ica.info, tmin=0.).plot_topomap([0])
corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
ch_type="mag")
assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
plt.close('all')
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
ica.save(ica_badname)
read_ica(ica_badname)
assert_naming(w, 'test_ica.py', 2)
# test decim
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
raw_ = raw.copy()
for _ in range(3):
raw_.append(raw_)
n_samples = raw_._data.shape[1]
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_equal(raw_._data.shape[1], n_samples)
# test expl var
ica = ICA(n_components=1.0, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(ica.n_components_ == 4)
ica_var = _ica_explained_variance(ica, raw, normalize=True)
assert_true(np.all(ica_var[:-1] >= ica_var[1:]))
# test ica sorting
ica.exclude = [0]
ica.labels_ = dict(blink=[0], think=[1])
ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
assert_equal(ica_sorted.exclude, [3])
assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))
# epochs extraction from raw fit
assert_raises(RuntimeError, ica.get_sources, epochs)
# test reading and writing
test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
for cov in (None, test_cov):
ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True): # ICA does not converge
ica.fit(raw, picks=picks, start=start, stop=stop2)
sources = ica.get_sources(epochs).get_data()
assert_true(ica.mixing_matrix_.shape == (2, 2))
assert_true(ica.unmixing_matrix_.shape == (2, 2))
assert_true(ica.pca_components_.shape == (4, len(picks)))
assert_true(sources.shape[1] == ica.n_components_)
for exclude in [[], [0]]:
ica.exclude = exclude
ica.labels_ = {'foo': [0]}
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.exclude == ica_read.exclude)
assert_equal(ica.labels_, ica_read.labels_)
ica.exclude = []
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [])
ica.exclude = [0, 1]
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [0, 1])
ica_raw = ica.get_sources(raw)
assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
ica_raw.info['bads']])
# test filtering
d1 = ica_raw._data[0].copy()
ica_raw.filter(4, 20, fir_design='firwin2')
assert_equal(ica_raw.info['lowpass'], 20.)
assert_equal(ica_raw.info['highpass'], 4.)
assert_true((d1 != ica_raw._data[0]).any())
d1 = ica_raw._data[0].copy()
ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin')
assert_true((d1 != ica_raw._data[0]).any())
ica.n_pca_components = 2
ica.method = 'fake'
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.n_pca_components == ica_read.n_pca_components)
assert_equal(ica.method, ica_read.method)
assert_equal(ica.labels_, ica_read.labels_)
# check type consistency
attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
'pca_explained_variance_ _pre_whitener')
def f(x, y):
return getattr(x, y).dtype
for attr in attrs.split():
assert_equal(f(ica_read, attr), f(ica, attr))
ica.n_pca_components = 4
ica_read.n_pca_components = 4
ica.exclude = []
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
'pca_mean_', 'pca_explained_variance_',
'_pre_whitener']:
assert_array_almost_equal(getattr(ica, attr),
getattr(ica_read, attr))
assert_true(ica.ch_names == ica_read.ch_names)
assert_true(isinstance(ica_read.info, Info))
sources = ica.get_sources(raw)[:, :][0]
sources2 = ica_read.get_sources(raw)[:, :][0]
assert_array_almost_equal(sources, sources2)
_raw1 = ica.apply(raw, exclude=[1])
_raw2 = ica_read.apply(raw, exclude=[1])
assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
os.remove(test_ica_fname)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(raw, target='EOG 061', score_func=func,
start=0, stop=10)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(raw, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, raw,
target=np.arange(1))
params = []
params += [(None, -1, slice(2), [0, 1])] # variance, kurtosis idx params
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
eog_ch=ch_name, skew_criterion=idx,
var_criterion=idx, kurt_criterion=idx)
evoked = epochs.average()
evoked_data = evoked.data.copy()
raw_data = raw[:][0].copy()
epochs_data = epochs.get_data().copy()
with warnings.catch_warnings(record=True):
idx, scores = ica.find_bads_ecg(raw, method='ctps')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(raw, method='correlation')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(epochs, method='ctps')
assert_equal(len(scores), ica.n_components_)
assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
method='ctps')
assert_raises(ValueError, ica.find_bads_ecg, raw,
method='crazy-coupling')
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
idx, scores = ica.find_bads_eog(raw)
assert_true(isinstance(scores, list))
assert_equal(len(scores[0]), ica.n_components_)
idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(evoked, method='correlation')
assert_equal(len(scores), ica.n_components_)
assert_array_equal(raw_data, raw[:][0])
assert_array_equal(epochs_data, epochs.get_data())
assert_array_equal(evoked_data, evoked.data)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(epochs_eog, target='EOG 061',
score_func=func)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(epochs, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# ecg functionality
ecg_scores = ica.score_sources(raw, target='MEG 1531',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
ecg_events = ica_find_ecg_events(raw,
sources[np.abs(ecg_scores).argmax()])
assert_true(ecg_events.ndim == 2)
# eog functionality
eog_scores = ica.score_sources(raw, target='EOG 061',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
eog_events = ica_find_eog_events(raw,
sources[np.abs(eog_scores).argmax()])
assert_true(eog_events.ndim == 2)
# Test ica fiff export
ica_raw = ica.get_sources(raw, start=0, stop=100)
assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
assert_equal(len(ica_raw._filenames), 1) # API consistency
ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
ica.n_components = np.int32(ica.n_components)
ica_raw.save(test_ica_fname, overwrite=True)
ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
ica_raw2.close()
os.remove(test_ica_fname)
# Test ica epochs export
ica_epochs = ica.get_sources(epochs)
assert_true(ica_epochs.events.shape == epochs.events.shape)
ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
assert_true(ica_epochs._raw is None)
assert_true(ica_epochs.preload is True)
# test float n pca components
ica.pca_explained_variance_ = np.array([0.2] * 5)
ica.n_components_ = 0
for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
ncomps_ = ica._check_n_pca_components(ncomps)
assert_true(ncomps_ == expected)
ica = ICA()
ica.fit(raw, picks=picks[:5])
with warnings.catch_warnings(record=True): # filter length
ica.find_bads_ecg(raw)
ica.find_bads_eog(epochs, ch_name='MEG 0121')
assert_array_equal(raw_data, raw[:][0])
raw.drop_channels(['MEG 0122'])
with warnings.catch_warnings(record=True): # filter length
assert_raises(RuntimeError, ica.find_bads_eog, raw)
assert_raises(RuntimeError, ica.find_bads_ecg, raw)
@requires_sklearn
def test_run_ica():
"""Test run_ica function."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
params = []
params += [(None, -1, slice(2), [0, 1])] # variance, kurtosis idx
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
warnings.simplefilter('always')
with warnings.catch_warnings(record=True):
run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
@requires_sklearn
def test_ica_reject_buffer():
"""Test ICA data raw buffer rejection."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
raw._data[2, 1000:1005] = 5e-12
with catch_logging() as drop_log:
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
tstep=0.01, verbose=True, reject_by_annotation=False)
assert_true(raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
log = [l for l in drop_log.getvalue().split('\n') if 'detected' in l]
assert_equal(len(log), 1)
@requires_sklearn
def test_ica_twice():
"""Test running ICA twice."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
picks = pick_types(raw.info, meg='grad', exclude='bads')
n_components = 0.9
max_pca_components = None
n_pca_components = 1.1
with warnings.catch_warnings(record=True):
ica1 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components, random_state=0)
ica1.fit(raw, picks=picks, decim=3)
raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
ica2 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=1.0, random_state=0)
ica2.fit(raw_new, picks=picks, decim=3)
assert_equal(ica1.n_components_, ica2.n_components_)
@requires_sklearn
def test_fit_params():
"""Test fit_params for ICA."""
assert_raises(ValueError, ICA, fit_params=dict(extended=True))
fit_params = {}
ICA(fit_params=fit_params) # test no side effects
assert_equal(fit_params, {})
@requires_sklearn
def test_bad_channels():
"""Test exception when unsupported channels are used."""
chs = [i for i in _kind_dict]
data_chs = _DATA_CH_TYPES_SPLIT + ['eog']
chs_bad = list(set(chs) - set(data_chs))
info = create_info(len(chs), 500, chs)
data = np.random.rand(len(chs), 50)
raw = RawArray(data, info)
data = np.random.rand(100, len(chs), 50)
epochs = EpochsArray(data, info)
n_components = 0.9
ica = ICA(n_components=n_components, method='fastica')
for inst in [raw, epochs]:
for ch in chs_bad:
# Test case for only bad channels
picks_bad1 = pick_types(inst.info, meg=False,
**{str(ch): True})
# Test case for good and bad channels
picks_bad2 = pick_types(inst.info, meg=True,
**{str(ch): True})
assert_raises(ValueError, ica.fit, inst, picks=picks_bad1)
assert_raises(ValueError, ica.fit, inst, picks=picks_bad2)
assert_raises(ValueError, ica.fit, inst, picks=[])
@requires_sklearn
def test_eog_channel():
"""Test that EOG channel is included when performing ICA."""
raw = read_raw_fif(raw_fname, preload=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=True, ecg=False,
eog=True, exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
n_components = 0.9
ica = ICA(n_components=n_components, method='fastica')
# Test case for MEG and EOG data. Should have EOG channel
for inst in [raw, epochs]:
picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:4]
picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False,
eog=True, exclude='bads')
picks1 = np.append(picks1a, picks1b)
ica.fit(inst, picks=picks1)
assert_true(any('EOG' in ch for ch in ica.ch_names))
# Test case for MEG data. Should have no EOG channel
for inst in [raw, epochs]:
picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:5]
ica.fit(inst, picks=picks1)
assert_false(any('EOG' in ch for ch in ica.ch_names))
@requires_sklearn
def test_max_pca_components_none():
"""Test max_pca_components=None."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
max_pca_components = None
n_components = 10
random_state = 12345
tempdir = _TempDir()
output_fname = op.join(tempdir, 'test_ica-ica.fif')
ica = ICA(max_pca_components=max_pca_components,
n_components=n_components, random_state=random_state)
with warnings.catch_warnings(record=True): # convergence
ica.fit(epochs)
ica.save(output_fname)
ica = read_ica(output_fname)
# ICA.fit() replaced max_pca_components, which was previously None,
# with the appropriate integer value.
assert_equal(ica.max_pca_components, epochs.info['nchan'])
assert_equal(ica.n_components, 10)
@requires_sklearn
def test_n_components_none():
"""Test n_components=None."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
max_pca_components = 10
n_components = None
random_state = 12345
tempdir = _TempDir()
output_fname = op.join(tempdir, 'test_ica-ica.fif')
ica = ICA(max_pca_components=max_pca_components,
n_components=n_components, random_state=random_state)
with warnings.catch_warnings(record=True): # convergence
ica.fit(epochs)
ica.save(output_fname)
ica = read_ica(output_fname)
# ICA.fit() replaced max_pca_components, which was previously None,
# with the appropriate integer value.
assert_equal(ica.max_pca_components, 10)
assert_is_none(ica.n_components)
@requires_sklearn
def test_n_components_and_max_pca_components_none():
"""Test n_components and max_pca_components=None."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
max_pca_components = None
n_components = None
random_state = 12345
tempdir = _TempDir()
output_fname = op.join(tempdir, 'test_ica-ica.fif')
ica = ICA(max_pca_components=max_pca_components,
n_components=n_components, random_state=random_state)
with warnings.catch_warnings(record=True): # convergence
ica.fit(epochs)
ica.save(output_fname)
ica = read_ica(output_fname)
# ICA.fit() replaced max_pca_components, which was previously None,
# with the appropriate integer value.
assert_equal(ica.max_pca_components, epochs.info['nchan'])
assert_is_none(ica.n_components)
run_tests_if_main()
| bsd-3-clause |
deot95/Tesis | Proyecto de Grado Ingeniería Electrónica/Workspace/RL/DDPG/plot_reward.py | 1 | 1114 | from statsmodels.nonparametric.smoothers_lowess import lowess
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
import seaborn as sns
sns.set()
sns.set_context('poster')
sns.set_style('ticks')
ans = ""
while not (ans == "y" or ans=="n"):
ans = input("Flows. y/n:\n")
flows = (ans == "y")
f2 = plt.figure(figsize=(12,8))
#data = np.load("reward_history_flows_"+str(flows).lower()+".npy")
#data = np.load("reward_history_flows_0.npy")
data = np.load("reward_small_history_flows_0.npy")
x = range(len(data))
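# Smooth the raw reward trace with lowess twice (frac=0.08, then 0.03); the
# insert/delete step shifts the smoothed values right by one sample and seeds
# the first point with the minimum reward. The smoothed curve is computed but
# only the raw trace is plotted below (the lowess plot line is commented out).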
yhat = lowess(data,x,0.08)
yhat[:,1] = np.insert(np.delete(yhat[:,1],len(yhat[:,1])-1),0,np.min(data))
yhat = lowess(yhat[:,1],x,0.03)
#plt.plot(yhat[:,0],yhat[:,1])
p=plt.plot(x,data,alpha=0.5)
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title("Reward vs. Episodes for the DDPG algorithm")
sns.despine()
plt.text(-25, -32700, 'A',bbox={'facecolor':p[0].get_color(), 'alpha':0.9, 'pad':5})
plt.text(300, -20500, 'B',bbox={'facecolor':p[0].get_color(), 'alpha':0.9, 'pad':5})
plt.text(2000, -23000, 'C',bbox={'facecolor':p[0].get_color(), 'alpha':0.9, 'pad':5})
plt.show() | mit |
jojow950i/Appliance-Data-Analysis-and-Modification-Toolkit | server.py | 1 | 6670 | import tornado.ioloop
import tornado.web
import tornado.autoreload
from tornado.concurrent import Future
import os
import random
import time
import json
import pandas
import datetime
import uuid
import threading
from tornado import gen
import generate_order
import metadata_import
import yaml
i = 0
redd_path = '/path/to/REDD/'
greend_path = '/path/to/GREEND/'
iAWE_path = '/path/to/iAWE/'
channels = {}
checkboxes = ('REDD', 'GREEND', 'iAWE')
waiters = {}
class Channel:
def __init__(self, channel_name='', datasets=None):
# avoid sharing a mutable default dict between instances
self.datasets = datasets if datasets is not None else {}
self.name = channel_name
def __repr__(self):
return '%s: %r' % (self.name, self.datasets)
class MainHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def get(self):
client_id = str(uuid.uuid4())
global i
i = 0
print("Main Get!")
message = {'id': client_id}
self.render("index.html", message=message)
class EntryAllHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def post(self):
toSend = list()
chosen = []
for checkbox in checkboxes:
if self.get_argument(checkbox) == 'true':
chosen.append(checkbox)
for c_channel in channels:
s1 = set(channels[c_channel]['datasets'].keys())
aus = set(chosen).intersection(s1)
availability = {'small': 0, 'medium': 0, 'large': 0}
for a in aus:
for part in channels[c_channel]['datasets'][a]:
availability[part] += len(channels[c_channel]['datasets'][a][part])
if aus != set([]):
message = {
"id": str(int(time.time() + int(random.random() * 10000))),
"name": c_channel,
"from": str((int(time.time() * 1000000) >> 9) + int(random.random() * 100000)),
"availability": availability,
"small": availability['small'],
"medium": availability['medium'],
"large": availability['large'],
}
message["html"] = tornado.escape.to_basestring(self.render_string("entry.html", message=message))
toSend.append(message)
self.write(dict({'v': toSend}))
class GenerateOrderHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def post(self):
self.write('')
gotten_id = self.get_argument('id', None)
timeframe = self.get_argument('timeframe', None)
apps = json.loads(self.get_argument('appliances', None))
noise = int(self.get_argument('noise', None))
missing = int(self.get_argument('missing', None))
calcTotalComplexity = self.get_argument('calcTotalComplexity', None)
print("starting thread now...")
order = generate_order.GenerateOrder(gotten_id, timeframe, apps, noise, missing, calcTotalComplexity, waiters,
redd_path, channels)
c_thread = threading.Thread(target=order.generate, args=())
c_thread.start()
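# StatusBuffer implements a simple long-polling hand-off: each poll on /update
# registers a Future via wait_for_output(), and the next call to output_status()
# (presumably made from the order-generation worker, which receives `waiters`)
# resolves all pending Futures with the status message. Note that
# output_status() busy-waits until at least one client Future is registered.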
class StatusBuffer(object):
def __init__(self):
self.for_output = []
def output_status(self, message):
while len(self.for_output) == 0:
pass
for future in self.for_output:
future.set_result(message)
self.for_output = []
def wait_for_output(self):
result_future = Future()
self.for_output.append(result_future)
return result_future
class UpdateStatusHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
@gen.coroutine
def post(self):
global waiters
gotten_id = self.get_argument('id', None)
last_time = pandas.Timestamp(datetime.datetime.now())
if gotten_id in waiters.keys():
waiters[gotten_id]['last_time'] = last_time
else:
waiters.update({gotten_id: {'buffer': StatusBuffer(), 'last_time': last_time}})
buffer = waiters[gotten_id]['buffer']
future = buffer.wait_for_output()
status_update = yield future
self.write({'update': status_update})
class CheckboxesAllHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def post(self):
to_send = list()
for checkbox in checkboxes:
message = {
"dataset": checkbox,
}
message["html"] = tornado.escape.to_basestring(self.render_string("add_checkboxes.html", message=message))
to_send.append(message)
self.write(dict({'v': to_send}))
class DownloadHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def post(self, path):
file = open('download/' + path, 'r')
print(path)
self.set_header('Content-Type', 'text/csv')
self.set_header('Content-Disposition', 'attachment; filename=HouseData.csv')
self.write(file.read())
application = tornado.web.Application(
[
(r"/", MainHandler),
(r"/entry/all", EntryAllHandler),
(r"/checkboxes/all", CheckboxesAllHandler),
(r"/generate", GenerateOrderHandler),
(r'/update', UpdateStatusHandler),
(r'/download/(.*)', DownloadHandler), # tornado.web.StaticFileHandler, {"path": "./download"}),
],
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
)
redd_infos = json.load(open("dataset_infos/REDD_infos.json", "r"))
greend_infos = json.load(open("dataset_infos/GREEND_infos.json", "r"))
iawe_infos = json.load(open("dataset_infos/iAWE_infos.json", "r"))
if __name__ == "__main__":
print("Server Started")
channels = metadata_import.import_datasets()
print(channels)
tornado.autoreload.start(io_loop=None, check_time=500)
css = os.path.join(os.path.dirname(__file__) + "/static", "index.css")
js = os.path.join(os.path.dirname(__file__) + "/static", "main.js")
entry = os.path.join(os.path.dirname(__file__) + "/templates", "entry.html")
check = os.path.join(os.path.dirname(__file__) + "/templates", "add_checkboxes.html")
html = os.path.join(os.path.dirname(__file__) + "/templates", "index.html")
tornado.autoreload.watch(css)
tornado.autoreload.watch(html)
tornado.autoreload.watch(js)
tornado.autoreload.watch(entry)
tornado.autoreload.watch(check)
application.listen(9999)
tornado.ioloop.IOLoop.instance().start() | gpl-3.0 |
cpcloud/ibis | ibis/pandas/execution/util.py | 1 | 1599 | import operator
import toolz
import ibis
import ibis.common.exceptions as com
from ibis.pandas.core import execute
def compute_sort_key(key, data, scope=None, **kwargs):
by = key.to_expr()
try:
if isinstance(by, str):
return by, None
return by.get_name(), None
except com.ExpressionError:
new_scope = {t: data for t in by.op().root_tables()}
new_column = execute(by, scope=toolz.merge(scope, new_scope), **kwargs)
name = ibis.util.guid()
new_column.name = name
return name, new_column
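# compute_sorted_frame sorts `df` by the combined grouping and ordering keys;
# non-trivial sort expressions are materialized as temporary columns (via
# compute_sort_key above) before sorting, and the stable mergesort keeps rows
# with equal keys in their original order.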
def compute_sorted_frame(df, order_by, group_by=(), **kwargs):
computed_sort_keys = []
sort_keys = list(toolz.concatv(group_by, order_by))
ascending = [getattr(key.op(), 'ascending', True) for key in sort_keys]
new_columns = {}
for i, key in enumerate(map(operator.methodcaller('op'), sort_keys)):
computed_sort_key, temporary_column = compute_sort_key(
key, df, **kwargs
)
computed_sort_keys.append(computed_sort_key)
if temporary_column is not None:
new_columns[computed_sort_key] = temporary_column
result = df.assign(**new_columns)
result = result.sort_values(
computed_sort_keys, ascending=ascending, kind='mergesort'
)
# TODO: we'll eventually need to return this frame with the temporary
# columns and drop them in the caller (maybe using post_execute?)
ngrouping_keys = len(group_by)
return (
result,
computed_sort_keys[:ngrouping_keys],
computed_sort_keys[ngrouping_keys:],
)
| apache-2.0 |
slinderman/pyhawkes | examples/inference/svi_demo.py | 1 | 9080 | import numpy as np
import os
import pickle
import gzip
# np.seterr(all='raise')
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_mutual_info_score, \
adjusted_rand_score, roc_auc_score
from pyhawkes.internals.network import StochasticBlockModel
from pyhawkes.models import \
DiscreteTimeNetworkHawkesModelGammaMixture, \
DiscreteTimeStandardHawkesModel
init_with_map = True
def demo(seed=None):
"""
Fit a weakly sparse
:return:
"""
import warnings
warnings.warn("This test runs but the parameters need to be tuned. "
"Right now, the SVI algorithm seems to walk away from "
"the MAP estimate and yield suboptimal results. "
"I'm not convinced the variational inference with the "
"gamma mixture provides the best estimates of the sparsity.")
if seed is None:
seed = np.random.randint(2**32)
print("Setting seed to ", seed)
np.random.seed(seed)
###########################################################
# Load some example data.
# See data/synthetic/generate.py to create more.
###########################################################
data_path = os.path.join("data", "synthetic", "synthetic_K20_C4_T10000.pkl.gz")
with gzip.open(data_path, 'r') as f:
S, true_model = pickle.load(f)
T = S.shape[0]
K = true_model.K
B = true_model.B
dt = true_model.dt
dt_max = true_model.dt_max
###########################################################
# Initialize with MAP estimation on a standard Hawkes model
###########################################################
if init_with_map:
init_len = T
print("Initializing with BFGS on first ", init_len, " time bins.")
init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max, B=B,
alpha=1.0, beta=1.0)
init_model.add_data(S[:init_len, :])
init_model.initialize_to_background_rate()
init_model.fit_with_bfgs()
else:
init_model = None
###########################################################
# Create a test weak spike-and-slab model
###########################################################
# Copy the network hypers.
# Give the test model p, but not c, v, or m
network_hypers = true_model.network_hypers.copy()
network_hypers['C'] = 1
network_hypers['c'] = None
network_hypers['v'] = None
network_hypers['m'] = None
test_network = StochasticBlockModel(K=K, **network_hypers)
test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, dt_max=dt_max, B=B,
basis_hypers=true_model.basis_hypers,
bkgd_hypers=true_model.bkgd_hypers,
impulse_hypers=true_model.impulse_hypers,
weight_hypers=true_model.weight_hypers,
network=test_network)
test_model.add_data(S)
# F_test = test_model.basis.convolve_with_basis(S_test)
# Initialize with the standard model parameters
if init_model is not None:
test_model.initialize_with_standard_model(init_model)
###########################################################
# Fit the test model with stochastic variational inference
###########################################################
N_iters = 500
minibatchsize = 1000
delay = 1.0
forgetting_rate = 0.5
stepsize = (np.arange(N_iters) + delay)**(-forgetting_rate)
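# Robbins-Monro style step size schedule: stepsize[t] = (t + delay)**(-forgetting_rate).
# With delay=1.0 and forgetting_rate=0.5 the first few values are
# 1.0, 1/sqrt(2) ~ 0.707, 1/sqrt(3) ~ 0.577, ...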
samples = []
for itr in range(N_iters):
print("SVI Iter: ", itr, "\tStepsize: ", stepsize[itr])
test_model.sgd_step(minibatchsize=minibatchsize, stepsize=stepsize[itr])
test_model.resample_from_mf()
samples.append(test_model.copy_sample())
###########################################################
# Analyze the samples
###########################################################
analyze_samples(true_model, init_model, samples)
# TODO: Update the plotting code as in the Gibbs demo
def initialize_plots(true_model, test_model, S):
K = true_model.K
C = true_model.C
R = true_model.compute_rate(S=S)
T = S.shape[0]
# Plot the true network
plt.ion()
plot_network(true_model.weight_model.A,
true_model.weight_model.W)
plt.pause(0.001)
# Plot the true and inferred firing rate
plt.figure(2)
plt.plot(np.arange(T), R[:,0], '-k', lw=2)
plt.ion()
ln = plt.plot(np.arange(T), test_model.compute_rate()[:,0], '-r')[0]
plt.show()
# Plot the block affiliations
plt.figure(3)
KC = np.zeros((K,C))
KC[np.arange(K), test_model.network.c] = 1.0
im_clus = plt.imshow(KC,
interpolation="none", cmap="Greys",
aspect=float(C)/K)
im_net = plot_network(np.ones((K,K)), test_model.weight_model.W_effective, vmax=0.5)
plt.pause(0.001)
plt.show()
plt.pause(0.001)
return ln, im_net, im_clus
def update_plots(itr, test_model, S, ln, im_clus, im_net):
K = test_model.K
C = test_model.C
T = S.shape[0]
plt.figure(2)
ln.set_data(np.arange(T), test_model.compute_rate()[:,0])
plt.title("\lambda_{%d}. Iteration %d" % (0, itr))
plt.pause(0.001)
plt.figure(3)
KC = np.zeros((K,C))
KC[np.arange(K), test_model.network.c] = 1.0
im_clus.set_data(KC)
plt.title("KxC: Iteration %d" % itr)
plt.pause(0.001)
plt.figure(4)
plt.title("W: Iteration %d" % itr)
im_net.set_data(test_model.weight_model.W_effective)
plt.pause(0.001)
def analyze_samples(true_model, init_model, samples):
N_samples = len(samples)
# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
g_samples = np.array([s.impulse_model.g for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
c_samples = np.array([s.network.c for s in samples])
p_samples = np.array([s.network.p for s in samples])
v_samples = np.array([s.network.v for s in samples])
offset = N_samples // 2
A_mean = A_samples[offset:, ...].mean(axis=0)
W_mean = W_samples[offset:, ...].mean(axis=0)
g_mean = g_samples[offset:, ...].mean(axis=0)
lambda0_mean = lambda0_samples[offset:, ...].mean(axis=0)
p_mean = p_samples[offset:, ...].mean(axis=0)
v_mean = v_samples[offset:, ...].mean(axis=0)
print("A true: ", true_model.weight_model.A)
print("W true: ", true_model.weight_model.W)
print("g true: ", true_model.impulse_model.g)
print("lambda0 true: ", true_model.bias_model.lambda0)
print("")
print("A mean: ", A_mean)
print("W mean: ", W_mean)
print("g mean: ", g_mean)
print("lambda0 mean: ", lambda0_mean)
print("v mean: ", v_mean)
print("p mean: ", p_mean)
# # Predictive log likelihood
# pll_init = init_model.heldout_log_likelihood(S_test)
# plt.figure()
# plt.plot(np.arange(N_samples), pll_init * np.ones(N_samples), 'k')
# plt.plot(np.arange(N_samples), plls, 'r')
# plt.xlabel("Iteration")
# plt.ylabel("Predictive log probability")
# plt.show()
# Compute the link prediction accuracy curves
if init_model is not None:
auc_init = roc_auc_score(true_model.weight_model.A.ravel(),
init_model.W.ravel())
else:
auc_init = 0.0
auc_A_mean = roc_auc_score(true_model.weight_model.A.ravel(),
A_mean.ravel())
auc_W_mean = roc_auc_score(true_model.weight_model.A.ravel(),
W_mean.ravel())
aucs = []
for A in A_samples:
aucs.append(roc_auc_score(true_model.weight_model.A.ravel(), A.ravel()))
plt.figure()
plt.plot(aucs, '-r')
plt.plot(auc_A_mean * np.ones_like(aucs), '--r')
plt.plot(auc_W_mean * np.ones_like(aucs), '--b')
plt.plot(auc_init * np.ones_like(aucs), '--k')
plt.xlabel("Iteration")
plt.ylabel("Link prediction AUC")
plt.ylim(-0.1, 1.1)
# Compute the adjusted mutual info score of the clusterings
amis = []
arss = []
for c in c_samples:
amis.append(adjusted_mutual_info_score(true_model.network.c, c))
arss.append(adjusted_rand_score(true_model.network.c, c))
plt.figure()
plt.plot(np.arange(N_samples), amis, '-r')
plt.plot(np.arange(N_samples), arss, '-b')
plt.xlabel("Iteration")
plt.ylabel("Clustering score")
plt.ioff()
plt.show()
# demo(2203329564)
# demo(2728679796)
demo(11223344)
| mit |
asurve/systemml | src/main/python/tests/test_mllearn_df.py | 12 | 5320 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
sparkSession = SparkSession.builder.getOrCreate()
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
# Convert to DataFrame for i/o: current way to transfer data
logistic = LogisticRegression(sparkSession, transferUsingDF=True)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_svm_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, transferUsingDF=True)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
vitaly-krugl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backend_bases.py | 69 | 69740 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
Open a grouping element with label *s*. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
a space-efficent :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc = self.new_gc()
gc.set_clip_rectangle(cliprect)
if clippath is not None:
clippath = transforms.TransformedPath(clippath, clippath_trans)
gc.set_clip_path(clippath)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc.set_linewidth(0.0)
xo, yo = 0, 0
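# Properties are recycled with modulo indexing so that shorter property lists
# (colors, linewidths, dashes, etc.) broadcast across all N path/offset combinations.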
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
gc.set_foreground(edgecolors[i % Nedgecolors])
if Nlinewidths:
gc.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc.set_dashes(*linestyles[i % Nlinestyles])
if rgbFace is not None and len(rgbFace)==4:
gc.set_alpha(rgbFace[-1])
rgbFace = rgbFace[:3]
gc.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc, rgbFace
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes;
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
*bbox*
a :class:`matplotlib.transforms.Bbox` instance for clipping, or
None
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
Override this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
raise NotImplementedError
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
a :class:`matplotlib.text.Text` instance
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
raise NotImplementedError
def flipy(self):
"""
Return True if small y values are at the top for the renderer. Is used
for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`) only
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
raise NotImplementedError
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
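# For example, following the docstring formula with pixels_per_inch=72 and
# dpi=144, a 10 point length maps to 10 * 72/72 * 144/72 = 20 pixels; the base
# implementation here simply returns points unchanged.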
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
pass
def stop_rasterizing(self):
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'miter'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even-sized list that gives the ink on, ink
        off in pixels.
        See p107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
        returns a tuple of three floats from 0-1.  color can be a
        matlab format string, an html hex color string, or an rgb tuple
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
self._alpha = alpha
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
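    # Illustrative usage sketch (the particular values are examples only):
    #
    #     gc = GraphicsContextBase()
    #     gc.set_dashes(0, (6.0, 3.0))    # 6 px of ink, 3 px gap, repeating
    #     gc.set_dashes(None, None)       # back to a solid line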
def set_foreground(self, fg, isRGB=False):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
The :class:`GraphicsContextBase` converts colors to rgb
internally. If you know the color is rgb already, you can set
        ``isRGB=True`` to avoid the performance hit of the conversion
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
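    # Illustrative calls showing the accepted color formats (the particular
    # values are examples, not from the original source):
    #
    #     gc.set_foreground('r')                          # matlab format string
    #     gc.set_foreground('#00ff00')                    # html hex string
    #     gc.set_foreground((0.2, 0.4, 0.6))              # rgb unit tuple
    #     gc.set_foreground((1.0, 0.0, 0.0), isRGB=True)  # skip the conversion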
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
try:
offset, dashes = self.dashd[style]
        except KeyError:
raise ValueError('Unrecognized linestyle: %s' % style)
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas,guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class LocationEvent(Event):
"""
    An event that has a screen location
    In addition to the :class:`Event` attributes, the following event
    attributes are defined and shown with their default values:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y,guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas,guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axCmp = lambda _x,_y: cmp(_x.zorder, _y.zorder)
axes_list.sort(axCmp)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
        the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    button = None  # button pressed None, 1, 2, 3
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
        the key pressed: None, chr(range(255)), shift, win, or control
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase:
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry(self.events)
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
        under the cursor.  Connect this to the 'button_press_event'
        using::
            canvas.mpl_connect('button_press_event',canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
        This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
        *orientation*
            'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
method_name = 'print_%s' % format
if (format not in self.filetypes or
not hasattr(self, method_name)):
formats = self.filetypes.keys()
formats.sort()
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
try:
result = getattr(self, method_name)(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
**kwargs)
finally:
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
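    # Hypothetical usage sketch (not from the original source); it assumes
    # ``fig`` is a :class:`matplotlib.figure.Figure` already attached to a
    # canvas and that the Agg backend is importable for PNG output:
    #
    #     fig.canvas.print_figure('report.png', dpi=150,
    #                             facecolor='w', edgecolor='w')
    #     # the format is inferred from the extension; pass format='pdf'
    #     # (or another key of ``filetypes``) to force a particular writer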
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
        not done, so any changes to one of the instances (eg, setting
        figure size or line props) will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` instance the event
        occurred over, and additionally, the variables ``event.xdata`` and
        ``event.ydata`` will be defined.  This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
        :meth:`~matplotlib.backend_bases.FigureCanvasBase.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
        functions for each of the GUI backends can be written.  As
        such, it raises a DeprecationWarning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str,DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
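    # Illustrative sketch only: a GUI backend without its own event loop
    # could alias these defaults (it still needs a working flush_events);
    # the class name below is hypothetical, not any real backend.
    #
    #     class FigureCanvasExampleGUI(FigureCanvasBase):
    #         start_event_loop = FigureCanvasBase.start_event_loop_default
    #         stop_event_loop = FigureCanvasBase.stop_event_loop_default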
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
class FigureManagerBase:
"""
Helper class for matlab mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
        The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
    def full_screen_toggle(self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key == 'f':
self.full_screen_toggle()
# *h*ome or *r*eset mnemonic
elif event.key == 'h' or event.key == 'r' or event.key == "home":
self.canvas.toolbar.home()
# c and v to enable left handed quick navigation
elif event.key == 'left' or event.key == 'c' or event.key == 'backspace':
self.canvas.toolbar.back()
elif event.key == 'right' or event.key == 'v':
self.canvas.toolbar.forward()
# *p*an mnemonic
elif event.key == 'p':
self.canvas.toolbar.pan()
# z*o*om mnemonic
elif event.key == 'o':
self.canvas.toolbar.zoom()
elif event.key == 's':
self.canvas.toolbar.save_figure(self.canvas.toolbar)
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
if event.key == 'g':
event.inaxes.grid()
self.canvas.draw()
elif event.key == 'l':
ax = event.inaxes
scale = ax.get_yscale()
if scale=='log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale=='linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
elif event.key is not None and (event.key.isdigit() and event.key!='0') or event.key=='a':
# 'a' enables all axes
if event.key!='a':
n=int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
if event.x is not None and event.y is not None and a.in_axes(event):
if event.key=='a':
a.set_navigate(True)
else:
a.set_navigate(i==n)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2:
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s : %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
        'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
'the press mouse button in pan/zoom mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) and a.get_navigate():
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.press(event)
def press_zoom(self, event):
'the press mouse button in zoom to rect mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) \
and a.get_navigate() and a.can_zoom():
self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))
self.press(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
'this will be called whenever mouse button is released'
pass
def release_pan(self, event):
'the release mouse button callback in pan/zoom mode'
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
            #multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def release_zoom(self, event):
'the release mouse button callback in zoom to rect mode'
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self.push_current()
self.release(event)
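    # Note on the button-3 branch above: the dragged rectangle is treated as
    # a zoom *out* -- the new limits are chosen so that the old view interval
    # [Xmin, Xmax] lands exactly where the rectangle [x0, x1] was drawn.  On a
    # linear axis that gives alpha = (Xmax - Xmin) / (x1 - x0) with
    # rx = alpha*(X - x0) + Xmin; on a log axis the same construction is
    # applied to the exponents, hence alpha = log(Xmax/Xmin) / log(x1/x0)
    # and the pow(...) expressions.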
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
| agpl-3.0 |
hainn8x/gnuradio | gr-filter/examples/fft_filter_ccc.py | 47 | 4363 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fft_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw0, bw1, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw0 = bw0
self._bw1 = bw1
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.complex_band_pass_2(1, self._fs,
self._bw0, self._bw1,
self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fft_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-S", "--start-pass", type="eng_float", default=1000,
help="Start of Passband [default=%default]")
parser.add_option("-E", "--end-pass", type="eng_float", default=2000,
help="End of Passband [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fft_filter_ccc(options.nsamples,
options.samplerate,
options.start_pass,
options.end_pass,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
    # Plot the signals' PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
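# An illustrative invocation (parameter values are examples only); it
# band-pass filters complex Gaussian noise and plots the source and
# filtered PSDs with pylab:
#
#     python fft_filter_ccc.py -N 20000 -s 8000 -S 1000 -E 2000 -T 100 -A 80 -D 1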
| gpl-3.0 |
nmartensen/pandas | pandas/tests/indexes/test_interval.py | 1 | 43299 | from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat, date_range, timedelta_range, DateOffset)
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
tm.assert_index_equal(index.right, Index([1, 2]))
tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.index_with_nan
assert len(index) == 3
assert index.size == 3
assert index.shape == (3, )
tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), np.nan,
Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self):
index = self.index
assert not index.hasnans
tm.assert_numpy_array_equal(index.isna(),
np.array([False, False]))
tm.assert_numpy_array_equal(index.notna(),
np.array([True, True]))
index = self.index_with_nan
assert index.hasnans
tm.assert_numpy_array_equal(index.notna(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isna(),
np.array([False, True, False]))
def test_copy(self):
actual = self.index.copy()
assert actual.equals(self.index)
actual = self.index.copy(deep=True)
assert actual.equals(self.index)
assert actual.left is not self.index.left
def test_ensure_copied_data(self):
# exercise the copy flag in the constructor
# not copying
index = self.index
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self):
idx = self.index
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
assert not idx.equals(list(idx))
assert not idx.equals([1, 2])
assert not idx.equals(np.array([1, 2]))
assert not idx.equals(pd.date_range('20130101', periods=2))
def test_astype(self):
idx = self.index
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_where(self):
expected = self.index
result = self.index.where(self.index.notna())
tm.assert_index_equal(result, expected)
idx = IntervalIndex.from_breaks([1, 2])
result = idx.where([True, False])
expected = IntervalIndex.from_intervals(
[Interval(1.0, 2.0, closed='right'), np.nan])
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
pass
def test_delete(self):
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.delete(0)
assert expected.equals(actual)
def test_insert(self):
expected = IntervalIndex.from_breaks(range(4))
actual = self.index.insert(2, Interval(2, 3))
assert expected.equals(actual)
pytest.raises(ValueError, self.index.insert, 0, 1)
pytest.raises(ValueError, self.index.insert, 0,
Interval(2, 3, closed='left'))
def test_take(self):
actual = self.index.take([0, 1])
assert self.index.equals(actual)
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2])
actual = self.index.take([0, 0, 1])
assert expected.equals(actual)
def test_monotonic_and_unique(self):
assert self.index.is_monotonic
assert self.index.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
assert idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)])
assert not idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 2), (0, 2)])
assert not idx.is_unique
assert idx.is_monotonic
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples((Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103')),
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed='right')
assert i[0] == Interval(0.0, 1.0)
assert i[1] == Interval(1.0, 2.0)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right')
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right')
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed='right')
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
    def test_contains_method(self):
        # .contains() checks whether scalar (or Interval) values fall within
        # the index's intervals, unlike __contains__ above which only matches
        # Interval endpoints exactly
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
        # these completely overlap the index's range
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self):
expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)])
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)])
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self):
other = IntervalIndex.from_arrays([2], [3])
expected = IntervalIndex.from_arrays(range(3), range(1, 4))
actual = self.index.union(other)
assert expected.equals(actual)
actual = other.union(self.index)
assert expected.equals(actual)
tm.assert_index_equal(self.index.union(self.index), self.index)
tm.assert_index_equal(self.index.union(self.index[:1]),
self.index)
def test_intersection(self):
other = IntervalIndex.from_breaks([1, 2, 3])
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.intersection(other)
assert expected.equals(actual)
tm.assert_index_equal(self.index.intersection(self.index),
self.index)
def test_difference(self):
tm.assert_index_equal(self.index.difference(self.index[:1]),
self.index[1:])
def test_symmetric_difference(self):
result = self.index[:1].symmetric_difference(self.index[1:])
expected = self.index
tm.assert_index_equal(result, expected)
def test_set_operation_errors(self):
pytest.raises(ValueError, self.index.union, self.index.left)
other = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
pytest.raises(ValueError, self.index.union, other)
def test_isin(self):
actual = self.index.isin(self.index)
tm.assert_numpy_array_equal(np.array([True, True]), actual)
actual = self.index.isin(self.index[:1])
tm.assert_numpy_array_equal(np.array([True, False]), actual)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self):
idx = pd.Index([np.nan, pd.Interval(0, 1), pd.Interval(1, 2)])
idx2 = pd.IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2])
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays([np.nan, 0, 1], np.array([0, 1, 2]))
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self):
expected = IntervalIndex.from_breaks([1, 2, 3, 4])
actual = IntervalIndex.from_tuples([(3, 4), (1, 2),
(2, 3)]).sort_values()
tm.assert_index_equal(expected, actual)
# nan
idx = self.index_with_nan
mask = idx.isna()
tm.assert_numpy_array_equal(mask, np.array([False, True, False]))
result = idx.sort_values()
mask = result.isna()
tm.assert_numpy_array_equal(mask, np.array([False, False, True]))
result = idx.sort_values(ascending=False)
mask = result.isna()
tm.assert_numpy_array_equal(mask, np.array([True, False, False]))
def test_datetime(self):
dates = pd.date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = pd.date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert pd.Timestamp('2000-01-01T12') not in idx
target = pd.date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2])
index2 = IntervalIndex.from_arrays([1, 2], [2, 3])
result = index1.append(index2)
expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3])
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays([0, 1, 0, 1, 1, 2],
[1, 2, 1, 2, 2, 3])
tm.assert_index_equal(result, expected)
def f():
index1.append(IntervalIndex.from_arrays([0, 1], [1, 2],
closed='both'))
pytest.raises(ValueError, f)
def test_is_non_overlapping_monotonic(self):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
for closed in ('left', 'right', 'neither', 'both'):
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
for closed in ('left', 'right', 'neither', 'both'):
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
for closed in ('left', 'right', 'neither', 'both'):
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed)
assert idx.is_non_overlapping_monotonic is False
        # Should be False for closed='both' (adjacent intervals share their
        # endpoints and hence overlap), otherwise True (GH16560)
idx = IntervalIndex.from_breaks(range(4), closed='both')
assert idx.is_non_overlapping_monotonic is False
for closed in ('left', 'right', 'neither'):
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
@pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
def test_construction_from_numeric(self, closed):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name='foo', closed=closed)
result = interval_range(start=0, end=5, name='foo', closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name='foo', closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name='foo', closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name='foo', closed=closed)
result = interval_range(start=0, end=6, freq=2, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name='foo', closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
def test_construction_from_timestamp(self, closed):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
result = interval_range(start=start, end=end, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
result = interval_range(start=start, end=end, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
result = interval_range(start=start, end=end, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
def test_construction_from_timedelta(self, closed):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
result = interval_range(start=start, end=end, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
result = interval_range(start=start, end=end, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timedelta('7 days 1 hour')
result = interval_range(start=start, end=end, freq=freq, name='foo',
closed=closed)
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
# float value for periods
expected = pd.interval_range(start=0, periods=10)
result = pd.interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.tz_localize('UTC'),
end=end.tz_localize('UTC'))
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = Timedelta(days=1), Timedelta(days=10)
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pytimedelta(),
end=end.to_pytimedelta())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timedelta
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0)
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=5)
with tm.assert_raises_regex(ValueError, msg):
interval_range(periods=2)
with tm.assert_raises_regex(ValueError, msg):
interval_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=5, periods=6)
# mixed units
msg = 'start, end, freq need to be type compatible'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timestamp('20130101'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timedelta('1 day'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timedelta('1 day'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timestamp('20130110'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timestamp('20130110'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timedelta('10 days'), freq=2)
# invalid periods
msg = 'periods must be a number, got foo'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, periods='foo')
# invalid start
msg = 'start must be numeric or datetime-like, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start='foo', periods=10)
# invalid end
        msg = r'end must be numeric or datetime-like, got \(0, 1\]'
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Interval(0, 1), periods=10)
# invalid freq for datetime-like
msg = 'freq must be numeric or convertible to DateOffset, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=Timestamp('20130101'), periods=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
class TestIntervalTree(object):
def setup_method(self, method):
gentree = lambda dtype: IntervalTree(np.arange(5, dtype=dtype),
np.arange(5, dtype=dtype) + 2)
self.tree = gentree('int64')
self.trees = {dtype: gentree(dtype)
for dtype in ['int32', 'int64', 'float32', 'float64']}
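        # The generated trees hold the intervals (0, 2], (1, 3], (2, 4],
        # (3, 5], (4, 6] (assuming the default closed='right'), so e.g. the
        # query point 2 falls inside the first two intervals while -1 matches
        # none.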
def test_get_loc(self):
for dtype, tree in self.trees.items():
tm.assert_numpy_array_equal(tree.get_loc(1),
np.array([0], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)),
np.array([0, 1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_loc(-1)
def test_get_indexer(self):
for dtype, tree in self.trees.items():
tm.assert_numpy_array_equal(
tree.get_indexer(np.array([1.0, 5.5, 6.5])),
np.array([0, 4, -1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_indexer(np.array([3.0]))
def test_get_indexer_non_unique(self):
indexer, missing = self.tree.get_indexer_non_unique(
np.array([1.0, 2.0, 6.5]))
tm.assert_numpy_array_equal(indexer[:1],
np.array([0], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(indexer[1:3]),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(indexer[3:]),
np.array([-1], dtype='int64'))
tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64'))
def test_duplicates(self):
tree = IntervalTree([0, 0, 0], [1, 1, 1])
tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)),
np.array([0, 1, 2], dtype='int64'))
with pytest.raises(KeyError):
tree.get_indexer(np.array([0.5]))
indexer, missing = tree.get_indexer_non_unique(np.array([0.5]))
tm.assert_numpy_array_equal(np.sort(indexer),
np.array([0, 1, 2], dtype='int64'))
tm.assert_numpy_array_equal(missing, np.array([], dtype='int64'))
def test_get_loc_closed(self):
for closed in ['left', 'right', 'both', 'neither']:
tree = IntervalTree([0], [1], closed=closed)
for p, errors in [(0, tree.open_left),
(1, tree.open_right)]:
if errors:
with pytest.raises(KeyError):
tree.get_loc(p)
else:
tm.assert_numpy_array_equal(tree.get_loc(p),
np.array([0], dtype='int64'))
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="int type mismatch on 32bit")
def test_get_indexer_closed(self):
x = np.arange(1000, dtype='float64')
found = x.astype('intp')
not_found = (-1 * np.ones(1000)).astype('intp')
for leaf_size in [1, 10, 100, 10000]:
for closed in ['left', 'right', 'both', 'neither']:
tree = IntervalTree(x, x + 0.5, closed=closed,
leaf_size=leaf_size)
tm.assert_numpy_array_equal(found,
tree.get_indexer(x + 0.25))
expected = found if tree.closed_left else not_found
tm.assert_numpy_array_equal(expected,
tree.get_indexer(x + 0.0))
expected = found if tree.closed_right else not_found
tm.assert_numpy_array_equal(expected,
tree.get_indexer(x + 0.5))
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/manifold/plot_mds.py | 45 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float64)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
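# (the perturbation is symmetrised and its diagonal zeroed above so that the
# noisy matrix remains a valid dissimilarity matrix: symmetric with zero
# self-distances)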
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
vasyarv/edx-ora2 | openassessment/assessment/worker/algorithm.py | 10 | 12616 | """
Define the ML algorithms used to train text classifiers.
"""
try:
import cPickle as pickle
except ImportError:
import pickle
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import importlib
import traceback
import base64
from django.conf import settings
DEFAULT_AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm',
'ease': 'openassessment.assessment.worker.algorithm.EaseAIAlgorithm'
}
class AIAlgorithmError(Exception):
"""
An error occurred when using an AI algorithm.
Superclass for more specific errors below.
"""
pass
class UnknownAlgorithm(AIAlgorithmError):
"""
Algorithm ID not found in the configuration.
"""
def __init__(self, algorithm_id):
msg = u"Could not find algorithm \"{}\" in the configuration.".format(algorithm_id)
super(UnknownAlgorithm, self).__init__(msg)
class AlgorithmLoadError(AIAlgorithmError):
"""
Unable to load the algorithm class.
"""
def __init__(self, algorithm_id, algorithm_path):
msg = (
u"Could not load algorithm \"{algorithm_id}\" from \"{path}\""
).format(algorithm_id=algorithm_id, path=algorithm_path)
super(AlgorithmLoadError, self).__init__(msg)
class TrainingError(AIAlgorithmError):
"""
An error occurred while training a classifier from example essays.
"""
pass
class ScoreError(AIAlgorithmError):
"""
An error occurred while scoring an essay.
"""
pass
class InvalidClassifier(ScoreError):
"""
The classifier could not be used by this algorithm to score an essay.
"""
pass
class AIAlgorithm(object):
"""
Abstract base class for a supervised ML text classification algorithm.
"""
__metaclass__ = ABCMeta
# Example essay used as input to the training algorithm
# `text` is a unicode string representing a student essay submission.
# `score` is an integer score.
# Note that `score` is used as an arbitrary label, so you could
# have a set of examples with non-adjacent scores.
ExampleEssay = namedtuple('ExampleEssay', ['text', 'score'])
@abstractmethod
def train_classifier(self, examples):
"""
Train a classifier based on example essays and scores.
Args:
examples (list of AIAlgorithm.ExampleEssay): Example essays and scores.
Returns:
JSON-serializable: The trained classifier. This MUST be JSON-serializable.
Raises:
TrainingError: The classifier could not be trained successfully.
"""
pass
@abstractmethod
def score(self, text, classifier, cache):
"""
Score an essay using a classifier.
Args:
text (unicode): The text to classify.
classifier (JSON-serializable): A classifier, using the same format
as `train_classifier()`.
cache (dict): An in-memory cache that persists until all criteria
in the rubric have been scored.
Raises:
InvalidClassifier: The provided classifier cannot be used by this algorithm.
ScoreError: An error occurred while scoring.
"""
pass
@classmethod
def algorithm_for_id(cls, algorithm_id):
"""
Load an algorithm based on Django settings configuration.
Args:
algorithm_id (unicode): The identifier for the algorithm,
which should be specified in Django settings.
Returns:
AIAlgorithm
Raises:
UnknownAlgorithm
"""
algorithms = getattr(settings, "ORA2_AI_ALGORITHMS", DEFAULT_AI_ALGORITHMS)
cls_path = algorithms.get(algorithm_id)
if cls_path is None:
raise UnknownAlgorithm(algorithm_id)
else:
module_path, _, name = cls_path.rpartition('.')
try:
algorithm_cls = getattr(importlib.import_module(module_path), name)
return algorithm_cls()
except (ImportError, ValueError, AttributeError):
raise AlgorithmLoadError(algorithm_id, cls_path)
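# A minimal usage sketch (illustrative only; the settings dict shown is a
# hypothetical override -- DEFAULT_AI_ALGORITHMS above is used when the
# ORA2_AI_ALGORITHMS setting is absent):
#
#   # settings.py
#   ORA2_AI_ALGORITHMS = {
#       'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm',
#   }
#
#   # worker code
#   algorithm = AIAlgorithm.algorithm_for_id('fake')
#   examples = [
#       AIAlgorithm.ExampleEssay(text=u"First example essay", score=0),
#       AIAlgorithm.ExampleEssay(text=u"Second example essay", score=2),
#   ]
#   classifier = algorithm.train_classifier(examples)
#   score = algorithm.score(u"A new essay to grade", classifier, cache={})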
class FakeAIAlgorithm(AIAlgorithm):
"""
Fake AI algorithm implementation that assigns scores randomly.
We use this for testing the pipeline independently of EASE.
"""
def train_classifier(self, examples):
"""
Store the possible score labels, which will allow
us to deterministically choose scores for other essays.
"""
unique_sorted_scores = sorted(list(set(example.score for example in examples)))
return {'scores': unique_sorted_scores}
def score(self, text, classifier, cache):
"""
Choose a score for the essay deterministically based on its length.
"""
if 'scores' not in classifier or len(classifier['scores']) == 0:
raise InvalidClassifier("Classifier must provide score labels")
else:
score_index = len(text) % len(classifier['scores'])
return classifier['scores'][score_index]
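    # Worked example (illustrative only): train_classifier() on essays scored
    # 0, 1 and 2 returns {'scores': [0, 1, 2]}; scoring a 7-character essay
    # then picks index 7 % 3 == 1, i.e. score 1, so the same text always
    # receives the same score.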
class EaseAIAlgorithm(AIAlgorithm):
"""
Wrapper for the EASE library.
See https://github.com/edx/ease for more information.
Since EASE has many system dependencies, we don't include it explicitly
in edx-ora2 requirements. When testing locally, we use the fake
algorithm implementation instead.
"""
def train_classifier(self, examples):
"""
Train a text classifier using the EASE library.
The classifier is serialized as a dictionary with keys:
* 'feature_extractor': The pickled feature extractor (transforms text into a numeric feature vector).
* 'score_classifier': The pickled classifier (uses the feature vector to assign scores to essays).
Because we are using `pickle`, the serialized classifiers are unfortunately
tied to the particular version of ease/scikit-learn/numpy/scipy/nltk that we
have installed at the time of training.
Args:
examples (list of AIAlgorithm.ExampleEssay): Example essays and scores.
Returns:
dict: The serializable classifier.
Raises:
TrainingError: The classifier could not be trained successfully.
"""
feature_ext, classifier = self._train_classifiers(examples)
return self._serialize_classifiers(feature_ext, classifier)
def score(self, text, classifier, cache):
"""
Score essays using EASE.
Args:
text (unicode): The essay text to score.
classifier (dict): The serialized classifiers created during training.
cache (dict): An in-memory cache that persists until all criteria
in the rubric have been scored.
Returns:
int
Raises:
InvalidClassifier
ScoreError
"""
try:
from ease.essay_set import EssaySet # pylint:disable=F0401
except ImportError:
msg = u"Could not import EASE to grade essays."
raise ScoreError(msg)
feature_extractor, score_classifier = self._deserialize_classifiers(classifier)
# The following is a modified version of `ease.grade.grade()`,
# skipping things we don't use (cross-validation, feedback)
# and caching essay sets across criteria. This allows us to
# avoid some expensive NLTK operations, particularly tagging
# parts of speech.
try:
# Get the essay set from the cache or create it.
# Since all essays to be graded are assigned a dummy
# score of "0", we can safely re-use the essay set
# for each criterion in the rubric.
# EASE can't handle non-ASCII unicode, so we need
# to strip out non-ASCII chars.
essay_set = cache.get('grading_essay_set')
if essay_set is None:
essay_set = EssaySet(essaytype="test")
essay_set.add_essay(text.encode('ascii', 'ignore'), 0)
cache['grading_essay_set'] = essay_set
# Extract features from the text
features = feature_extractor.gen_feats(essay_set)
# Predict a score
return int(score_classifier.predict(features)[0])
        except Exception:
msg = (
u"An unexpected error occurred while using "
u"EASE to score an essay: {traceback}"
).format(traceback=traceback.format_exc())
raise ScoreError(msg)
def _train_classifiers(self, examples):
"""
Use EASE to train classifiers.
Args:
examples (list of AIAlgorithm.ExampleEssay): Example essays and scores.
Returns:
tuple of `feature_extractor` (an `ease.feature_extractor.FeatureExtractor` object)
and `classifier` (a `sklearn.ensemble.GradientBoostingClassifier` object).
Raises:
TrainingError: Could not load EASE or could not complete training.
"""
try:
from ease.create import create # pylint: disable=F0401
except ImportError:
msg = u"Could not import EASE to perform training."
raise TrainingError(msg)
input_essays = [example.text for example in examples]
input_scores = [example.score for example in examples]
try:
# Train the classifiers
# The third argument is the essay prompt, which EASE uses
# to check if an input essay is too similar to the prompt.
# Since we're not using this feature, we pass in an empty string.
results = create(input_essays, input_scores, "")
        except Exception:
msg = (
u"An unexpected error occurred while using "
u"EASE to train classifiers: {traceback}"
).format(traceback=traceback.format_exc())
raise TrainingError(msg)
if not results.get('success', False):
msg = (
u"Errors occurred while training classifiers "
u"using EASE: {errors}"
).format(errors=results.get('errors', []))
raise TrainingError(msg)
return results.get('feature_ext'), results.get('classifier')
def _serialize_classifiers(self, feature_ext, classifier):
"""
Serialize the classifier objects.
Args:
feature_extractor (ease.feature_extractor.FeatureExtractor)
classifier (sklearn.ensemble.GradientBoostingClassifier)
Returns:
dict containing the pickled classifiers
Raises:
TrainingError: Could not serialize the classifiers.
"""
try:
return {
'feature_extractor': base64.b64encode(pickle.dumps(feature_ext)),
'score_classifier': base64.b64encode(pickle.dumps(classifier)),
}
except Exception as ex:
msg = (
u"An error occurred while serializing the classifiers "
u"created by EASE: {ex}"
).format(ex=ex)
raise TrainingError(msg)
def _deserialize_classifiers(self, classifier_data):
"""
Deserialize the classifier objects.
Args:
classifier_data (dict): The serialized classifiers.
Returns:
tuple of `(feature_extractor, score_classifier)`
Raises:
InvalidClassifier
"""
if not isinstance(classifier_data, dict):
raise InvalidClassifier("Classifier must be a dictionary.")
try:
classifier_str = classifier_data.get('feature_extractor').encode('utf-8')
feature_extractor = pickle.loads(base64.b64decode(classifier_str))
except Exception as ex:
msg = (
u"An error occurred while deserializing the "
u"EASE feature extractor: {ex}"
).format(ex=ex)
raise InvalidClassifier(msg)
try:
score_classifier_str = classifier_data.get('score_classifier').encode('utf-8')
score_classifier = pickle.loads(base64.b64decode(score_classifier_str))
except Exception as ex:
msg = (
u"An error occurred while deserializing the "
u"EASE score classifier: {ex}"
).format(ex=ex)
raise InvalidClassifier(msg)
return feature_extractor, score_classifier
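# Round-trip sketch of the serialized classifier handled above (illustrative
# only; the base64 payloads are truncated dummy values):
#
#   serialized = {
#       'feature_extractor': 'gAJjZWFzZS5mZWF0dXJlX2V4...',
#       'score_classifier': 'gAJjc2tsZWFybi5lbnNlbWJsZ...',
#   }
#
# Both values are base64-encoded pickles, hence plain ASCII strings that can
# be stored as JSON, as required by AIAlgorithm.train_classifier(). They are
# turned back into objects by _deserialize_classifiers() before scoring.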
| agpl-3.0 |
gdetor/SI-RF-Structure | Statistics/weights-evo.py | 1 | 4698 | # Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script computes the evolution of the raw feed-forward weights as it is
# described in [1].
import numpy as np
import matplotlib.pylab as plt
def mad(x):
""" Returns the median absolute deviation.
**Parameters**
x : double
Data array
"""
return 1.4826 * np.median(np.abs(x - np.median(x)))
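# Worked example (illustrative only): for x = [1, 2, 4, 8] the median is 3 and
# the absolute deviations are [2, 1, 1, 5] with median 1.5, so
# mad(x) = 1.4826 * 1.5 ~= 2.22. The 1.4826 factor makes the MAD a consistent
# estimator of the standard deviation for normally distributed data.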
if __name__ == '__main__':
base = '/Users/gdetorak/Desktop/DNF-SOM/REF/'
m, n, q = 1024, 16, 256
if 0:
epochs = 35000
mean, std, median, mmad = [], [], [], []
for i in range(0, epochs, 50):
W = np.load(base+'weights'+str('%06d' % i)+'.npy')
mean.append(np.mean(np.mean(W, axis=1)))
std.append(np.std(np.std(W, axis=1)))
median.append(np.median(np.median(W, axis=1)))
mmad.append(np.apply_along_axis(mad, 0, np.apply_along_axis(mad, 1,
W)))
mean = np.array(mean)
std = np.array(std)
median = np.array(median)
mmad = np.array(mmad)
np.save('mean', mean)
np.save('std', std)
np.save('median', median)
np.save('mad', mmad)
if 1:
mean = np.load('mean.npy')
std = np.load('std.npy')
# median = np.load('median.npy')
# mmad = np.load('mad.npy')
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111)
M, target = 200, 100
ax.plot(mean[:M], 'b', lw=2)
ax.plot(mean[:M]+std[:M], 'r', ls='--', lw=1.3)
ax.plot(mean[:M]-std[:M], 'r', ls='--', lw=1.3)
# ax.plot(median[:M], 'k', lw=2)
# ax.plot(mmad[:M], 'm', lw=2)
ax.set_xlabel('Epochs')
ax.set_ylabel('Mean (SD)')
plt.xticks([0, 50, 100, 150, 200],
('0', '2500', '5000', '7500', '10000'))
        plt.title('Evolution of ' + r'$\mathbb{E}\{w_f\}$')
bx = plt.axes([.19, .70, .15, .15], axisbg='y')
W = np.load(base+'weights000000.npy')[target].reshape(n, n)
bx.imshow(W, interpolation='bicubic', cmap=plt.cm.Purples)
plt.title(r'epoch=0')
plt.setp(bx, xticks=[], yticks=[])
cx = plt.axes([.27, .45, .15, .15], axisbg='y')
W = np.load(base+'weights002500.npy')[target].reshape(n, n)
cx.imshow(W, interpolation='bicubic', cmap=plt.cm.Purples)
plt.title(r'epoch=2500')
plt.setp(cx, xticks=[], yticks=[])
dx = plt.axes([.63, .3, .15, .15], axisbg='y')
W = np.load(base+'weights007500.npy')[target].reshape(n, n)
dx.imshow(W, interpolation='bicubic', cmap=plt.cm.Purples)
plt.title(r'epoch=7500')
plt.setp(dx, xticks=[], yticks=[])
# plt.savefig('evolution-mean-weights.pdf')
plt.show()
| gpl-3.0 |
nhejazi/scikit-learn | benchmarks/bench_plot_neighbors.py | 101 | 6469 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
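    # The three blocks below each time tree construction and k-NN queries for
    # every algorithm while varying one of N (number of samples), D
    # (dimensionality) or k (number of neighbours), holding the other two at
    # the fiducial values above.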
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
plt.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = plt.subplot(sbplt, yscale='log')
plt.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = plt.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = plt.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
plt.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
plt.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
plt.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
plt.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
plt.show()
| bsd-3-clause |
alfcrisci/word_cloud | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 52 | 8004 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
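# Example usage (illustrative only):
#
#   import numpy as np
#   doc = get_doc_object(np.mean)   # dispatches to SphinxFunctionDoc
#   rst = str(doc)                  # reStructuredText for Sphinx to render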
| mit |
INM-6/hybridLFPy | examples/Hagen_et_al_2016_cercor/Fig3/analysis_params.py | 2 | 4334 | '''
Analysis and plotting parameters (figure sizes, font sizes, file names and
spectral-analysis settings) shared by the figure scripts.
'''
import matplotlib.pyplot as plt
# global flag for black and white figures
bw = False
class params():
def __init__(self,
textsize=6,
titlesize=7,):
# label for merged spike files
self.pop_spike_file_label = ['population_spikes-th.gdf',
'population_spikes-0-0.gdf',
'population_spikes-0-1.gdf',
'population_spikes-1-0.gdf',
'population_spikes-1-1.gdf',
'population_spikes-2-0.gdf',
'population_spikes-2-1.gdf',
'population_spikes-3-0.gdf',
'population_spikes-3-1.gdf']
# start-up transient that is cut-off
self.cutoff = 0 # 200.
# bin size
self.tbin = 1.
# frequency filtering
self.Df = 1.
self.analysis_folder = 'data_analysis'
self.fname_psd = 'psd.h5'
self.fname_psd_uncorr = 'psd_uncorr.h5'
self.fname_meanInpCurrents = 'meanInpCurrents.h5'
self.fname_meanVoltages = 'meanVoltages.h5'
self.fname_variances = 'variances.h5'
self.scaling = 0.1
self.transient = 200
self.Df = None
self.mlab = True
self.NFFT = 256
self.noverlap = 128
self.window = plt.mlab.window_hanning
self.PLOSwidth1Col = 3.27 * 2 # in inches
self.PLOSwidth2Col = 6.83 * 2
self.inchpercm = 2.54
self.frontierswidth = 8.5 * 2
self.PLOSwidth = 6.83 * 2
self.textsize = textsize * 2
self.titlesize = titlesize * 2
# global colormap for populations
self.bw = True
        # default plot parameters go here; they may be overridden by the
        # style-setting methods below
plt.rcdefaults()
plt.rcParams.update({
'figure.figsize': [self.frontierswidth / self.inchpercm,
self.frontierswidth / self.inchpercm],
'figure.dpi': 160,
'xtick.labelsize': self.textsize,
'ytick.labelsize': self.textsize,
'font.size': self.textsize,
'axes.labelsize': self.textsize,
'axes.titlesize': self.titlesize,
'axes.linewidth': 0.75 * 2,
'lines.linewidth': 0.5 * 2,
'legend.fontsize': self.textsize / 1.25,
'savefig.dpi': 300 / 2
})
# Set default plotting parameters
self.set_default_fig_style()
def set_default_fig_style(self):
'''default figure size'''
plt.rcParams.update({'figure.figsize': [
self.frontierswidth / self.inchpercm, self.frontierswidth / self.inchpercm], })
def set_large_fig_style(self):
'''twice width figure size'''
plt.rcParams.update({'figure.figsize': [
self.frontierswidth / self.inchpercm * 2, self.frontierswidth / self.inchpercm], })
def set_broad_fig_style(self):
'''4 times width, 1.5 times height'''
        plt.rcParams.update({
            'figure.figsize': [self.frontierswidth / self.inchpercm * 4,
                               self.frontierswidth / self.inchpercm * 1.5],
        })
def set_enormous_fig_style(self):
'''2 times width, 2 times height'''
        plt.rcParams.update({
            'figure.figsize': [self.frontierswidth / self.inchpercm * 2,
                               self.frontierswidth / self.inchpercm * 2],
        })
def set_PLOS_1column_fig_style(self, ratio=1):
'''figure size corresponding to Plos 1 column'''
plt.rcParams.update({
'figure.figsize': [self.PLOSwidth1Col, self.PLOSwidth1Col * ratio],
})
def set_PLOS_2column_fig_style(self, ratio=1):
'''figure size corresponding to Plos 2 columns'''
plt.rcParams.update({
'figure.figsize': [self.PLOSwidth2Col, self.PLOSwidth2Col * ratio],
})
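# Example usage (illustrative only):
#
#   prms = params(textsize=6, titlesize=7)
#   prms.set_PLOS_1column_fig_style(ratio=0.75)   # updates matplotlib rcParams
#   fig, ax = plt.subplots()                      # new figures pick up the size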
| gpl-3.0 |
shenzebang/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
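# Illustrative sketch (hypothetical, assumes the np/dbscan imports of this
# module): a point with integer sample_weight w behaves like w identical
# copies of that point, so core status under weighting matches core status
# under explicit repetition, as the test above asserts on clustered data.
def _example_weight_vs_repetition():
    X_toy = np.array([[0.0], [0.25], [3.0]])
    w = np.array([3, 1, 1])
    core_w, _ = dbscan(X_toy, eps=0.5, min_samples=4, sample_weight=w)
    core_r, _ = dbscan(np.repeat(X_toy, w, axis=0), eps=0.5, min_samples=4)
    return core_w, core_r  # the same underlying points come out as core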
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
doylew/detectionsc | format_py/n_gram_nb.py | 1 | 5595 | ##################################################
######scikit_learn to do the classifications######
##################################################
##################################################
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
##################################################
#####Hard coded (currently) where the datasets####
#################are located######################
##################################################
attack_file = 'C:/Users/will_doyle/Documents/GitHub/datamining/format_py/single_ngram_attack.txt'
normal_file = 'C:/Users/will_doyle/Documents/GitHub/datamining/format_py/single_ngram_normal.txt'
test_file = 'C:/Users/will_doyle/Documents/GitHub/datamining/format_py/single_ngram_vali.txt'
##################################################
####Create the instances for validation testing###
##################################################
##################################################
def makeValiInstance(fileName):
if isinstance(fileName,str):
my_file = open(str(fileName),"r+")
words = my_file.read().split("\n")
my_file.close()
words.remove('')
num_instances = words.count("new")
print("Number of Instances to Validate: " + str(num_instances))
instance = []
data = []
for line in words:
if line == "new":
my_data = [data]
instance += (my_data)
data = []
data.extend([line.split()])
for i in instance:
for entry in i:
if '1' in entry:
entry.remove('1')
if '0' in entry:
entry.remove('0')
return instance
else:
return -1
##################################################
#####Create the instances for training############
##################################################
##################################################
def makeFitInstance(fileName):
if isinstance(fileName, str):
my_file = open(str(fileName), "r+")
words = my_file.read().split("\n")
my_file.close()
words.remove('')
data = []
for line in words:
data.extend([line.split()])
classi = []
for entry in data:
if entry[-1] == '1':
classi.extend('a')
entry.remove('1')
else:
classi.extend('n')
entry.remove('0')
instance = {}
instance[0] = data
instance[1] = classi
return instance
else:
return -1
##################################################
#######Calculates the class of the subsequences###
########as a ratio################################
##################################################
def calClass(svm,data):
normal = ['n']
attack = ['a']
num = 0
total_n = 0
total_a = 0
if ['new'] in data:
data.remove(['new'])
for x in data:
num += 1
if svm.predict(x) == attack:
total_a += 1
elif svm.predict(x) == normal:
total_n += 1
else:
print("OOPS")
return
nratio = (float(total_n)/float(num))
aratio = (float(total_a)/float(num))
if nratio > 0.9:
return 'normal'
else:
return 'attack'
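##################################################
####Illustrative sketch (hypothetical, not########
####part of the original script): the 90%#########
####voting rule above, run with a stub model######
##################################################
def _example_vote_threshold():
    class _StubModel(object):
        def predict(self, x):
            # pretend every subsequence looks normal
            return ['n']
    data = [['sys_read', 'sys_write'], ['sys_open', 'sys_close']]
    return calClass(_StubModel(), data)  # -> 'normal'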
##################################################
#########Percentage validation####################
###########of the validation data#################
##################################################
def validateClass(svm,validation_array):
validate = 0.0
num = 0.0
print("length: " + str(len(validation_array)))
for data in validation_array:
num += 1
if calClass(svm,data) == 'normal':
validate += 1
print("NUM: " + str(int(num)) + " CLASSIFIED AS: " + calClass(svm,data))
return float((validate)/(num))
##################################################
################Main##############################
##################################################
##################################################
print("Creating the training data...")
##################################################
#############Create the attack and################
#################normal data and combine them#####
##################################################
instance_a = makeFitInstance(attack_file)
instance_n = makeFitInstance(normal_file)
fit_data = instance_a[0] + instance_n[0]
fit_classes = instance_a[1] + instance_n[1]
print("Training the model....")
##################################################
##############Train the Support Vector############
######################Machine#####################
##################################################
clf = GaussianNB()
clf.fit(fit_data,fit_classes)
print("Model has been trained, building test dataset...")
##################################################
#############Create the validation data###########
##################################################
##################################################
instance_v = makeValiInstance(test_file)
print("Validating the test dataset...")
##################################################
############Validate the data with the trained####
###############Support Vector Machine#############
##################################################
print("Percentage validated correctly: " + str(validateClass(clf,instance_v)))
| mit |
lucidfrontier45/scikit-learn | sklearn/mixture/gmm.py | 1 | 26965 | """
Gaussian Mixture Models.
This implementation corresponds to the frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.extmath import logsumexp, pinvh
from .. import cluster
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
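# Illustrative sketch (hypothetical, not part of the library): for a single
# 1-D Gaussian the 'diag' log-density reduces to the familiar closed form
#     log N(x | m, v) = -0.5 * (log(2*pi*v) + (x - m)**2 / v)
# which can be checked against log_multivariate_normal_density directly.
def _example_log_density_1d():
    X = np.array([[0.0], [1.0]])
    means = np.array([[0.0]])
    covars = np.array([[2.0]])
    lpr = log_multivariate_normal_density(X, means, covars, 'diag')
    expected = -0.5 * (np.log(2 * np.pi * 2.0) + (X[:, 0] - 0.0) ** 2 / 2.0)
    return np.allclose(lpr[:, 0], expected)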
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covars : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
from scipy import linalg
U, s, V = linalg.svd(covar)
sqrtS = np.diag(np.sqrt(s))
sqrt_covar = np.dot(U, np.dot(sqrtS, V))
rand = np.dot(sqrt_covar, rand)
return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform; the best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
`weights_` : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`covars_` : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
`converged_` : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=1e-2, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on `covariance_type`::
(`n_components`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def eval(self, X):
"""Evaluate the model on data
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob: array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('the shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.eval(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.eval(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.eval(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in xrange(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X):
"""Estimate model parameters with the expectation-maximization
algorithm.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
## initialization step
X = np.asarray(X, dtype=np.float)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
log_likelihood = []
# reset self.converged_ to False
self.converged_ = False
for i in xrange(self.n_iter):
# Expectation step
curr_log_likelihood, responsibilities = self.eval(X)
log_likelihood.append(curr_log_likelihood.sum())
# Check for convergence.
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if log_likelihood[-1] > max_log_prob:
max_log_prob = log_likelihood[-1]
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
# check that at least one initialization produced parameters that were
# not affected by likelihood computation issues.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
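# Illustrative sketch (hypothetical, not part of the library): a common use
# of bic() above is selecting the number of mixture components; the candidate
# with the lowest BIC wins. The data and candidate range are made up here.
def _example_bic_selection():
    rng = np.random.RandomState(0)
    obs = np.concatenate((rng.randn(100, 1), 10 + rng.randn(100, 1)))
    bics = {}
    for k in (1, 2, 3):
        bics[k] = GMM(n_components=k, random_state=0).fit(obs).bic(obs)
    return min(bics, key=bics.get)  # typically 2 for this two-mode sample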
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
from scipy import linalg
n_samples, n_dim = X.shape
icv = pinvh(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
from scipy import linalg
import itertools
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(itertools.izip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations; reinitialize this component's covariance
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape"
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
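# Illustrative sketch (hypothetical, not part of the library): how one 2x2
# template covariance is broadcast into the per-component shapes documented
# for each covariance type.
def _example_distribute_covar():
    tied_cv = np.array([[2.0, 0.0], [0.0, 3.0]])
    shapes = {}
    for ctype in ('spherical', 'tied', 'diag', 'full'):
        cv = distribute_covar_matrix_to_match_covariance_type(tied_cv, ctype, 3)
        shapes[ctype] = np.asarray(cv).shape
    # {'spherical': (3, 2), 'tied': (2, 2), 'diag': (3, 2), 'full': (3, 2, 2)}
    return shapes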
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in xrange(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian Distribution"
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/datasets/ccard/data.py | 25 | 1635 | """Bill Greene's credit scoring data."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission of the original author, who
retains all rights."""
TITLE = __doc__
SOURCE = """
William Greene's `Econometric Analysis`
More information can be found at the web site of the text:
http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm
"""
DESCRSHORT = """William Greene's credit scoring data"""
DESCRLONG = """More information on this data can be found on the
homepage for Greene's `Econometric Analysis`. See source.
"""
NOTE = """::
Number of observations - 72
Number of variables - 5
Variable name definitions - See Source for more information on the
variables.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""Load the credit card data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""Load the credit card data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/ccard.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
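# Illustrative usage sketch (hypothetical, assuming the packaged ccard.csv
# sits next to this module): the returned Dataset exposes endog/exog arrays.
def _example_usage():
    data = load()
    return data.endog.shape, data.exog.shape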
| bsd-3-clause |
reuk/wayverb | demo/evaluation/room_materials/rt60.py | 2 | 1970 | #!/usr/local/bin/python
import numpy as np
import matplotlib
render = True
if render:
matplotlib.use('pgf')
import matplotlib.pyplot as plt
from string import split
import scipy.signal as signal
import wave
import math
import os
import re
import json
import sys
sys.path.append('python')
def get_frequency_rt30_tuple(line):
split = line.split()
return (float(split[0]), float(split[6]))
def read_rt30(fname):
with open(fname) as f:
lines = f.readlines()
return [get_frequency_rt30_tuple(line) for line in lines[14:22]]
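# Illustrative sketch (hypothetical): read_rt30 expects whitespace-separated
# rows where column 0 is the band centre frequency and column 6 is the T30
# value. The line below is made up, shaped only to show the parsing.
def _example_parse_line():
    line = "125 0 0 0 0 0 1.23 0"
    return get_frequency_rt30_tuple(line)  # -> (125.0, 1.23)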
def main():
files = [
("0.02", "0.02.txt"),
("0.04", "0.04.txt"),
("0.08", "0.08.txt"),
]
for label, fname in files:
tuples = read_rt30(fname)
x = [freq for freq, _ in tuples]
y = [time for _, time in tuples]
min_time = min(y)
max_time = max(y)
# percentage spread of T30 across bands, relative to the band mid-value
average = (max_time - min_time) * 100.0 / ((max_time + min_time) * 0.5)
print('file: {}, min: {}, max: {}, average: {}'.format(
fname, min_time, max_time, average))
plt.plot(x, y, label=label, marker='o', linestyle='--')
plt.xscale('log')
plt.axvline(x=500)
plt.annotate(xy=(520, 1.4), s='waveguide cutoff')
plt.legend(loc='lower center', ncol=3, bbox_to_anchor=(0, -0.05, 1, 1), bbox_transform=plt.gcf().transFigure)
plt.title('Octave-band T30 Measurements for Different Surface Absorption Coefficients')
plt.xlabel('frequency / Hz')
plt.ylabel('time / s')
plt.tight_layout()
#plt.subplots_adjust(top=0.9)
plt.show()
if render:
plt.savefig('room_absorption_rt30.svg', bbox_inches='tight', dpi=96, format='svg')
if __name__ == '__main__':
pgf_with_rc_fonts = {
'font.family': 'serif',
'font.serif': [],
'font.sans-serif': ['Helvetica Neue'],
'legend.fontsize': 12,
}
matplotlib.rcParams.update(pgf_with_rc_fonts)
main()
| gpl-2.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/spyder/utils/introspection/test/test_jedi_plugin.py | 1 | 2930 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
"""Tests for jedi_plugin.py"""
from textwrap import dedent
import pytest
from spyder.utils.introspection.manager import CodeInfo
from spyder.utils.introspection import jedi_plugin
try:
import numpydoc
except ImportError:
numpydoc = None
try:
import numpy
except ImportError:
numpy = None
try:
import matplotlib
except ImportError:
matplotlib = None
p = jedi_plugin.JediPlugin()
p.load_plugin()
def test_get_info():
source_code = "import os; os.walk("
docs = p.get_info(CodeInfo('info', source_code, len(source_code)))
assert docs['calltip'].startswith('walk(') and docs['name'] == 'walk'
def test_get_completions():
source_code = "import o"
completions = p.get_completions(CodeInfo('completions', source_code,
len(source_code)))
assert ('os', 'module') in completions
def test_get_definition():
source_code = "import os; os.walk"
path, line_nr = p.get_definition(CodeInfo('definition', source_code,
len(source_code)))
assert 'os.py' in path
def test_get_path():
source_code = 'from spyder.utils.introspection.manager import CodeInfo'
path, line_nr = p.get_definition(CodeInfo('definition', source_code,
len(source_code), __file__))
assert 'utils' in path and 'introspection' in path
def test_get_docstring():
source_code = dedent('''
def test(a, b):
"""Test docstring"""
pass
test(1,''')
path, line = p.get_definition(CodeInfo('definition', source_code,
len(source_code), 'dummy.txt',
is_python_like=True))
assert line == 2
docs = p.get_info(CodeInfo('info', source_code, len(source_code),
__file__))
assert 'Test docstring' in docs['docstring']
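# Illustrative sketch (hypothetical, mirroring the calls above): each plugin
# request wraps the buffer and cursor offset in a CodeInfo object; here the
# cursor sits at the end of a partial attribute access.
def _example_codeinfo():
    source_code = "import os; os.pa"
    info = CodeInfo('completions', source_code, len(source_code))
    return p.get_completions(info)  # expected to contain ('path', 'module')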
@pytest.mark.skipif(not(numpy and numpydoc),
reason="numpy and numpydoc required")
def test_numpy_returns():
source_code = dedent('''
import numpy as np
x = np.array([1,2,3])
x.a''')
completions = p.get_completions(CodeInfo('completions', source_code,
len(source_code)))
assert ('argmax', 'function') in completions
@pytest.mark.skipif(not(matplotlib and numpydoc),
reason="matplotlib required")
def test_matplotlib_fig_returns():
source_code = dedent('''
import matplotlib.pyplot as plt
fig = plt.figure()
fig.''')
completions = p.get_completions(CodeInfo('completions', source_code,
len(source_code)))
assert ('add_axes', 'function') in completions
if __name__ == '__main__':
pytest.main()
| gpl-3.0 |
ucloud/uai-sdk | examples/caffe/train/faster-rcnn/code/tools/train_svms.py | 16 | 13480 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
# Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
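# Illustrative sketch (hypothetical, not part of the original script): the
# objective printed by SVMClassTrainer.train() is an L2-regularised,
# class-weighted hinge loss,
#     0.5*||w||^2 + 0.5*b^2 + C * sum_i w_i * max(0, 1 - y_i * f(x_i)),
# with w_i = pos_weight for positives and 1 for negatives.
def _example_hinge_objective():
    scores = np.array([1.5, 0.2, -0.8])  # decision values f(x_i)
    y = np.array([1, 1, -1])             # labels
    C, pos_weight = 0.001, 2.0
    margins = np.maximum(0, 1 - y * scores)
    weights = np.where(y > 0, pos_weight, 1.0)
    return C * np.sum(weights * margins)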
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| apache-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/io/tests/test_json/test_ujson.py | 9 | 54415 | # -*- coding: utf-8 -*-
from unittest import TestCase
try:
import json
except ImportError:
import simplejson as json
import math
import nose
import platform
import sys
import time
import datetime
import calendar
import re
import decimal
from functools import partial
from pandas.compat import range, zip, StringIO, u
import pandas.json as ujson
import pandas.compat as compat
import numpy as np
from numpy.testing import (assert_array_almost_equal_nulp,
assert_approx_equal)
import pytz
import dateutil
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex
import pandas.util.testing as tm
def _skip_if_python_ver(skip_major, skip_minor=None):
major, minor = sys.version_info[:2]
if major == skip_major and (skip_minor is None or minor == skip_minor):
raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
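# Illustrative sketch (hypothetical, not part of the test suite): the
# double_precision keyword controls how many decimal digits survive a float
# round-trip, which the precision tests below verify more thoroughly.
def _example_double_precision():
    encoded = ujson.encode(math.pi, double_precision=3)
    return encoded, ujson.decode(encoded)  # ('3.142', 3.142)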
class UltraJSONTests(TestCase):
def test_encodeDecimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(decoded, 1337.1337)
def test_encodeStringConversion(self):
input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
def helper(expected_output, **encode_kwargs):
output = ujson.encode(input, **encode_kwargs)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, expected_output)
self.assertEqual(input, ujson.decode(output))
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded, ensure_ascii=True)
helper(not_html_encoded, ensure_ascii=False)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
def test_doubleLongIssue(self):
sut = {u('a'): -4342969734183514}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
self.assertEqual(sut, decoded)
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(sut, decoded)
def test_doubleLongDecimalIssue(self):
sut = {u('a'): -12345678901234.56789012}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
self.assertEqual(sut, decoded)
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(sut, decoded)
def test_encodeNonCLocale(self):
import locale
savedlocale = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, 'it_IT.UTF-8')
except:
try:
locale.setlocale(locale.LC_NUMERIC, 'Italian_Italy')
except:
raise nose.SkipTest('Could not set locale for testing')
self.assertEqual(ujson.loads(ujson.dumps(4.78e60)), 4.78e60)
self.assertEqual(ujson.loads('4.78', precise_float=True), 4.78)
locale.setlocale(locale.LC_NUMERIC, savedlocale)
def test_encodeDecodeLongDecimal(self):
sut = {u('a'): -528656961.4399388}
encoded = ujson.dumps(sut, double_precision=15)
ujson.decode(encoded)
def test_decimalDecodeTestPrecise(self):
sut = {u('a'): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
self.assertEqual(sut, decoded)
def test_encodeDoubleTinyExponential(self):
if compat.is_platform_windows() and not compat.PY3:
raise nose.SkipTest("buggy on win-64 for py2")
num = 1e-40
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = 1e-100
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = -1e-45
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = -1e-145
self.assertTrue(np.allclose(num, ujson.decode(ujson.encode(num))))
def test_encodeDictWithUnicodeKeys(self):
input = {u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1"),
u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1")}
output = ujson.encode(input)
input = {u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1")}
output = ujson.encode(input)
pass
def test_encodeDoubleConversion(self):
input = math.pi
output = ujson.encode(input)
self.assertEqual(round(input, 5), round(json.loads(output), 5))
self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
def test_encodeWithDecimal(self):
input = 1.0
output = ujson.encode(input)
self.assertEqual(output, "1.0")
def test_encodeDoubleNegConversion(self):
input = -math.pi
output = ujson.encode(input)
self.assertEqual(round(input, 5), round(json.loads(output), 5))
self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
def test_encodeArrayOfNestedArrays(self):
input = [[[[]]]] * 20
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = np.array(input)
tm.assert_numpy_array_equal(input, ujson.decode(output, numpy=True, dtype=input.dtype))
def test_encodeArrayOfDoubles(self):
input = [ 31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
def test_doublePrecisionTest(self):
input = 30.012345678901234
output = ujson.encode(input, double_precision = 15)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
output = ujson.encode(input, double_precision = 9)
self.assertEqual(round(input, 9), json.loads(output))
self.assertEqual(round(input, 9), ujson.decode(output))
output = ujson.encode(input, double_precision = 3)
self.assertEqual(round(input, 3), json.loads(output))
self.assertEqual(round(input, 3), ujson.decode(output))
def test_invalidDoublePrecision(self):
input = 30.12345678901234567890
self.assertRaises(ValueError, ujson.encode, input, double_precision = 20)
self.assertRaises(ValueError, ujson.encode, input, double_precision = -1)
# will raise TypeError
self.assertRaises(TypeError, ujson.encode, input, double_precision = '9')
# will raise TypeError
self.assertRaises(TypeError, ujson.encode, input, double_precision = None)
def test_encodeStringConversion2(self):
input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, '"A string \\\\ \\/ \\b \\f \\n \\r \\t"')
self.assertEqual(input, ujson.decode(output))
pass
def test_decodeUnicodeConversion(self):
pass
def test_encodeUnicodeConversion1(self):
input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeControlEscaping(self):
input = "\x19"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(input, dec)
self.assertEqual(enc, json_unicode(input))
def test_encodeUnicodeConversion2(self):
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicodeSurrogatePair(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf0\x90\x8d\x86"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8Highest(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeArrayInArray(self):
input = [[[[]]]]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeIntConversion(self):
input = 31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeIntNegConversion(self):
input = -31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeLongNegConversion(self):
input = -9223372036854775808
output = ujson.encode(input)
outputjson = json.loads(output)
outputujson = ujson.decode(output)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeListConversion(self):
input = [ 1, 2, 3, 4 ]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeDictConversion(self):
input = { "k1": 1, "k2": 2, "k3": 3, "k4": 4 }
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeNoneConversion(self):
input = None
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeTrueConversion(self):
input = True
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeFalseConversion(self):
input = False
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
def test_encodeDatetimeConversion(self):
ts = time.time()
input = datetime.datetime.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
expected = calendar.timegm(input.utctimetuple())
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeDateConversion(self):
ts = time.time()
input = datetime.date.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
tup = (input.year, input.month, input.day, 0, 0, 0)
expected = calendar.timegm(tup)
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeTimeConversion(self):
tests = [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
datetime.time(10, 12, 15, 343243, pytz.utc),
# datetime.time(10, 12, 15, 343243, dateutil.tz.gettz('UTC')), # this segfaults! No idea why.
]
for test in tests:
output = ujson.encode(test)
expected = '"%s"' % test.isoformat()
self.assertEqual(expected, output)
def test_nat(self):
input = NaT
assert ujson.encode(input) == 'null', "Expected null"
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise nose.SkipTest("numpy version < 1.7.0, is "
"{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
def test_datetime_units(self):
from pandas.lib import Timestamp
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
self.assertEqual(roundtrip, stamp.value // 10**9)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
self.assertEqual(roundtrip, stamp.value // 10**6)
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
self.assertEqual(roundtrip, stamp.value // 10**3)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
self.assertEqual(roundtrip, stamp.value)
self.assertRaises(ValueError, ujson.encode, val, date_unit='foo')
def test_encodeToUTF8(self):
_skip_if_python_ver(2, 5)
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input, ensure_ascii=False)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input, ensure_ascii=False))
self.assertEqual(dec, json.loads(enc))
def test_decodeFromUnicode(self):
input = u("{\"obj\": 31337}")
dec1 = ujson.decode(input)
dec2 = ujson.decode(str(input))
self.assertEqual(dec1, dec2)
def test_encodeRecursionMax(self):
# 8 is the max recursion depth
class O2:
member = 0
pass
class O1:
member = 0
pass
input = O1()
input.member = O2()
input.member.member = input
try:
output = ujson.encode(input)
assert False, "Expected overflow exception"
except(OverflowError):
pass
def test_encodeDoubleNan(self):
input = np.nan
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleInf(self):
input = np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleNegInf(self):
input = -np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_decodeJibberish(self):
input = "fdsa sda v9sa fdsa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayStart(self):
input = "["
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectStart(self):
input = "{"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayEnd(self):
input = "]"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeArrayDepthTooBig(self):
input = '[' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectEnd(self):
input = "}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeObjectDepthTooBig(self):
input = '{' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUnterminated(self):
input = "\"TESTING"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUntermEscapeSequence(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringBadEscape(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeTrueBroken(self):
input = "tru"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeFalseBroken(self):
input = "fa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNullBroken(self):
input = "n"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenDictKeyTypeLeakTest(self):
input = '{{1337:""}}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except ValueError as e:
continue
assert False, "Wrong exception"
def test_decodeBrokenDictLeakTest(self):
input = '{{"key":"}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeBrokenListLeakTest(self):
input = '[[[true'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeDictWithNoKey(self):
input = "{{{{31337}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoColonOrValue(self):
input = "{{{{\"key\"}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoValue(self):
input = "{{{{\"key\":}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNumericIntPos(self):
input = "31337"
self.assertEqual(31337, ujson.decode(input))
def test_decodeNumericIntNeg(self):
input = "-31337"
self.assertEqual(-31337, ujson.decode(input))
def test_encodeUnicode4BytesUTF8Fail(self):
_skip_if_python_ver(3)
input = "\xfd\xbf\xbf\xbf\xbf\xbf"
try:
enc = ujson.encode(input)
assert False, "Expected exception"
except OverflowError:
pass
def test_encodeNullCharacter(self):
input = "31337 \x00 1337"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = "\x00"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
self.assertEqual('" \\u0000\\r\\n "', ujson.dumps(u(" \u0000\r\n ")))
pass
def test_decodeNullCharacter(self):
input = "\"31337 \\u0000 31337\""
self.assertEqual(ujson.decode(input), json.loads(input))
def test_encodeListLongConversion(self):
input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807, 9223372036854775807 ]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True,
dtype=np.int64))
pass
def test_encodeLongConversion(self):
input = 9223372036854775807
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_numericIntExp(self):
input = "1337E40"
output = ujson.decode(input)
self.assertEqual(output, json.loads(input))
def test_numericIntFrcExp(self):
input = "1.337E40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpEPLUS(self):
input = "1337E+9"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpePLUS(self):
input = "1.337e+40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpE(self):
input = "1337E40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpe(self):
input = "1337e40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpEMinus(self):
input = "1.337E-4"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpeMinus(self):
input = "1.337e-4"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_dumpToFile(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
self.assertEqual("[1,2,3]", f.getvalue())
def test_dumpToFileLikeObject(self):
class filelike:
def __init__(self):
self.bytes = ''
def write(self, bytes):
self.bytes += bytes
f = filelike()
ujson.dump([1, 2, 3], f)
self.assertEqual("[1,2,3]", f.bytes)
def test_dumpFileArgsError(self):
try:
ujson.dump([], '')
except TypeError:
pass
else:
assert False, 'expected TypeError'
def test_loadFile(self):
f = StringIO("[1,2,3,4]")
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = StringIO("[1,2,3,4]")
tm.assert_numpy_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileLikeObject(self):
class filelike:
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
f = filelike()
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = filelike()
tm.assert_numpy_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileArgsError(self):
try:
ujson.load("[]")
except TypeError:
pass
else:
assert False, "expected TypeError"
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encodeNumericOverflow(self):
try:
ujson.encode(12839128391289382193812939)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_encodeNumericOverflowNested(self):
for n in range(0, 100):
class Nested:
x = 12839128391289382193812939
nested = Nested()
try:
ujson.encode(nested)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_decodeNumberWith32bitSignBit(self):
        # Test that numbers that fit within 32 bits but would have the
        # sign bit set (2**31 <= x < 2**32) are decoded properly.
        boundary1 = 2**31
        boundary2 = 2**32
        docs = (
            '{"id": 3590016419}',
            '{"id": %s}' % boundary1,
            '{"id": %s}' % boundary2,
            '{"id": %s}' % (boundary2 - 1),
        )
        results = (3590016419, boundary1, boundary2, boundary2 - 1)
for doc,result in zip(docs, results):
self.assertEqual(ujson.decode(doc)['id'], result)
def test_encodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
input = base * 1024 * 1024 * 2
output = ujson.encode(input)
def test_decodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
quote = compat.str_to_bytes("\"")
input = quote + (base * 1024 * 1024 * 2) + quote
output = ujson.decode(input)
def test_toDict(self):
d = {u("key"): 31337}
class DictTest:
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
self.assertEqual(dec, d)
def test_defaultHandler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
self.assertRaises(OverflowError, ujson.encode, _TestObject("foo"))
self.assertEqual('"foo"', ujson.encode(_TestObject("foo"),
default_handler=str))
def my_handler(obj):
return "foobar"
self.assertEqual('"foobar"', ujson.encode(_TestObject("foo"),
default_handler=my_handler))
def my_handler_raises(obj):
raise TypeError("I raise for anything")
with tm.assertRaisesRegexp(TypeError, "I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(obj):
return 42
self.assertEqual(
42, ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)))
def my_obj_handler(obj):
return datetime.datetime(2013, 2, 3)
self.assertEqual(
ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))),
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
l = [_TestObject("foo"), _TestObject("bar")]
self.assertEqual(json.loads(json.dumps(l, default=str)),
ujson.decode(ujson.encode(l, default_handler=str)))
class NumpyJSONTests(TestCase):
def testBool(self):
b = np.bool(True)
self.assertEqual(ujson.decode(ujson.encode(b)), b)
def testBoolArray(self):
inpt = np.array([True, False, True, True, False, True, False , False],
dtype=np.bool)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
tm.assert_numpy_array_equal(inpt, outp)
def testInt(self):
num = np.int(2562010)
self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
num = np.int8(127)
self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
num = np.int16(2562010)
self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
num = np.int32(2562010)
self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
num = np.int64(2562010)
self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
num = np.uint8(255)
self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
num = np.uint16(2562010)
self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
num = np.uint32(2562010)
self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
num = np.uint64(2562010)
self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
def testIntArray(self):
arr = np.arange(100, dtype=np.int)
dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
tm.assert_numpy_array_equal(inpt, outp)
def testIntMax(self):
num = np.int(np.iinfo(np.int).max)
self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
num = np.int8(np.iinfo(np.int8).max)
self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
num = np.int16(np.iinfo(np.int16).max)
self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
num = np.int32(np.iinfo(np.int32).max)
self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
num = np.uint8(np.iinfo(np.uint8).max)
self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
num = np.uint16(np.iinfo(np.uint16).max)
self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
num = np.uint32(np.iinfo(np.uint32).max)
self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
if platform.architecture()[0] != '32bit':
num = np.int64(np.iinfo(np.int64).max)
self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
# uint64 max will always overflow as it's encoded to signed
num = np.uint64(np.iinfo(np.int64).max)
self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
def testFloat(self):
num = np.float(256.2013)
self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)
num = np.float32(256.2013)
self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)
num = np.float64(256.2013)
self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)
def testFloatArray(self):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
dtypes = (np.float, np.float32, np.float64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(inpt, double_precision=15)), dtype=dtype)
assert_array_almost_equal_nulp(inpt, outp)
def testFloatMax(self):
num = np.float(np.finfo(np.float).max/10)
assert_approx_equal(np.float(ujson.decode(ujson.encode(num, double_precision=15))), num, 15)
num = np.float32(np.finfo(np.float32).max/10)
assert_approx_equal(np.float32(ujson.decode(ujson.encode(num, double_precision=15))), num, 15)
num = np.float64(np.finfo(np.float64).max/10)
assert_approx_equal(np.float64(ujson.decode(ujson.encode(num, double_precision=15))), num, 15)
def testArrays(self):
arr = np.arange(100)
arr = arr.reshape((10, 10))
tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((5, 5, 4))
tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((100, 1))
tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
l = ['a', list(), dict(), dict(), list(),
42, 97.8, ['a', 'b'], {'key': 'val'}]
arr = np.array(l)
tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
arr = np.arange(100.202, 200.202, 1, dtype=np.float32)
arr = arr.reshape((5, 5, 4))
outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
assert_array_almost_equal_nulp(arr, outp)
outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
assert_array_almost_equal_nulp(arr, outp)
def testOdArray(self):
def will_raise():
ujson.encode(np.array(1))
self.assertRaises(TypeError, will_raise)
def testArrayNumpyExcept(self):
input = ujson.dumps([42, {}, 'a'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps(['a', 'b', [], 'c'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, ['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{}, []])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, None])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 'b'}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps({'a': {'b': {'c': 42}}})
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
def testArrayNumpyLabelled(self):
input = {'a': []}
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
self.assertTrue((np.empty((1, 0)) == output[0]).all())
self.assertTrue((np.array(['a']) == output[1]).all())
self.assertTrue(output[2] is None)
input = [{'a': 42}]
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
self.assertTrue((np.array([42]) == output[0]).all())
self.assertTrue(output[1] is None)
self.assertTrue((np.array([u('a')]) == output[2]).all())
# Write out the dump explicitly so there is no dependency on iteration order GH10837
input_dumps = '[{"a": 42, "b":31}, {"a": 24, "c": 99}, {"a": 2.4, "b": 78}]'
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
self.assertTrue((expectedvals == output[0]).all())
self.assertTrue(output[1] is None)
self.assertTrue((np.array([u('a'), 'b']) == output[2]).all())
input_dumps = '{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, "3": {"a": 2.4, "b": 78}}'
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
self.assertTrue((expectedvals == output[0]).all())
self.assertTrue((np.array(['1', '2', '3']) == output[1]).all())
self.assertTrue((np.array(['a', 'b']) == output[2]).all())
class PandasJSONTests(TestCase):
def testDataFrame(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df)))
self.assertTrue((df == outp).values.all())
tm.assert_numpy_array_equal(df.columns, outp.columns)
tm.assert_numpy_array_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
outp = DataFrame(**dec)
self.assertTrue((df == outp).values.all())
tm.assert_numpy_array_equal(df.columns, outp.columns)
tm.assert_numpy_array_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
outp.index = df.index
self.assertTrue((df == outp).values.all())
tm.assert_numpy_array_equal(df.columns, outp.columns)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
outp.index = df.index
self.assertTrue((df.values == outp.values).all())
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
self.assertTrue((df.transpose() == outp).values.all())
tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
tm.assert_numpy_array_equal(df.transpose().index, outp.index)
def testDataFrameNumpy(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
self.assertTrue((df == outp).values.all())
tm.assert_numpy_array_equal(df.columns, outp.columns)
tm.assert_numpy_array_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
numpy=True))
outp = DataFrame(**dec)
self.assertTrue((df == outp).values.all())
tm.assert_numpy_array_equal(df.columns, outp.columns)
tm.assert_numpy_array_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"), numpy=True))
self.assertTrue((df.transpose() == outp).values.all())
tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
tm.assert_numpy_array_equal(df.transpose().index, outp.index)
def testDataFrameNested(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
nested = {'df1': df, 'df2': df.copy()}
exp = {'df1': ujson.decode(ujson.encode(df)),
'df2': ujson.decode(ujson.encode(df))}
self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
'df2': ujson.decode(ujson.encode(df, orient="index"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
'df2': ujson.decode(ujson.encode(df, orient="records"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
'df2': ujson.decode(ujson.encode(df, orient="values"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
'df2': ujson.decode(ujson.encode(df, orient="split"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
def testDataFrameNumpyLabelled(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(*ujson.decode(ujson.encode(df), numpy=True, labelled=True))
self.assertTrue((df.T == outp).values.all())
tm.assert_numpy_array_equal(df.T.columns, outp.columns)
tm.assert_numpy_array_equal(df.T.index, outp.index)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"), numpy=True, labelled=True))
outp.index = df.index
self.assertTrue((df == outp).values.all())
tm.assert_numpy_array_equal(df.columns, outp.columns)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"), numpy=True, labelled=True))
self.assertTrue((df == outp).values.all())
tm.assert_numpy_array_equal(df.columns, outp.columns)
tm.assert_numpy_array_equal(df.index, outp.index)
def testSeries(self):
s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15]).sort_values()
# column indexed
outp = Series(ujson.decode(ujson.encode(s))).sort_values()
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s), numpy=True)).sort_values()
self.assertTrue((s == outp).values.all())
dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
outp = Series(**dec)
self.assertTrue((s == outp).values.all())
self.assertTrue(s.name == outp.name)
dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
numpy=True))
outp = Series(**dec)
self.assertTrue((s == outp).values.all())
self.assertTrue(s.name == outp.name)
outp = Series(ujson.decode(ujson.encode(s, orient="records"), numpy=True))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="records")))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="values"), numpy=True))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="values")))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="index"))).sort_values()
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="index"), numpy=True)).sort_values()
self.assertTrue((s == outp).values.all())
def testSeriesNested(self):
s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15]).sort_values()
nested = {'s1': s, 's2': s.copy()}
exp = {'s1': ujson.decode(ujson.encode(s)),
's2': ujson.decode(ujson.encode(s))}
self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
's2': ujson.decode(ujson.encode(s, orient="split"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
's2': ujson.decode(ujson.encode(s, orient="records"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
's2': ujson.decode(ujson.encode(s, orient="values"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
's2': ujson.decode(ujson.encode(s, orient="index"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
def testIndex(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# column indexed
outp = Index(ujson.decode(ujson.encode(i)))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i), numpy=True))
self.assertTrue(i.equals(outp))
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
outp = Index(**dec)
self.assertTrue(i.equals(outp))
self.assertTrue(i.name == outp.name)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
outp = Index(**dec)
self.assertTrue(i.equals(outp))
self.assertTrue(i.name == outp.name)
outp = Index(ujson.decode(ujson.encode(i, orient="values")))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="values"), numpy=True))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="records")))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="records"), numpy=True))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="index")))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="index"), numpy=True))
self.assertTrue(i.equals(outp))
def test_datetimeindex(self):
from pandas.tseries.index import date_range
rng = date_range('1/1/2000', periods=20)
encoded = ujson.encode(rng, date_unit='ns')
decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
self.assertTrue(rng.equals(decoded))
ts = Series(np.random.randn(len(rng)), index=rng)
decoded = Series(ujson.decode(ujson.encode(ts, date_unit='ns')))
idx_values = decoded.index.values.astype(np.int64)
decoded.index = DatetimeIndex(idx_values)
tm.assert_series_equal(ts, decoded)
def test_decodeArrayTrailingCommaFail(self):
input = "[31337,]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayLeadingCommaFail(self):
input = "[,31337]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayOnlyCommaFail(self):
input = "[,]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayUnmatchedBracketFail(self):
input = "[]]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayEmpty(self):
input = "[]"
ujson.decode(input)
def test_decodeArrayOneItem(self):
input = "[31337]"
ujson.decode(input)
def test_decodeBigValue(self):
input = "9223372036854775807"
ujson.decode(input)
def test_decodeSmallValue(self):
input = "-9223372036854775808"
ujson.decode(input)
def test_decodeTooBigValue(self):
try:
input = "9223372036854775808"
ujson.decode(input)
except ValueError as e:
pass
else:
assert False, "expected ValueError"
def test_decodeTooSmallValue(self):
try:
input = "-90223372036854775809"
ujson.decode(input)
except ValueError as e:
pass
else:
assert False, "expected ValueError"
def test_decodeVeryTooBigValue(self):
try:
input = "9223372036854775808"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeVeryTooSmallValue(self):
try:
input = "-90223372036854775809"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeWithTrailingWhitespaces(self):
input = "{}\n\t "
ujson.decode(input)
def test_decodeWithTrailingNonWhitespaces(self):
try:
input = "{}\n\t a"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayWithBigInt(self):
try:
ujson.loads('[18446098363113800555]')
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayFaultyUnicode(self):
try:
ujson.loads('[18446098363113800555]')
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeFloatingPointAdditionalTests(self):
places = 15
self.assertAlmostEqual(-1.1234567893, ujson.loads("-1.1234567893"), places=places)
self.assertAlmostEqual(-1.234567893, ujson.loads("-1.234567893"), places=places)
self.assertAlmostEqual(-1.34567893, ujson.loads("-1.34567893"), places=places)
self.assertAlmostEqual(-1.4567893, ujson.loads("-1.4567893"), places=places)
self.assertAlmostEqual(-1.567893, ujson.loads("-1.567893"), places=places)
self.assertAlmostEqual(-1.67893, ujson.loads("-1.67893"), places=places)
self.assertAlmostEqual(-1.7893, ujson.loads("-1.7893"), places=places)
self.assertAlmostEqual(-1.893, ujson.loads("-1.893"), places=places)
self.assertAlmostEqual(-1.3, ujson.loads("-1.3"), places=places)
self.assertAlmostEqual(1.1234567893, ujson.loads("1.1234567893"), places=places)
self.assertAlmostEqual(1.234567893, ujson.loads("1.234567893"), places=places)
self.assertAlmostEqual(1.34567893, ujson.loads("1.34567893"), places=places)
self.assertAlmostEqual(1.4567893, ujson.loads("1.4567893"), places=places)
self.assertAlmostEqual(1.567893, ujson.loads("1.567893"), places=places)
self.assertAlmostEqual(1.67893, ujson.loads("1.67893"), places=places)
self.assertAlmostEqual(1.7893, ujson.loads("1.7893"), places=places)
self.assertAlmostEqual(1.893, ujson.loads("1.893"), places=places)
self.assertAlmostEqual(1.3, ujson.loads("1.3"), places=places)
def test_encodeBigSet(self):
s = set()
for x in range(0, 100000):
s.add(x)
ujson.encode(s)
def test_encodeEmptySet(self):
s = set()
self.assertEqual("[]", ujson.encode(s))
def test_encodeSet(self):
s = set([1,2,3,4,5,6,7,8,9])
enc = ujson.encode(s)
dec = ujson.decode(enc)
for v in dec:
self.assertTrue(v in s)
def _clean_dict(d):
return dict((str(k), v) for k, v in compat.iteritems(d))
if __name__ == '__main__':
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
kobejean/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
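# Note: with the defaults above (num_epochs=None, shuffle=True) the returned
# input_fn cycles through the DataFrame indefinitely, which suits training;
# the evaluate/predict calls in main() pass num_epochs=1, shuffle=False to
# make a single, deterministic pass over the data.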
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
rodluger/everest | everest/standalone.py | 1 | 22154 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`standalone.py` - Standalone de-trending
------------------------------------------------
Provides the :py:func:`DetrendFITS` function for
manual de-trending of user-provided `K2` FITS files.
'''
from __future__ import division, print_function, absolute_import
import os
import shutil
import numpy as np
import everest
from everest.mathutils import Interpolate, SavGol
from everest.utils import AP_COLLAPSED_PIXEL, AP_SATURATED_PIXEL, DataContainer
from everest.config import EVEREST_DAT
from everest.missions.k2.utils import GetHiResImage, GetSources, \
SaturationFlux, RemoveBackground
from tempfile import NamedTemporaryFile
import matplotlib
from matplotlib.widgets import Slider
from matplotlib.ticker import FuncFormatter
import matplotlib.pyplot as pl
from scipy.ndimage import zoom
try:
import pyfits
except ImportError:
try:
import astropy.io.fits as pyfits
except ImportError:
raise Exception('Please install the `pyfits` package.')
import logging
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
log = logging.getLogger(__name__)
def DetrendFITS(fitsfile, raw=False, season=None, clobber=False, **kwargs):
"""
De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.
:param str fitsfile: The full path to the FITS file
:param ndarray aperture: A 2D integer array corresponding to the \
desired photometric aperture (1 = in aperture, 0 = outside \
aperture). Default is to interactively select an aperture.
:param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.
:returns: An :py:class:`everest.Everest` instance.
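
    A minimal usage sketch (the FITS path below is hypothetical; any K2
    target pixel file should work)::

        from everest.standalone import DetrendFITS
        star = DetrendFITS('ktwo205071984-c02_lpd-targ.fits.gz', clobber=True)
        star.plot()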
"""
# Get info
EPIC = pyfits.getheader(fitsfile, 0)['KEPLERID']
if season is None:
season = pyfits.getheader(fitsfile, 0)['CAMPAIGN']
if season is None or season == "":
season = 0
everestfile = os.path.join(
everest.missions.k2.TargetDirectory(EPIC, season),
everest.missions.k2.FITSFile(EPIC, season))
# De-trend?
if clobber or not os.path.exists(everestfile):
# Get raw data
data = GetData(fitsfile, EPIC, season, clobber=clobber, **kwargs)
# De-trend
model = everest.rPLD(EPIC,
data=data,
season=season, debug=True,
clobber=clobber, **kwargs)
# Publish it
everest.fits.MakeFITS(model)
shutil.copyfile(os.path.join(model.dir, model.name + '.pdf'),
os.path.join(model.dir,
model._mission.DVSFile(model.ID,
model.season,
model.cadence)))
# Return an Everest instance
return everest.Everest(EPIC, season=season)
class ApertureSelector(object):
    '''
    Interactive matplotlib widget for choosing a photometric aperture:
    click or drag on the pixel image to toggle pixels in or out of the
    aperture while the light-curve and background panels update live.
    '''
def __init__(self, time, images, title='Aperture'):
        '''
        Build the figure: target pixel image, cadence slider, per-column
        background panel, and light-curve / background time-series panels.
        '''
self.cadence = 0
self.time = time
self.fig, self.ax = pl.subplots(1, figsize=(10, 7))
self.fig.subplots_adjust(left=0.1, bottom=0.25, top=0.925, right=0.45)
self.images = images
self.nt, self.ny, self.nx = self.images.shape
self.x = np.arange(0, self.nx)
self.y = np.arange(0, self.ny)
self.aperture = np.zeros((self.ny, self.nx), dtype=int)
self.aperture[self.ny // 2 - 2:self.ny // 2 +
2][:, self.nx // 2 - 2:self.nx // 2 + 2] = 1
self.contour = None
self.last_j = None
self.last_i = None
self.title = title
# Slider
self.axslider = pl.axes([0.105, 0.2, 0.34, 0.03])
self.slider = Slider(self.axslider, '', 0,
self.nt - 1, valinit=0, valfmt='%d')
self.slider.valtext.set_x(0.5)
self.slider.valtext.set_ha('center')
self.slider.on_changed(self.replot)
# Background
self.axbkg = pl.axes([0.105, 0.05, 0.34, 0.125])
bkg = self.colbkg
self.bkgplot1, = self.axbkg.plot(self.x, bkg, 'ro')
self.bkgplot2, = self.axbkg.plot(self.x, bkg, 'r-', alpha=0.3)
pad = 0.2 * (bkg.max() - bkg.min())
self.axbkg.set_ylim(bkg.min() - pad, bkg.max() + pad)
self.axbkg.set_xlim(-0.7, self.nx - 0.3)
for tick in self.axbkg.get_yticklabels():
tick.set_fontsize(7)
self.axbkg.get_yaxis().set_major_formatter(
FuncFormatter(lambda x, p: '%.2f' % x))
self.axbkg.set_ylabel('Bkg (%)', fontsize=9)
# Light curve
self.axlc = pl.axes([0.5, 0.5, 0.4, 0.425])
self.lcplot, = self.axlc.plot(
self.time, self.flux, 'k.', alpha=0.3, ms=3)
self.axlc.set_xticklabels([])
self.axlc.yaxis.tick_right()
self.axlc.set_ylabel('Light curve', fontsize=14)
self.lcstdtxt = self.axlc.annotate('%.2f ppm' % self.lcstd,
xy=(0.025, 0.975),
xycoords='axes fraction',
ha='left', va='top',
fontsize=12, color='r')
# Light curve background
self.axlcbkg = pl.axes([0.5, 0.05, 0.4, 0.425])
self.lcbkgplot, = self.axlcbkg.plot(
self.time, self.lcbkg, 'k.', alpha=0.3, ms=3)
self.axlcbkg.yaxis.tick_right()
self.axlcbkg.set_ylabel('Background', fontsize=14)
self.bkgstdtxt = self.axlcbkg.annotate('%.2f ppm' % self.bkgstd,
xy=(0.025, 0.975),
xycoords='axes fraction',
ha='left', va='top',
fontsize=12, color='r')
# Trackers
self.tracker1 = self.axlc.axvline(
self.time[self.cadence], color='r', alpha=0.5, lw=1)
self.tracker2 = self.axlcbkg.axvline(
self.time[self.cadence], color='r', alpha=0.5, lw=1)
# Appearance
self.fig.canvas.set_window_title('Select an aperture')
self.ax.axis('off')
self.ax.set_xlim(-0.7, self.nx - 0.3)
self.ax.set_ylim(-0.7, self.ny - 0.3)
self.ax.set_title(title, fontsize=18)
# Plot the image
try:
plasma = pl.get_cmap('plasma')
except ValueError:
plasma = pl.get_cmap('Greys')
plasma.set_bad(alpha=0)
self.implot = self.ax.imshow(self.images[self.cadence],
aspect='auto', interpolation='nearest',
cmap=plasma, picker=True)
self.fig.canvas.mpl_connect('motion_notify_event', self.mouse_drag)
self.fig.canvas.mpl_connect('pick_event', self.mouse_click)
# Update the contour
self.update()
# Enter interactive mode
pl.show()
@property
def colbkg(self):
        '''
        Median background (non-aperture) flux in each column of the current
        cadence, as a percent deviation from the mean over columns.
        '''
# Flux in background pixels
bkg = np.zeros(self.nx)
for col in range(self.nx):
b = np.where(self.aperture[:, col] == 0)
bkg[col] = np.nanmedian(self.images[self.cadence][b, col])
return 100 * (bkg / np.mean(bkg) - 1.)
@property
def lcbkg(self):
        '''
        Median flux of the pixels outside the aperture at every cadence,
        returned as a column vector (the background light curve).
        '''
binds = np.where(self.aperture ^ 1)
bkg = np.nanmedian(
np.array([f[binds] for f in self.images], dtype='float64'), axis=1)
return bkg.reshape(-1, 1)
@property
def flux(self):
        '''
        Background-subtracted flux summed over the aperture pixels at every
        cadence (simple aperture photometry).
        '''
ap = np.where(self.aperture & 1)
fpix2D = np.array([f[ap] for f in self.images], dtype='float64')
return np.sum(fpix2D - self.lcbkg, axis=1)
@property
def lcstd(self):
        '''
        CDPP (scatter metric, in ppm) of the aperture light curve.
        '''
return everest.k2.CDPP(self.flux)
@property
def bkgstd(self):
        '''
        CDPP (scatter metric, in ppm) of the background light curve.
        '''
return everest.k2.CDPP(self.lcbkg)
def update_bkg(self):
        '''
        Refresh the per-column background panel.
        '''
bkg = self.colbkg
self.bkgplot1.set_ydata(bkg)
self.bkgplot2.set_ydata(bkg)
pad = 0.2 * (bkg.max() - bkg.min())
self.axbkg.set_ylim(bkg.min() - pad, bkg.max() + pad)
self.axbkg.set_xlim(-0.7, self.nx - 0.3)
def update_lc(self):
        '''
        Refresh the light-curve panel and its CDPP annotation.
        '''
flux = self.flux
self.lcplot.set_ydata(flux)
pad = 0.2 * (flux.max() - flux.min())
self.axlc.set_ylim(flux.min() - pad, flux.max() + pad)
self.axlc.set_xlim(self.time[0], self.time[-1])
self.lcstdtxt.set_text('%.2f ppm' % self.lcstd)
def update_lcbkg(self):
        '''
        Refresh the background light-curve panel and its CDPP annotation.
        '''
lcbkg = self.lcbkg
self.lcbkgplot.set_ydata(lcbkg)
pad = 0.2 * (lcbkg.max() - lcbkg.min())
self.axlcbkg.set_ylim(lcbkg.min() - pad, lcbkg.max() + pad)
self.axlcbkg.set_xlim(self.time[0], self.time[-1])
self.bkgstdtxt.set_text('%.2f ppm' % self.bkgstd)
def PadWithZeros(self, vector, pad_width, iaxis, kwargs):
        '''
        Padding callback for :py:func:`numpy.lib.pad`: zero out the padded
        edges so the aperture contour closes at the image border.
        '''
vector[:pad_width[0]] = 0
vector[-pad_width[1]:] = 0
return vector
def mouse_drag(self, event):
        '''
        Toggle aperture pixels as the mouse is dragged with the left button
        held down.
        '''
if event.inaxes == self.ax and event.button == 1:
# Index of nearest point
i = np.nanargmin(((event.xdata - self.x) / self.nx) ** 2)
j = np.nanargmin(((event.ydata - self.y) / self.ny) ** 2)
if (i == self.last_i) and (j == self.last_j):
return
else:
self.last_i = i
self.last_j = j
# Toggle pixel
if self.aperture[j, i]:
self.aperture[j, i] = 0
else:
self.aperture[j, i] = 1
# Update the contour
self.update()
def mouse_click(self, event):
        '''
        Toggle the aperture pixel nearest to a mouse click.
        '''
if event.mouseevent.inaxes == self.ax:
# Index of nearest point
i = np.nanargmin(
((event.mouseevent.xdata - self.x) / self.nx) ** 2)
j = np.nanargmin(
((event.mouseevent.ydata - self.y) / self.ny) ** 2)
self.last_i = i
self.last_j = j
# Toggle pixel
if self.aperture[j, i]:
self.aperture[j, i] = 0
else:
self.aperture[j, i] = 1
# Update the contour
self.update()
def update(self):
        '''
        Redraw the aperture contour and refresh all panels.
        '''
# Update plot
contour = np.zeros((self.ny, self.nx))
contour[np.where(self.aperture)] = 1
contour = np.lib.pad(contour, 1, self.PadWithZeros)
highres = zoom(contour, 100, order=0, mode='nearest')
extent = np.array([-1, self.nx, -1, self.ny])
if self.contour is not None:
for coll in self.contour.collections:
self.ax.collections.remove(coll)
self.contour = self.ax.contour(highres, levels=[0.5], extent=extent,
origin='lower', colors='r',
linewidths=2)
self.update_bkg()
self.update_lc()
self.update_lcbkg()
self.fig.canvas.draw()
def replot(self, val):
        '''
        Slider callback: show the selected cadence and refresh all panels.
        '''
# Update plot
self.cadence = int(val)
self.implot.set_data(self.images[int(val)])
self.implot.set_clim(vmin=np.nanmin(
self.images[int(val)]), vmax=np.nanmax(self.images[int(val)]))
self.tracker1.set_xdata(
[self.time[self.cadence], self.time[self.cadence]])
self.tracker2.set_xdata(
[self.time[self.cadence], self.time[self.cadence]])
self.update_bkg()
self.update_lc()
self.update_lcbkg()
self.fig.canvas.draw()
def GetData(fitsfile, EPIC, campaign, clobber=False,
saturation_tolerance=-0.1,
bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
get_hires=False, get_nearby=False,
aperture=None, **kwargs):
'''
Returns a :py:obj:`DataContainer` instance with the
raw data for the target.
:param str fitsfile: The full raw target pixel file path
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param float saturation_tolerance: Target is considered saturated \
if flux is within this fraction of the pixel well depth. \
Default -0.1
    :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \
        outliers when computing the model. \
        Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
    :param bool get_hires: Download a high resolution image of the target? \
        Default :py:obj:`False`
    :param bool get_nearby: Retrieve location of nearby sources? \
        Default :py:obj:`False`
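
    A rough usage sketch (the FITS filename below is only illustrative)::

        data = GetData('ktwo205071984-c02_lpd-targ.fits.gz',
                       EPIC=205071984, campaign=2)
        print(data.fpix.shape, data.aperture.sum())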
'''
# Get the npz file name
filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
('%09d' % EPIC)[:4] +
'00000', ('%09d' % EPIC)[4:],
'data.npz')
# Create the dir
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
# Check for saved data
if not os.path.exists(filename) or clobber:
log.info("Fetching data for target...")
# Load the tpf
with pyfits.open(fitsfile) as f:
qdata = f[1].data
# Get the header info
fitsheader = [pyfits.getheader(fitsfile, 0).cards,
pyfits.getheader(fitsfile, 1).cards,
pyfits.getheader(fitsfile, 2).cards]
# Get a hi res image of the target
if get_hires:
try:
hires = GetHiResImage(EPIC)
except ValueError:
hires = None
else:
hires = None
# Get nearby sources
if get_nearby:
try:
nearby = GetSources(EPIC)
except ValueError:
nearby = []
else:
nearby = []
# Get the arrays
cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
time = np.array(qdata.field('TIME'), dtype='float64')
fpix = np.array(qdata.field('FLUX'), dtype='float64')
fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
qual = np.array(qdata.field('QUALITY'), dtype=int)
# Get rid of NaNs in the time array by interpolating
naninds = np.where(np.isnan(time))
time = Interpolate(np.arange(0, len(time)), naninds, time)
# Get the motion vectors (if available!)
pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
else:
pc1 = None
pc2 = None
# Get the static pixel images for plotting
pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]
# Get the aperture interactively
if aperture is None:
aperture = ApertureSelector(time[::10], fpix[::10],
title='EPIC %d' % EPIC).aperture
if np.sum(aperture) == 0:
raise ValueError("Empty aperture!")
# Atomically write to disk.
# http://stackoverflow.com/questions/2333872/
# atomic-writing-to-file-with-python
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
f = NamedTemporaryFile("wb", delete=False)
np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
fpix_err=fpix_err,
qual=qual, aperture=aperture,
pc1=pc1, pc2=pc2, fitsheader=fitsheader,
pixel_images=pixel_images, nearby=nearby,
hires=hires)
f.flush()
os.fsync(f.fileno())
f.close()
shutil.move(f.name, filename)
# Load
data = np.load(filename)
aperture = data['aperture'][()]
pixel_images = data['pixel_images']
nearby = data['nearby'][()]
hires = data['hires'][()]
fitsheader = data['fitsheader']
cadn = data['cadn']
time = data['time']
fpix = data['fpix']
fpix_err = data['fpix_err']
qual = data['qual']
pc1 = data['pc1']
pc2 = data['pc2']
# Compute the saturation flux and the 97.5th percentile
# flux in each pixel of the aperture. We're going
# to compare these to decide if the star is saturated.
satflx = SaturationFlux(EPIC, campaign=campaign) * \
(1. + saturation_tolerance)
f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
for i in range(fpix.shape[1]):
for j in range(fpix.shape[2]):
if aperture[i, j]:
# Let's remove NaNs...
tmp = np.delete(fpix[:, i, j], np.where(
np.isnan(fpix[:, i, j])))
# ... and really bad outliers...
if len(tmp):
f = SavGol(tmp)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) |
(f < med - 10. * MAD))[0]
np.delete(tmp, bad)
# ... so we can compute the 97.5th percentile flux
i97 = int(0.975 * len(tmp))
tmp = tmp[np.argsort(tmp)[i97]]
f97[i, j] = tmp
# Check if any of the pixels are actually saturated
if np.nanmax(f97) <= satflx:
log.info("No saturated columns detected.")
saturated = False
aperture[np.isnan(fpix[0])] = 0
ap = np.where(aperture & 1)
fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')
else:
# We need to collapse the saturated columns
saturated = True
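            # Collapse each saturated column into a single pseudo-pixel: the
            # first aperture pixel in the column is re-marked AP_COLLAPSED_PIXEL
            # (it carries the summed flux below), the rest AP_SATURATED_PIXEL.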
ncol = 0
fpixnew = []
ferrnew = []
for j in range(aperture.shape[1]):
if np.any(f97[:, j] > satflx):
marked = False
collapsed = np.zeros(len(fpix[:, 0, 0]))
collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
for i in range(aperture.shape[0]):
if aperture[i, j]:
if not marked:
aperture[i, j] = AP_COLLAPSED_PIXEL
marked = True
else:
aperture[i, j] = AP_SATURATED_PIXEL
collapsed += fpix[:, i, j]
collapsed_err2 += fpix_err[:, i, j] ** 2
if np.any(collapsed):
fpixnew.append(collapsed)
ferrnew.append(np.sqrt(collapsed_err2))
ncol += 1
else:
for i in range(aperture.shape[0]):
if aperture[i, j]:
fpixnew.append(fpix[:, i, j])
ferrnew.append(fpix_err[:, i, j])
fpix2D = np.array(fpixnew).T
fpix_err2D = np.array(ferrnew).T
log.info("Collapsed %d saturated column(s)." % ncol)
# Compute the background
binds = np.where(aperture ^ 1)
if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
bkg = np.nanmedian(np.array([f[binds]
for f in fpix], dtype='float64'), axis=1)
# Uncertainty of the median:
# http://davidmlane.com/hyperstat/A106993.html
bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
dtype='float64'), axis=1) \
/ np.sqrt(len(binds[0]))
bkg = bkg.reshape(-1, 1)
bkg_err = bkg_err.reshape(-1, 1)
else:
bkg = 0.
bkg_err = 0.
# Make everything 2D and remove the background
fpix = fpix2D - bkg
fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
flux = np.sum(fpix, axis=1)
# Get NaN data points
nanmask = np.where(np.isnan(flux) | (flux == 0))[0]
# Get flagged data points -- we won't train our model on them
badmask = []
for b in bad_bits:
badmask += list(np.where(qual & 2 ** (b - 1))[0])
# Flag >10 sigma outliers -- same thing.
tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
t = np.delete(time, tmpmask)
f = np.delete(flux, tmpmask)
f = SavGol(f)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
badmask.extend([np.argmax(time == t[i]) for i in bad])
# Campaign 2 hack: the first day or two are screwed up
if campaign == 2:
badmask.extend(np.where(time < 2061.5)[0])
# Finalize the mask
badmask = np.array(sorted(list(set(badmask))))
# Interpolate the nans
fpix = Interpolate(time, nanmask, fpix)
fpix_err = Interpolate(time, nanmask, fpix_err)
# Return
data = DataContainer()
data.ID = EPIC
data.campaign = campaign
data.cadn = cadn
data.time = time
data.fpix = fpix
data.fpix_err = fpix_err
data.nanmask = nanmask
data.badmask = badmask
data.aperture = aperture
data.aperture_name = 'custom'
data.apertures = dict(custom=aperture)
data.quality = qual
data.Xpos = pc1
data.Ypos = pc2
data.meta = fitsheader
data.mag = fitsheader[0]['KEPMAG'][1]
if type(data.mag) is pyfits.card.Undefined:
data.mag = np.nan
data.pixel_images = pixel_images
data.nearby = nearby
data.hires = hires
data.saturated = saturated
data.bkg = bkg
return data
| mit |
ArvinPan/opencog | opencog/python/spatiotemporal/temporal_events/animation.py | 34 | 4896 | from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
x_axis = xrange(13)
zeros_13 = zeros(13)
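# The 13 bins along x presumably correspond to the 13 Allen-style interval
# relations (p, m, o, F, D, s, e, S, d, f, O, M, P); this matches the
# interleaved major/minor tick labels set on the axes below.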
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
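# The interleaved major/minor tick labels above appear to spell out the 13 Allen
# interval relations in canonical order (p m o F D s e S d f O M P), one per bar
# of the 13-bin histograms, so each bar presumably shows the degree to which that
# relation holds between the corresponding pair of events.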
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
def show(self):
fr = len(self.interval) - 1
anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
frames=fr, interval=fr, blit=True)
self.plt.show()
if __name__ == '__main__':
anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
TemporalEventTrapezium(0, 10, 6, 9),
TemporalEventTrapezium(0.5, 11, 1, 3))
# anim.show()
| agpl-3.0 |
mcdaniel67/sympy | sympy/interactive/printing.py | 19 | 16124 | """Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg))
builtins._ = arg
sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
preamble = "\\documentclass[%s]{article}\n" \
"\\pagestyle{empty}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = 150
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
# mathtext can't render some LaTeX commands. For example, it can't
# render any LaTeX environments such as array or matrix. So here we
# ensure that if mathtext fails to render, we return None.
try:
return latex_to_png(o)
except ValueError as e:
debug('matplotlib exception caught:', repr(e))
return None
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
if isinstance(o, (list, tuple, set, frozenset)):
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
return True
elif isinstance(o, (float, integer_types)) and print_builtin:
return True
return False
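# Note on the ordering above: bool is a subclass of int, so the explicit bool
# branch must come before the (float, integer_types) branch; otherwise True/False
# would be routed through the LaTeX printers instead of keeping their plain repr.
# Containers are printable only if every element (and, for dicts, every key and
# value) is printable.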
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode)
try:
return _preview_wrapper(s)
except RuntimeError as e:
debug('preview failed with:', repr(e),
' Falling back to matplotlib backend')
if latex_mode != 'inline':
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace(r'\dag', r'\dagger')
s = s.strip('$')
return '$$%s$$' % s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
print()
print(out)
else:
print(repr(arg))
import IPython
if V(IPython.__version__) >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic] + list(integer_types)
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
else:
ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
# Reaching this point means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor='Black',
backcolor='Transparent', fontsize='10pt',
latex_mode='equation*', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order: string or None
There are a few different settings for this parameter:
lex (default), which is lexicographic order;
grlex, which is graded lexicographic order;
grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: string, boolean, or None
If True, use default latex rendering in GUI interfaces (png and
mathjax);
if False, do not use latex rendering;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable latex rendering with matplotlib;
if 'mathjax', enable latex text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents
wrap_line: boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if `pretty_print` is
True.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if `pretty_print` is True.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler: boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor: string, optional, default='Black'
DVI setting for foreground color.
backcolor: string, optional, default='Transparent'
DVI setting for background color.
fontsize: string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble.
latex_mode: string, optional, default='equation*'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin: boolean, optional, default=True
If true then floats and integers will be printed. If false the
printer will only print SymPy types.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
A custom LaTeX printer. This should mimic sympy.printing.latex().
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that we are not in an IPython shell
in_ipython = False
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
else:
in_ipython = (ip is not None)
if ip and not in_ipython:
in_ipython = _is_ipython(ip)
if in_ipython and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
debug("init_printing: Setting use_latex to True")
use_latex = True
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if in_ipython:
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer)
else:
_init_python_printing(stringify_func)
| bsd-3-clause |
xubenben/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
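# l1_min_c returns the smallest C at which the L1-penalized model can have a
# non-zero coefficient, so this grid of 50 values spans three decades upward
# from that threshold (np.logspace(0, 3) multiplies it by 1 through 1000).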
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
OTWillems/GEO1005 | SpatialDecision/external/networkx/drawing/nx_pylab.py | 20 | 30247 | """
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
if 'with_labels' not in kwds:
kwds['with_labels'] = 'labels' in kwds
b = plt.ishold()
# allow callers to override the hold state by passing hold=True|False
h = kwds.pop('hold', None)
if h is not None:
plt.hold(h)
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
plt.hold(b)
raise
plt.hold(b)
return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node and edge transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='k')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
>>> limits=plt.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = G.nodes()
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=1.0,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float, or array of floats
Line width of edges (default=1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='k'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted,dashdot)
alpha : float
The edge transparency (default=1.0)
edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = G.edges()
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0-0.25 # make head segment 25 percent of edge length
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2-x1 # x offset
dy = y2-y1 # y offset
d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy*p+y1
if dy == 0: # horizontal edge
ya = y2
xa = dx*p+x1
else:
theta = numpy.arctan2(dy, dx)
xa = p*d*numpy.cos(theta)+x1
ya = p*d*numpy.sin(theta)+y1
a_pos.append(((xa, ya), (x2, y2)))
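# Worked example of the stub geometry: for an edge from (0, 0) to (4, 0),
# dx = 4 and dy = 0, so the dy == 0 branch gives (xa, ya) = (3, 0) and the
# appended segment covers the last quarter of the edge; drawn below with 4x
# the edge linewidth, it reads as an arrowhead.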
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4*ww for ww in lw],
antialiaseds=(1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim(corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True,
)
text_items[n] = t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = numpy.array((x, y))
trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, circular_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout.
Parameters
----------
G : graph
A networkx graph
prog : string, optional
Name of Graphviz layout program
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
"""
pos = nx.drawing.graphviz_layout(G, prog)
draw(G, pos, **kwargs)
def draw_nx(G, pos, **kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G, pos, **kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
| gpl-2.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/sphinxext/mathmpl.py | 12 | 3822 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
from hashlib import md5
from docutils import nodes
from docutils.parsers.rst import directives
import warnings
from matplotlib import rcParams
from matplotlib.mathtext import MathTextParser
rcParams['mathtext.fontset'] = 'cm'
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, ['cm', 'stix', 'stixsans'])
options_spec = {'fontset': fontset_choice}
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = options_spec
def math_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
latex = ''.join(content)
node = latex_math(block_text)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
orig_fontset = rcParams['mathtext.fontset']
rcParams['mathtext.fontset'] = fontset
if os.path.exists(filename):
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
except:
warnings.warn("Could not render math expression %s" % latex,
Warning)
depth = 0
rcParams['mathtext.fontset'] = orig_fontset
sys.stdout.write("#")
sys.stdout.flush()
return depth
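# Hedged usage sketch (not part of the original extension; the path is made up):
#   depth = latex2png(r'\frac{a}{b}', '/tmp/frac.png', fontset='cm')
# writes the rendered PNG and returns its baseline depth in pixels, which
# latex2html() below turns into a negative CSS "bottom" offset so inline math
# sits on the surrounding text baseline.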
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
name = 'math-%s' % md5(latex.encode()).hexdigest()[-10:]
destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
if not os.path.exists(destdir):
os.makedirs(destdir)
dest = os.path.join(destdir, '%s.png' % name)
path = '/'.join((setup.app.builder.imgpath, 'mathmpl'))
depth = latex2png(latex, dest, node['fontset'])
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
def setup(app):
setup.app = app
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math,
html=(visit_latex_math_html, depart_latex_math_html),
latex=(visit_latex_math_latex, depart_latex_math_latex))
app.add_role('math', math_role)
app.add_directive('math', math_directive,
True, (0, 0, 0), **options_spec)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
| mit |
cactorium/UCFBrainStuff | seniordesign/emokit/fft2.py | 2 | 1744 | # This is an example of popping a packet from the Emotiv class's packet queue
# and printing the gyro x and y values to the console.
from emokit.emotiv import Emotiv
import platform
if platform.system() == "Windows":
import socket # Needed to prevent gevent crashing on Windows. (surfly / gevent issue #459)
import gevent
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
is_running = True
# TODO: is_running is not working as expected. But it DOES work!
def evt_main():
global is_running
ring_buf = np.zeros(x.size)
headset = Emotiv()
gevent.spawn(headset.setup)
gevent.sleep(0)
pos = 0
try:
while is_running:
packet = headset.dequeue()
print packet.gyro_x, packet.gyro_y
ring_buf[pos] = packet.sensors["O1"]["value"]
pos = (pos + 1) % ring_buf.size
if pos % 4 == 0:
yield np.concatenate((ring_buf[pos:ring_buf.size:1], ring_buf[0:pos:1]))
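# The concatenation above rotates the ring buffer into chronological order
# (oldest sample first): e.g. with size 5 and pos == 3 holding
# [s5, s6, s7, s3, s4], it yields [s3, s4, s5, s6, s7] for the FFT in animate().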
gevent.sleep(0)
except KeyboardInterrupt:
headset.close()
finally:
is_running = False
headset.close()
x = np.arange(0, 1024)
test_buf = np.zeros(x.size)
fig, ax = plt.subplots()
line, = ax.plot(x, test_buf)
plt.axis([0, x.size - 1, 0, 80])
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
def animate(rb):
dft = np.fft.fft(rb)
line.set_ydata(20*np.log10(np.absolute(dft)))
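# 20*log10(|X_k|) is the FFT magnitude in decibels; the fixed 0-80 dB window
# set via plt.axis above corresponds to magnitudes between 1 and 10**4.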
return line,
def counter():
global is_running
i = 0
while is_running:
yield i
i = i + 1
ani = animation.FuncAnimation(fig, animate, evt_main, init_func=init, interval=20, blit=True)
plt.show()
is_running = False
while True:
gevent.sleep(0)
| mit |
victorbergelin/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
zhongdai/csv2td | csv2td/csv2td.py | 1 | 4292 | # coding=utf-8
import csv
import os
import argparse
import configparser
import pandas as pd
import numpy as np
import re
from . import __version__
from .filetemp import INI_FILE, INI_FILE_NAME, FASTLOAD_CTL
from .filetemp import INI_FILE_KEYS
def correct_object_name(object_name):
"""
Make sure the name is a valid Teradata object name (applies to both field and table names):
- No more than 30 characters (MAX_LENGTH below)
- Must not start with a digit
- No special characters
- No spaces
"""
if not isinstance(object_name, str) or len(object_name) == 0:
raise TypeError('Please make sure the object_name is a str and not null')
MAX_LENGTH = 30
REGEX = re.compile(r"[^a-zA-Z0-9]")
no_special_chars = REGEX.sub('_', object_name)
if re.match(r"\d+", no_special_chars) is not None:
no_special_chars = '_' + no_special_chars
return no_special_chars[:MAX_LENGTH]
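# Illustrative example (not in the original module): correct_object_name("2019 sales$amt")
# maps the special characters to underscores, prepends "_" because the cleaned
# name starts with a digit, and returns "_2019_sales_amt" (already under the
# 30-character limit).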
def guess_date_format(list_of_date):
"""
Guess the date format shared by a list of date strings, returning one of
yyyymmdd
yyyy-mm-dd
dd/mm/yyyy
dd-mm-yyyy
or None if no single format matches every element
"""
assert isinstance(list_of_date,list)
matchers = {
'yyyymmdd':re.compile(r"\d{4}[0|1]\d[0|1|2|3]\d"),
'yyyy-mm-dd':re.compile(r"\d{4}\-[0|1]\d\-[0|1|2|3]\d"),
'dd/mm/yyyy':re.compile(r"[0|1|2|3]\d\/[0|1]\d\/\d{4}"),
'dd-mm-yyyy':re.compile(r"[0|1|2|3]\d\-[0|1]\d\-\d{4}")
}
return_format = None
for name, m in matchers.items():
for item in list_of_date:
if m.match(item) is None:
# if found any not matching (in the middle), set to None
# and break the inner loop
return_format = None
break
else:
return_format = name
# if we matched to any format, just break the outer loop
if return_format is not None:
break
return return_format
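# Illustrative example (not in the original module):
# guess_date_format(['20200131', '20200201']) -> 'yyyymmdd', because every
# element matches that pattern; a mixed list such as ['20200131', '31/01/2020']
# matches no single pattern and returns None.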
class CSVField:
def __init__(self, name,
dtype=None,
min_len=None,
max_len=None,
date_format=None):
assert isinstance(name, str)
self.name = name
self.dtype = dtype
self.min_len = min_len
self.max_len = max_len
self.date_format = date_format
def __repr__(self):
return 'CSVField({})'.format(self.name)
def get_parser():
parser = argparse.ArgumentParser(description='Generate Fastload script')
parser.add_argument('filename',type=str,
help='The filename of CSV file to be loaded to Teradata')
return parser
def get_config():
cfg = configparser.ConfigParser()
error_message = """
The configuration file name should be csv2td.ini; please make sure it contains
all of the required sections and keys.
Please execute `csv2tdinit` to generate a template file in the same folder.
"""
try:
cfg.read(INI_FILE_NAME)
except Exception as e:
raise
sections = cfg.sections()
if len(sections) != 1:
raise SystemExit(error_message)
section_name = sections[0]
for key in INI_FILE_KEYS:
if key not in cfg[section_name]:
raise SystemExit(error_message)
return cfg[section_name]
def generate_init_file():
current_dir = os.getcwd()
with open(os.path.join(current_dir, INI_FILE_NAME), 'w') as f:
f.write(INI_FILE)
print('The template configuration file [{}] has been generated in the current folder'.format(INI_FILE_NAME))
return
def command_line_runner():
parser = get_parser()
args = vars(parser.parse_args())
# File only can be placed under the current folder
csv_filename = args['filename']
full_path = os.path.join(os.getcwd(),csv_filename)
if not os.path.isfile(full_path):
raise ValueError('{} is not a file, please check'.format(full_path))
s = csv.Sniffer()
with open(full_path) as f:
if not s.has_header(f.read(1024)):
has_header = False
else:
has_header = True
if not has_header:
raise ValueError('please make sure you have headers in the file')
# Get the config object
section = get_config()
# process data
if __name__ == '__main__':
command_line_runner()
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/scipy/stats/kde.py | 9 | 17416 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy.lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
kde.integrate_gaussian(mean, cov) : float
Multiply pdf with a specified Gaussian and integrate over the whole
domain.
kde.integrate_box_1d(low, high) : float
Integrate pdf (1D only) between two bounds.
kde.integrate_box(low_bounds, high_bounds) : float
Integrate pdf over a rectangular space between low_bounds and
high_bounds.
kde.integrate_kde(other_kde) : float
Integrate two kernel density estimates multiplied together.
kde.resample(size=None) : ndarray
Randomly sample a dataset from the estimated pdf.
kde.set_bandwidth(bw_method='scott') : None
Computes the bandwidth, i.e. the coefficient that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
.. versionadded:: 0.11.0
kde.covariance_factor : float
Computes the coefficient (`kde.factor`) that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
The default is `scotts_factor`. A subclass can overwrite this method
to provide a different method, or set it through a call to
`kde.set_bandwidth`.
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
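For example, with ``n = 1000`` points in ``d = 2`` dimensions both rules
reduce to ``1000**(-1./6.)``, approximately 0.316, since ``(d + 2) / 4.``
equals 1 in two dimensions.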
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
...     "Measurement model, return two coupled measurements."
...     m1 = np.random.normal(size=n)
...     m2 = np.random.normal(scale=0.5, size=n)
...     return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as its only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
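# Illustrative usage sketch, not part of the class above: it exercises only
# the public API documented in the docstring (`factor`, `set_bandwidth`) and
# shows how the bandwidth factor changes between Scott's rule, Silverman's
# rule, and a user-supplied scalar. The sample size and seed are arbitrary.
def _bandwidth_factor_sketch(n=200, seed=0):
    rng = np.random.RandomState(seed)
    kde = gaussian_kde(rng.normal(size=n))       # Scott's rule by default
    scott = kde.factor                           # n**(-1./(d+4)) with d == 1
    kde.set_bandwidth(bw_method='silverman')     # switch rule of thumb
    silverman = kde.factor
    kde.set_bandwidth(bw_method=scott / 2.0)     # a scalar is used directly
    return scott, silverman, kde.factor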
| agpl-3.0 |
jmschrei/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
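# Illustrative addition, not one of the original tests: a hedged sketch of the
# qualitative behaviour exercised above -- with the same empirical covariance,
# a larger ``alpha`` should produce a sparser estimated precision matrix. The
# problem size, alphas and the non-zero threshold are arbitrary choices.
def _graph_lasso_alpha_sparsity_sketch(random_state=0):
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(10, alpha=.9, random_state=random_state)
    X = random_state.multivariate_normal(np.zeros(10), linalg.inv(prec),
                                         size=60)
    emp_cov = empirical_covariance(X)
    nonzeros = []
    for alpha in (0.01, 0.5):
        _, icov = graph_lasso(emp_cov, alpha=alpha)
        nonzeros.append(np.sum(np.abs(icov) > 1e-10))
    # the heavily penalized fit keeps at most as many non-zero entries
    assert nonzeros[1] <= nonzeros[0]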
| bsd-3-clause |
oliverlee/pydy | examples/chaos_pendulum/chaos_pendulum.py | 3 | 3976 | #!/usr/bin/env python
# This script generates the equations of motion for a double pendulum where the
# bob rotates about the pendulum rod. It can be shown to be chaotic when
# simulated.
# import sympy and the mechanics module
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
import sympy.physics.mechanics as me
from pydy.system import System
from pydy.viz import Cylinder, Plane, VisualizationFrame, Scene
# Enable pretty printing.
me.init_vprinting()
# declare the constants #
# gravity
g = sym.symbols('g')
mA, mB, lB = sym.symbols('m_A, m_B, L_B')
# plate dimensions
w, h = sym.symbols('w, h')
# declare the coordinates and speeds and their derivatives #
# theta : angle of the rod
# phi : angle of the plate relative to the rod
# omega : angular speed of the rod
# alpha : angular speed of the plate
theta, phi, omega, alpha = me.dynamicsymbols('theta phi omega alpha')
# reference frames #
# create a Newtonian reference frame
N = me.ReferenceFrame('N')
# create a reference for the rod, A, and the plate, B
A = me.ReferenceFrame('A')
B = me.ReferenceFrame('B')
# orientations #
# the rod rotates with respect to the Newtonian reference frame about the y
# axis
A.orient(N, 'Axis', (theta, N.y))
# the plate rotates about the rod's primary axis
B.orient(A, 'Axis', (phi, A.z))
# positions #
# origin of the Newtonian reference frame
No = me.Point('No')
# create a point for the mass centers of the two bodies
Ao = me.Point('Ao')
Bo = me.Point('Bo')
# define the positions of the mass centers relative to the Newtonian origin
lA = (lB - h / 2) / 2
Ao.set_pos(No, lA * A.z)
Bo.set_pos(No, lB * A.z)
# kinematical differential equations #
kinDiffs = (omega - theta.diff(),
alpha - phi.diff())
# angular velocities
A.set_ang_vel(N, omega * N.y)
B.set_ang_vel(A, alpha * A.z)
# linear velocities and accelerations #
No.set_vel(N, 0) # the newtonian origin is fixed
Ao.v2pt_theory(No, N, A)
Bo.v2pt_theory(No, N, A)
# central inertia
IAxx = sym.S(1) / 12 * mA * (2 * lA)**2
IAyy = IAxx
IAzz = 0
IA = (me.inertia(A, IAxx, IAyy, IAzz), Ao)
IBxx = sym.S(1) / 12 * mB * h**2
IByy = sym.S(1) / 12 * mB * (w**2 + h**2)
IBzz = sym.S(1) / 12 * mB * w**2
IB = (me.inertia(B, IBxx, IByy, IBzz), Bo)
# rigid bodies
rod = me.RigidBody('rod', Ao, A, mA, IA)
plate = me.RigidBody('plate', Bo, B, mB, IB)
# forces #
# add the gravitional force to each body
rod_gravity = (Ao, mA * g * N.z)
plate_gravity = (Bo, mB * g * N.z)
# equations of motion with Kane's method
# make a tuple of the bodies and forces
bodies = (rod, plate)
loads = (rod_gravity, plate_gravity)
# create a Kane object with respect to the Newtonian reference frame
kane = me.KanesMethod(N, q_ind=(theta, phi), u_ind=(omega, alpha),
kd_eqs=kinDiffs)
# calculate Kane's equations
fr, frstar = kane.kanes_equations(loads, bodies)
sys = System(kane)
sys.constants = {lB: 0.2, # m
h: 0.1, # m
w: 0.2, # m
mA: 0.01, # kg
mB: 0.1, # kg
g: 9.81, # m/s**2
}
sys.initial_conditions = {theta: np.deg2rad(90.0),
phi: np.deg2rad(0.5),
omega: 0,
alpha: 0}
sys.times = np.linspace(0, 10, 500)
x = sys.integrate()
plt.plot(sys.times, x)
plt.legend([sym.latex(s, mode='inline') for s in sys.coordinates + sys.speeds])
# visualize
rod_shape = Cylinder(2 * lA, 0.005, color='red')
plate_shape = Plane(h, w, color='blue')
v1 = VisualizationFrame('rod',
A.orientnew('rod', 'Axis', (sym.pi / 2, A.x)),
Ao,
rod_shape)
v2 = VisualizationFrame('plate',
B.orientnew('plate', 'Body',
(sym.pi / 2, sym.pi / 2, 0), 'XZX'),
Bo,
plate_shape)
scene = Scene(N, No, v1, v2, system=sys)
scene.display()
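# Optional sketch, not part of the original example: the chaotic character of
# the system can be illustrated by re-integrating with a slightly perturbed
# initial plate angle and comparing the two trajectories; nearby initial
# conditions diverge. The perturbation size (1e-6 rad) is arbitrary.
sys.initial_conditions[phi] = np.deg2rad(0.5) + 1e-6
x_perturbed = sys.integrate()
plt.figure()
plt.semilogy(sys.times, np.abs(x[:, 1] - x_perturbed[:, 1]))
plt.xlabel('time [s]')
plt.ylabel('|phi - phi perturbed| [rad]')
plt.show()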
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
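# Illustrative addition, not one of the original tests: in practice
# BernoulliRBM is most often used as an unsupervised feature extractor in
# front of a linear classifier. All hyperparameters below are arbitrary and
# kept small only so the sketch runs quickly.
def _rbm_pipeline_sketch():
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    rbm = BernoulliRBM(n_components=32, learning_rate=0.05, n_iter=5,
                       random_state=0)
    clf = Pipeline([('rbm', rbm), ('logistic', LogisticRegression())])
    y = load_digits().target
    clf.fit(Xdigits, y)
    return clf.score(Xdigits, y)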
| bsd-3-clause |
nguyentu1602/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
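# Illustrative sketch, not part of numpy itself: it ties the helpers above
# together by checking that `tril_indices` is just `where` applied to the
# boolean mask produced by `tri`, so index-based and mask-based selection
# pick out the same elements. The array size and offset are arbitrary.
def _triangle_indices_sketch(n=4, k=0):
    a = arange(n * n).reshape(n, n)
    mask = tri(n, n, k=k, dtype=bool)            # lower-triangular mask
    rows, cols = tril_indices(n, k=k)
    assert (a[rows, cols] == a[mask]).all()      # same elements either way
    return a[rows, cols]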
| bsd-3-clause |
pianomania/scikit-learn | sklearn/model_selection/tests/test_search.py | 2 | 51430 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import in1d
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
predict_log_proba = predict
decision_function = predict
transform = predict
inverse_transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
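# Illustrative aside, not one of the original tests: ParameterSampler is the
# randomized counterpart of ParameterGrid used by RandomizedSearchCV; with a
# scipy.stats distribution it can draw from a continuous search space. The
# parameter names, distribution and n_iter below are arbitrary.
def _parameter_sampler_sketch():
    sampler = ParameterSampler({"C": expon(scale=10), "gamma": [0.1, 1.0]},
                               n_iter=4, random_state=0)
    return [dict(params) for params in sampler]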
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def check_hyperparameter_searcher_with_fit_params(klass, **klass_kwargs):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = klass(clf, {'foo_param': [1, 2, 3]}, cv=2, **klass_kwargs)
# The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
assert_raise_message(AssertionError,
"Expected fit parameter(s) ['eggs'] not seen.",
searcher.fit, X, y, spam=np.ones(10))
assert_raise_message(AssertionError,
"Fit parameter spam has length 1; expected 4.",
searcher.fit, X, y, spam=np.ones(1),
eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
def test_grid_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(GridSearchCV)
def test_random_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(RandomizedSearchCV, n_iter=1)
def test_grid_search_fit_params_deprecation():
# NOTE: Remove this test in v0.21
# Use of `fit_params` in the class constructor is deprecated,
# but will still work until v0.21.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_warns(DeprecationWarning, grid_search.fit, X, y)
def test_grid_search_fit_params_two_places():
# NOTE: Remove this test in v0.21
# If users try to input fit parameters in both
# the constructor (deprecated use) and the `fit`
# method, we'll ignore the values passed to the constructor.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
# The "spam" array is too short and will raise an
# error in the CheckingClassifier if used.
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(1)})
expected_warning = ('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.')
assert_warns_message(RuntimeWarning, expected_warning,
grid_search.fit, X, y, spam=np.ones(10))
# Verify that `fit` prefers its own kwargs by giving valid
# kwargs in the constructor and invalid in the method call
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_raise_message(AssertionError, "Fit parameter spam has length 1",
grid_search.fit, X, y, spam=np.ones(1))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The groups parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(not hasattr(grid_search, "best_estimator_") and
hasattr(grid_search, "best_index_") and
hasattr(grid_search, "best_params_"))
    # Make sure the predict/transform etc. functions raise a meaningful error
    # message
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters' % fn_name),
getattr(grid_search, fn_name), X)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
if PY3_OR_LATER:
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
else:
grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
grid_search.fit(X, y)
assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
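    # (make_scorer(..., greater_is_better=False) wraps the callable and negates
    # its output, so maximising the resulting scorer is equivalent to
    # minimising the loss and the search should select the same C as above)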
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
    # Pass n-dimensional X (and y) to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
# So can FMS ;)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
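    # (with an int random_state each call to ``__iter__`` re-seeds a fresh
    # RandomState, so two passes over the sampler draw the same parameters)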
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def check_cv_results_array_types(cv_results, param_keys, score_keys):
# Check if the search `cv_results`'s array are of correct types
assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys))
assert_true(all(cv_results[key].dtype == object for key in param_keys))
assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys))
assert_true(all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank')))
assert_true(cv_results['rank_test_score'].dtype == np.int32)
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert_true(all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys))
def check_cv_results_grid_scores_consistency(search):
# TODO Remove in 0.20
cv_results = search.cv_results_
res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
for i in range(search.n_splits_)])).T
res_means = cv_results["mean_test_score"]
res_params = cv_results["params"]
n_cand = len(res_params)
grid_scores = assert_warns(DeprecationWarning, getattr,
search, 'grid_scores_')
assert_equal(len(grid_scores), n_cand)
# Check consistency of the structure of grid_scores
for i in range(n_cand):
assert_equal(grid_scores[i].parameters, res_params[i])
assert_array_equal(grid_scores[i].cv_validation_scores,
res_scores[i, :])
assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,
param_grid=params)
grid_search.fit(X, y)
grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,
param_grid=params)
grid_search_iid.fit(X, y)
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
for search, iid in zip((grid_search, grid_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert_true(all(cv_results['rank_test_score'] >= 1))
        assert_true(all(all(cv_results[k] >= 0) for k in score_keys
                        if k != 'rank_test_score'))
        assert_true(all(all(cv_results[k] <= 1) for k in score_keys
                        if 'time' not in k and
                        k != 'rank_test_score'))
# Check cv_results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = grid_search.cv_results_
n_candidates = len(grid_search.cv_results_['params'])
assert_true(all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
                    if cv_results['param_kernel'][i] == 'poly'))
assert_true(all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf'))
check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
    # scipy.stats distributions now support `seed`, but we still support
    # scipy 0.12, which does not. Hence the assertions in this test for
    # random_search should not depend on the randomization.
n_splits = 3
n_search_iter = 30
params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=False,
param_distributions=params)
random_search.fit(X, y)
random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=True,
param_distributions=params)
random_search_iid.fit(X, y)
param_keys = ('param_C', 'param_gamma')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
for search, iid in zip((random_search, random_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
# For random_search, all the param array vals should be unmasked
assert_false(any(cv_results['param_C'].mask) or
any(cv_results['param_gamma'].mask))
check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
# Test the IID parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv)
for search in (grid_search, random_search):
search.fit(X, y)
assert_true(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s_i][0]
for s_i in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
# Test the first candidate
assert_equal(search.cv_results_['param_C'][0], 1)
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
assert_array_almost_equal(train_cv_scores, [1, 1])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average and weighted std
expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
3. / 4 * (expected_test_mean - 1. / 3.) **
2)
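        # numerically: expected_test_mean = 0.25 + 0.25 = 0.5 and
        # expected_test_std = sqrt(1/16 + 1/48) = sqrt(1/12), approx. 0.289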
assert_almost_equal(test_mean, expected_test_mean)
assert_almost_equal(test_std, expected_test_std)
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
# once with iid=False
grid_search = GridSearchCV(SVC(),
param_grid={'C': [1, 10]},
cv=cv, iid=False)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv, iid=False)
for search in (grid_search, random_search):
search.fit(X, y)
assert_false(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s][0]
for s in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s][0]
for s in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert_equal(search.cv_results_['param_C'][0], 1)
# scores are the same as above
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
# Unweighted mean/std is used
assert_almost_equal(test_mean, np.mean(test_cv_scores))
assert_almost_equal(test_std, np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
try:
assert_almost_equal(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
except AssertionError:
pass
try:
assert_almost_equal(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
except AssertionError:
pass
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
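# Illustrative sketch (not part of the original suite): the 'min' tie-breaking
# checked above behaves like scipy.stats.rankdata on the negated mean scores,
# i.e. tied candidates share the best rank and the next rank is skipped.
def _example_min_rank_tie_breaking():
    from scipy.stats import rankdata
    mean_test_scores = np.array([0.9, 0.9, 0.5])
    ranks = np.asarray(rankdata(-mean_test_scores, method='min'),
                       dtype=np.int32)
    assert_array_equal(ranks, [1, 1, 3])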
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold(random_state=0)
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
# NOTE The precision of time.time in windows is not high
# enough for the fit/score times to be non-zero for trivial X and y
assert_true(np.all(search.cv_results_[key] >= 0))
assert_true(np.all(search.cv_results_[key] < 1))
for key in ['mean_score_time', 'std_score_time']:
assert_true(search.cv_results_[key][1] >= 0)
assert_true(search.cv_results_[key][0] == 0.0)
assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert_true(all(in1d(expected_keys, result_keys)))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
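    # (when any entry is a scipy distribution the sampler draws each candidate
    # independently, effectively with replacement, so n_iter may exceed the
    # 2 * 3 = 6 distinct combinations without looping forever)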
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},
return_train_score=False)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits))
gs2.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
    # OneTimeSplitter is a non-re-entrant cv whose split can be called only
    # once. If ``cv.split`` were called once per param setting in
    # GridSearchCV.fit, the 2nd and 3rd parameter settings would not be
    # evaluated, as no train/test indices would be generated for the 2nd and
    # subsequent cv.split calls. This is a check to make sure cv.split is
    # called only once in total.
np.testing.assert_equal(_pop_time_keys(gs.cv_results_),
_pop_time_keys(gs2.cv_results_))
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True))
gs.fit(X, y)
# As the first two param settings (C=0.1) and the next two param
# settings (C=0.2) are same, the test and train scores must also be
# same as long as the same train/test indices are generated for all
# the cv splits, for both param setting
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
| bsd-3-clause |
ananthamurthy/eyeBlinkBehaviour | analysis/analyze_mouse_performance.py | 2 | 5547 | """analyze_mouse_performance.py:
Analyze a given directory. All trials are accumulated and plotted.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh "
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import os
import sys
import numpy as np
import dateutil
import dateutil.parser
import matplotlib
import matplotlib.pyplot as plt
from collections import defaultdict
import logging
import re
import analyze_trial as at
import session_type as st
import math
matplotlib.rcParams.update( {'font.size' : 10} )
try:
plt.style.use('classic')
except Exception as e:
pass
args_ = None
csplus, csminus = [], []
csplusIdx, csminusIdx = [], []
distraction = []
distractionIdx = []
probes = []
probesIdx = []
def plot_subplot( ax, data, idx, tVec, aN, bN, title ):
csplusData = np.vstack( data )
plt.imshow( csplusData, cmap = "jet"
, extent = [tVec[aN], tVec[bN], len(idx), 0]
            , vmin = csplusData.min(), vmax = csplusData.max()
, interpolation = 'none', aspect='auto'
)
# ax.set_xticks( range(0,len(idx),2), idx[::2] )
ax.set_xlabel( 'Time (ms)' )
ax.set_ylabel( '# Trial' )
ax.set_title( title )
ax.legend( )
# ax.colorbar( )
def accept( subdir_name, reject_list ):
for l in reject_list:
if l in subdir_name:
print( '[INFO] Dir %s is rejected' % subdir_name )
return False
return True
def plot_area_under_curve( cspData, normalised = True ):
if normalised:
outfile = os.path.join( args_.dir, 'area_under_tone_puff_normalised.png' )
else:
outfile = os.path.join( args_.dir, 'area_under_tone_puff_raw.png' )
for i, (t, sense, area) in enumerate(cspData):
ax = plt.subplot( math.ceil( len(cspData)/ 2.0 ), 2, i + 1 )
        # materialise the zipped columns as arrays so they can be indexed
        # and divided (zip() returns an iterator on Python 3)
        area = [np.asarray(col) for col in zip(*area)]
if not normalised:
plt.scatter( area[0] , area[1] )
ax.set_xlim( 0, 3000 )
ax.set_ylim( 0, 3000 )
else:
plt.scatter( area[0] / np.max( area[0] ) , area[1] / np.max( area[1]))
plt.xlabel( 'Tone AOC' )
plt.ylabel( 'Puff AOC' )
plt.savefig( outfile )
print('[INFO] Saved tone/puff area scatter for all session to %s' % outfile)
def plot_performance( cspData ):
global args_
outfile = os.path.join( args_.dir, 'performance.png' )
sessions, performances = [], []
for i, (t, sense, area) in enumerate( cspData ):
sessions.append( i + 1 )
area = zip( *area )
tone, puff = area
performances.append( np.mean(tone) / np.mean( puff) )
plt.plot( sessions, performances , '-*')
plt.xlabel( '# Session ' )
plt.ylabel( 'Performance = tone / puff ' )
plt.savefig( outfile )
print( '[INFO] Performance is save to %s' % outfile )
def plot_csp_data( cspData ):
"""Plot CS_P type of trials from each session """
global args_
allSession = []
allArea = []
for t, sense, area in cspData:
allSession.append( np.mean(sense, axis=0) )
for i, sens in enumerate(allSession):
plt.subplot( len(allSession), 1, i + 1 )
plt.plot( sens, label = 'Session %s' % (i + 1) )
plt.legend( )
# plt.colorbar( )
outfile = os.path.join( args_.dir, 'all_cs_p.png' )
plt.savefig( outfile )
print( '[INFO] Saved all CS_P to %s' % outfile )
plt.figure( )
plot_area_under_curve( cspData, False )
plt.figure( )
plot_area_under_curve( cspData, True )
    # Final performance.
plt.figure( )
plot_performance( cspData )
def rank_behaviour( session_type_dirs ):
"""Rank the behaviour of a given mouse. The directory session_type_dirs
contains all the data related to this mouse.
"""
cspData = []
areaData = []
for sd in session_type_dirs:
sessionData = st.session_data( sd )
cspData.append( sessionData['CS_P'] )
plot_csp_data( cspData )
def get_sessions( dir_name, **kwargs ):
ignoreSessionTypeList = kwargs.get( 'ignore_session_types', [] )
files = {}
validSubDirs = []
for d, sd, fs in os.walk( dir_name ):
stPat = re.compile( r'SessionType\d+' )
for sdd in sd:
if stPat.search( sdd ):
if accept( sdd, ignoreSessionTypeList ):
validSubDirs.append( os.path.join(d, sdd) )
rank_behaviour( validSubDirs )
def main( ):
global args_
if not args_.output_dir:
args_.output_dir = os.path.join(args_.dir, '_plots')
if not os.path.isdir( args_.output_dir):
os.makedirs( args_.output_dir )
sessions = get_sessions( args_.dir, ignore_session_types=[ 'SessionType12'] )
if __name__ == '__main__':
import argparse
# Argument parser.
description = '''Scoring mouse performance'''
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--dir', '-d'
, required = True
, help = 'Directory to seach for behaviour data for a mouse'
)
parser.add_argument('--subplots', '-s'
, action = 'store_true'
, help = 'Each trial in subplot.'
)
parser.add_argument('--output_dir', '-o'
, required = False
, default = ''
, help = 'Directory to save results.'
)
class Args: pass
args_ = Args()
parser.parse_args(namespace=args_)
main( )
| gpl-3.0 |
ldirer/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 5 | 51103 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils import compute_class_weight
from ..utils import deprecated
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
    Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function_, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = int(n_jobs)
@property
@deprecated("Attribute loss_function was deprecated in version 0.19 and "
"will be removed in 0.21. Use 'loss_function_' instead")
def loss_function(self):
return self.loss_function_
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
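            # Expose the averaged weights only once averaging has actually
            # started, i.e. after ``average`` samples have been seen
            # (``t_ - 1`` counts the samples seen so far).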
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
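        # (the "threading" backend is used here presumably because the Cython
        # SGD routines release the GIL, so the per-class fits can overlap)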
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
    loss_function_ : concrete ``LossFunction``
        The concrete loss function used by the model.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
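    Probability estimates require ``loss='log'`` or ``loss='modified_huber'``;
    a minimal sketch (illustrative only, outputs depend on the data and the
    random state)::
        clf_p = linear_model.SGDClassifier(loss='log').fit(X, Y)
        clf_p.predict_proba([[-0.8, -1]])  # array of shape (1, 2)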
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = y.astype(np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "average_coef_", None) is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
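    # Illustrative usage sketch (names are placeholders, not part of this
    # module): each call performs a single pass over the data it is given, so
    # a streaming fit over an iterable of chunks could look like
    #   reg = SGDRegressor()
    #   for X_chunk, y_chunk in chunk_iterator:
    #       reg.partial_fit(X_chunk, y_chunk)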
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
ellisonbg/altair | altair/sphinxext/altairplot.py | 1 | 11045 | """
Altair Plot Sphinx Extension
============================
This extension provides a means of inserting live-rendered Altair plots within
sphinx documentation. There are two directives defined: ``altair-setup`` and
``altair-plot``. ``altair-setup`` code is used to set-up various options
prior to running the plot code. For example::
.. altair-plot::
:output: none
from altair import *
import pandas as pd
data = pd.DataFrame({'a': list('CCCDDDEEE'),
'b': [2, 7, 4, 1, 2, 6, 8, 4, 7]})
.. altair-plot::
Chart(data).mark_point().encode(
x='a',
y='b'
)
In the case of the ``altair-plot`` code, the *last statement* of the code-block
should contain the chart object you wish to be rendered.
Options
-------
The directives have the following options::
.. altair-plot::
:namespace: # specify a plotting namespace that is persistent within the doc
:hide-code: # if set, then hide the code and only show the plot
:code-below: # if set, then code is below rather than above the figure
:output: [plot|repr|stdout|none]
:alt: text # Alternate text when plot cannot be rendered
:links: editor source export # specify one or more of these options
:chart-var-name: chart # name of variable in namespace containing output
Additionally, this extension introduces a global configuration
``altairplot_links``, set in your ``conf.py`` which is a dictionary
of links that will appear below plots, unless the ``:links:`` option
again overrides it. It should look something like this::
# conf.py
# ...
altairplot_links = {'editor': True, 'source': True, 'export': True}
# ...
If this configuration is not specified, all are set to True.
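To make the ``altair-plot`` directive available, the extension must be listed
in the Sphinx ``extensions`` setting. A minimal sketch of ``conf.py`` follows
(assuming altair is importable at documentation build time)::
    # conf.py
    extensions = [
        # ... other extensions ...
        'altair.sphinxext.altairplot',
    ]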
"""
import contextlib
import io
import os
import json
import warnings
import jinja2
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import flag, unchanged
from sphinx.locale import _
import altair as alt
from altair.utils.execeval import eval_block
# These default URLs can be changed in conf.py; see setup() below.
VEGA_JS_URL_DEFAULT = "https://cdn.jsdelivr.net/npm/vega@3"
VEGALITE_JS_URL_DEFAULT = "https://cdn.jsdelivr.net/npm/vega-lite@2"
VEGAEMBED_JS_URL_DEFAULT = "https://cdn.jsdelivr.net/npm/vega-embed@3"
VGL_TEMPLATE = jinja2.Template("""
<div id="{{ div_id }}">
<script>
// embed when document is loaded, to ensure vega library is available
// this works on all modern browsers, except IE8 and older
document.addEventListener("DOMContentLoaded", function(event) {
var spec = {{ spec }};
var opt = {
"mode": "{{ mode }}",
"renderer": "{{ renderer }}",
"actions": {{ actions}}
};
      vegaEmbed('#{{ div_id }}', spec, opt).catch(console.error);
});
</script>
</div>
""")
class altair_plot(nodes.General, nodes.Element):
pass
def purge_altair_namespaces(app, env, docname):
if not hasattr(env, '_altair_namespaces'):
return
env._altair_namespaces.pop(docname, {})
DEFAULT_ALTAIRPLOT_LINKS = {'editor': True, 'source': True, 'export': True}
def validate_links(links):
if links.strip().lower() == 'none':
return {}
links = links.strip().split()
diff = set(links) - set(DEFAULT_ALTAIRPLOT_LINKS.keys())
if diff:
raise ValueError("Following links are invalid: {0}".format(list(diff)))
return dict((link, link in links) for link in DEFAULT_ALTAIRPLOT_LINKS)
def validate_output(output):
output = output.strip().lower()
if output not in ['plot', 'repr', 'stdout', 'none']:
raise ValueError(":output: flag must be one of [plot|repr|stdout|none]")
return output
class AltairPlotDirective(Directive):
has_content = True
option_spec = {'hide-code': flag,
'code-below': flag,
'namespace': unchanged,
'output': validate_output,
'alt': unchanged,
'links': validate_links,
'chart-var-name': unchanged}
def run(self):
env = self.state.document.settings.env
app = env.app
show_code = 'hide-code' not in self.options
code_below = 'code-below' in self.options
if not hasattr(env, '_altair_namespaces'):
env._altair_namespaces = {}
namespace_id = self.options.get('namespace', 'default')
namespace = env._altair_namespaces\
.setdefault(env.docname, {})\
.setdefault(namespace_id, {})
code = '\n'.join(self.content)
if show_code:
source_literal = nodes.literal_block(code, code)
source_literal['language'] = 'python'
# get the name of the source file we are currently processing
rst_source = self.state_machine.document['source']
rst_dir = os.path.dirname(rst_source)
rst_filename = os.path.basename(rst_source)
# use the source file name to construct a friendly target_id
serialno = env.new_serialno('altair-plot')
rst_base = rst_filename.replace('.', '-')
div_id = "{0}-altair-plot-{1}".format(rst_base, serialno)
target_id = "{0}-altair-source-{1}".format(rst_base, serialno)
target_node = nodes.target('', '', ids=[target_id])
# create the node in which the plot will appear;
# this will be processed by html_visit_altair_plot
plot_node = altair_plot()
plot_node['target_id'] = target_id
plot_node['div_id'] = div_id
plot_node['code'] = code
plot_node['namespace'] = namespace
plot_node['relpath'] = os.path.relpath(rst_dir, env.srcdir)
plot_node['rst_source'] = rst_source
plot_node['rst_lineno'] = self.lineno
plot_node['links'] = self.options.get('links', app.builder.config.altairplot_links)
plot_node['output'] = self.options.get('output', 'plot')
plot_node['chart-var-name'] = self.options.get('chart-var-name', None)
if 'alt' in self.options:
plot_node['alt'] = self.options['alt']
result = [target_node]
if code_below:
result += [plot_node]
if show_code:
result += [source_literal]
if not code_below:
result += [plot_node]
return result
def html_visit_altair_plot(self, node):
# Execute the code, saving output and namespace
namespace = node['namespace']
try:
f = io.StringIO()
with contextlib.redirect_stdout(f):
chart = eval_block(node['code'], namespace)
stdout = f.getvalue()
except Exception as e:
warnings.warn("altair-plot: {0}:{1} Code Execution failed:"
"{2}: {3}".format(node['rst_source'], node['rst_lineno'],
e.__class__.__name__, str(e)))
raise nodes.SkipNode
chart_name = node['chart-var-name']
if chart_name is not None:
if chart_name not in namespace:
raise ValueError("chart-var-name='{0}' not present in namespace"
"".format(chart_name))
chart = namespace[chart_name]
output = node['output']
if output == 'none':
raise nodes.SkipNode
elif output == 'stdout':
if not stdout:
raise nodes.SkipNode
else:
output_literal = nodes.literal_block(stdout, stdout)
output_literal['language'] = 'none'
node.extend([output_literal])
self.visit_admonition(node)
elif output == 'repr':
if chart is None:
raise nodes.SkipNode
else:
rep = ' ' + repr(chart).replace('\n', '\n ')
repr_literal = nodes.literal_block(rep, rep)
repr_literal['language'] = 'none'
node.extend([repr_literal])
self.visit_admonition(node)
elif output == 'plot':
if isinstance(chart, alt.TopLevelMixin):
# Last line should be a chart; convert to spec dict
spec = chart.to_dict()
actions = node['links']
            # TODO: add an option to save specs to file & load from there.
# TODO: add renderer option
# Write spec to a *.vl.json file
# dest_dir = os.path.join(self.builder.outdir, node['relpath'])
# if not os.path.exists(dest_dir):
# os.makedirs(dest_dir)
# filename = "{0}.vl.json".format(node['target_id'])
# dest_path = os.path.join(dest_dir, filename)
# with open(dest_path, 'w') as f:
# json.dump(spec, f)
# Pass relevant info into the template and append to the output
html = VGL_TEMPLATE.render(div_id=node['div_id'],
spec=json.dumps(spec),
mode='vega-lite',
renderer='canvas',
actions=json.dumps(actions))
self.body.append(html)
else:
warnings.warn('altair-plot: {0}:{1} Malformed block. Last line of '
'code block should define a valid altair Chart object.'
''.format(node['rst_source'], node['rst_lineno']))
raise nodes.SkipNode
def generic_visit_altair_plot(self, node):
# TODO: generate PNGs and insert them here
if 'alt' in node.attributes:
self.body.append(_('[ graph: %s ]') % node['alt'])
else:
self.body.append(_('[ graph ]'))
raise nodes.SkipNode
def depart_altair_plot(self, node):
return
def builder_inited(app):
app.add_javascript(app.config.altairplot_vega_js_url)
app.add_javascript(app.config.altairplot_vegalite_js_url)
app.add_javascript(app.config.altairplot_vegaembed_js_url)
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('altairplot_links', DEFAULT_ALTAIRPLOT_LINKS, 'env')
app.add_config_value('altairplot_vega_js_url', VEGA_JS_URL_DEFAULT, 'html')
app.add_config_value('altairplot_vegalite_js_url', VEGALITE_JS_URL_DEFAULT, 'html')
app.add_config_value('altairplot_vegaembed_js_url', VEGAEMBED_JS_URL_DEFAULT, 'html')
app.add_directive('altair-plot', AltairPlotDirective)
app.add_stylesheet('altair-plot.css')
app.add_node(altair_plot,
html=(html_visit_altair_plot, depart_altair_plot),
latex=(generic_visit_altair_plot, depart_altair_plot),
texinfo=(generic_visit_altair_plot, depart_altair_plot),
text=(generic_visit_altair_plot, depart_altair_plot),
man=(generic_visit_altair_plot, depart_altair_plot))
app.connect('env-purge-doc', purge_altair_namespaces)
app.connect('builder-inited', builder_inited)
return {'version': '0.1'}
| bsd-3-clause |
rmcmillan05/dens_mat-rt | src/non-fortran/fft.py | 1 | 9852 | #! /usr/bin/env python
from __future__ import print_function
import numpy as np
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from scipy.signal import blackman
import sys
import os.path
import math
def joinwords(*words):
joined = ''.join(words)
return joined
def message(*objs):
print(joinwords(*objs), file=sys.stderr)
def out_message(*objs):
print(joinwords(*objs), file=sys.stdout)
def error(*objs):
message("ERROR: ", *objs)
sys.exit(0)
def warning(*objs):
message("WARNING: ", *objs)
def read_warning():
warning('In input file "',masterin,'" at line ',str(nl),'. Parameter name "', p, '" not recognised.')
def find_nearest(array_in, value):
idx = (np.abs(np.asarray(array_in)-value)).argmin()
return idx, array_in[idx]
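# Usage sketch (reflecting the argument handling below; the file name is only
# an example):
#   ./fft.py my_run.in
# An optional input file may be passed on the command line and defaults to
# 'fft.in'; if it does not exist, a commented template is written and opened
# in vi for editing.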
narg = len(sys.argv)-1
if narg > 0:
masterin = str(sys.argv[1])
else:
masterin = 'fft.in'
if not os.path.isfile(masterin):
fid = open(masterin, 'w')
print('# Input file for fft.py',file=fid)
print('',file=fid)
print('perform fft # max / integrate / average / fft',file=fid)
print('infile test.out',file=fid)
print('field_file test.field.out',file=fid)
print('outfile test.out.ft',file=fid)
print('readcol 6',file=fid)
print('field_col 2',file=fid)
print('delay 50',file=fid)
print('from 1900',file=fid)
print('to 2000',file=fid)
print('#units_time fs',file=fid)
print('#damp_factor 0.1 # Lorentzian damping in eV',file=fid)
print('#yambo_delta # Correct field to match YPP output',file=fid)
print('#yambo_timestep 0.1e-2 # Timestep used in yambo_rt (in fs)',file=fid)
print('#EELS # Output 1/FFT also', file=fid)
fid.close()
os.system('vi '+masterin)
exit()
# error('Input file "',masterin,'" does not exist. Temporary fft.in written.')
#Defaults
readcol = 2
outfile = ''
field_file = ''
start_from = 0.0
go_to = np.inf
delay = 0.0
div_by_field = 0
perform = 'fft'
time_par_type = 'au'
fs_to_au = 1.0
damp_factor = 0.0
damp_function = 1.0
start_id = 0  # set to 1 below when yambo_delta is given, so the initial delta sample is skipped
speed_of_light = 137.03599911
yambo_delta = 0
yambo_timestep = 1
calc_eels = 0
nl = 0
with open(masterin) as foo:
for line in foo:
nl = nl + 1
columns = line.split()
if len(columns) > 0:
p = columns[0]
try: a = columns[1]
except: a = 0
if p[0] == '#':
continue
if p == str('infile'):
infile = a
if not os.path.isfile(infile):
error('In input file "',masterin,'" at line ',str(nl),'. File "', infile, '" does not exist.')
elif p == 'field_file':
field_file = a
if not os.path.isfile(field_file):
error('In input file "',masterin,'" at line ',str(nl),'. File "', field_file, '" does not exist.')
elif p == str('outfile'):
outfile = a
elif p == 'readcol':
readcol = int(a)
elif p == 'from':
start_from = float(a)
elif p == 'to':
go_to = float(a)
elif p == 'delay':
delay = float(a)
elif p == 'field_col':
field_col = int(a)
div_by_field = 1
elif p == 'perform':
perform = a
elif p == 'units_time':
time_par_type = a
elif p == 'damp_factor':
damp_factor = float(a)
elif p == 'yambo_delta':
yambo_delta = 1
start_id = 1
elif p == 'yambo_timestep':
yambo_timestep = float(a)
elif p == 'EELS':
calc_eels = 1
else:
read_warning()
foo.close()
#infile = 'ft_00000100.out'
#outfile = 'test.dat'
#readcol = 2
freq_au_to_ev = 27.2113834
x = []
y = []
if div_by_field == 1: f = []
if time_par_type == 'fs':
fs_to_au = 41.341373336561361
delay = delay * fs_to_au
start_from = start_from * fs_to_au
go_to = go_to * fs_to_au
damp_factor = damp_factor / freq_au_to_ev
nl = 0
with open(infile) as foo:
for line in foo:
li = line.strip()
if li.startswith("#"):
continue
nl = nl+1
columns = line.split()
if nl == 1:
try:
xtmp = float(columns[0])*fs_to_au
name = joinwords('col_',str(readcol))
except:
name = str(columns[readcol - 1])
continue
if len(columns) > 0:
try:
xtmp = float(columns[0])*fs_to_au
except:
if nl == 2:
error('Error in reading input file "',infile,'".')
else:
message('Input file "',infile,'" read successfully.')
break
if np.abs(xtmp) >= start_from and np.abs(xtmp) <= go_to :
x.append(xtmp)
y.append(float(columns[readcol - 1]))
if (div_by_field == 1 and field_file==''):
f.append(float(columns[field_col - 1]))
foo.close()
if not field_file == '':
with open(field_file) as foo:
for line in foo:
li = line.strip()
if li.startswith("#"):
continue
nl = nl+1
columns = line.split()
if nl == 1:
try:
xtmp = float(columns[0])*fs_to_au
name = joinwords('col_',str(field_col))
except:
name = str(columns[field_col - 1])
continue
if len(columns) > 0:
try:
xtmp = float(columns[0])*fs_to_au
except:
if nl == 2:
error('Error in reading input file "',infile,'".')
else:
message('Input file "',infile,'" read successfully.')
break
if np.abs(xtmp) >= start_from and np.abs(xtmp) <= go_to :
if (div_by_field == 1):
f.append(float(columns[field_col - 1]))
foo.close()
T = x[1] - x[0]
delayid, delayapp = find_nearest(x, delay)
N = len(y)
if perform == 'integrate':
area = T*sum(y[0:len(y)-1])
message('Area between ',str(x[0]),' and ',str(x[len(x)-1]), ' is:')
out_message(str(area))
elif perform == 'average':
av = sum(y)/float(N)
message('Average value between ',str(x[0]),' and ',str(x[len(x)-1]), ' is:')
out_message(str(av))
elif perform == 'max':
maxm = max(y)
    message('Maximum value between ',str(x[0]),' and ',str(x[len(x)-1]), ' is:')
out_message(str(maxm))
elif perform == 'fft':
if outfile == '':
outfile = joinwords(infile,'.',name,'.fft')
fid = open(outfile, 'w')
if not damp_factor == 0.0:
for i in range(N):
y[i] = y[i] * np.exp(-damp_factor * x[i])
if not delay == 0.0:
## ytmp = y[N-delayid:N]
## ytmp = ytmp + y[0:N-delayid]
ytmp = y[delayid:N]
ytmp = ytmp + y[0:delayid]
if div_by_field == 1:
ftmp = f[delayid:N]
ftmp = ftmp + f[0:delayid]
# ytmp = y[delayid:N]
# ytmp = ytmp + delayid*[0]
# plt.plot(x,ytmp, 'r')
# plt.plot(x,y, 'k')
# plt.show()
else:
ytmp = y
if (div_by_field == 1): ftmp = f
#yf = fft(y)
if yambo_delta and div_by_field: ftmp = [ftmp[i]*fs_to_au*yambo_timestep/T/5.14220652e11 for i in range(len(ftmp))]
yf = fft(ytmp[start_id:len(ytmp)])
if (div_by_field == 1): ff = fft(ftmp[start_id:len(ftmp)])
N = len(yf)
# for i in range(len(ytmp)): print(i,ytmp[i].imag, ytmp[i].real)
# for i in range(len(yf)): print(i,yf[i].imag, yf[i].real)
if div_by_field:
eps = [yf[i]/ff[i] for i in range(len(yf))]
else:
eps = yf
if yambo_delta and div_by_field: eps = [1. + eps[i]*4.*np.pi for i in range(len(eps))]
if calc_eels: eels = [1./eps[i] for i in range(len(eps))]
#w = blackman(N)
#ywf = fft(y*w)
xf = np.linspace(0.0, 1.0/(2.0*T ), math.floor(N/2))
# for i in range(N/2):
# yf[i] = yf[i]/np.exp(-1j*xf[i]*0.001*fs_to_au)/5.33802520488724E-11*4.*np.pi
xf = 2.0*np.pi * freq_au_to_ev * xf
xf = [2.*np.pi*freq_au_to_ev*float(n)/T/float(N) for n in range(N)]
#### NB Note that we print out the conjugate ####
if calc_eels:
print("%-22s %-22s %-22s %-22s %-22s %-22s" % ('Freq.', 'FFT.real', 'FFT.imag', 'FFT.abs', 'EELS.real', 'EELS.imag'), file=fid)
else:
print("%-22s %-22s %-22s %-22s" % ('Freq.', 'FFT.real', 'FFT.imag', 'FFT.abs'), file=fid)
if (div_by_field == 1):
for i in range(math.floor(N/2)):
if calc_eels:
print("%22.13G %22.13G %22.13G %22.13G %22.13G %22.13G" % (xf[i], eps[i].real, -eps[i].imag, np.abs(eps[i]), eels[i].real, eels[i].imag), file=fid)
else:
print("%22.13G %22.13G %22.13G %22.13G" % (xf[i], eps[i].real, -eps[i].imag, np.abs(eps[i])), file=fid)
else:
for i in range(math.floor(N/2)):
print("%22.13G %22.13G %22.13G %22.13G" % (xf[i], 2.0/float(N)*yf[i].real, -2.0/float(N)*yf[i].imag, np.abs(2.0/float(N)*yf[i])), file=fid)
if not delay == 0.0: message('Time Delay: ', str(delayapp), '.')
message('FFT of column ',str(readcol), ' successfully output to "',outfile,'".')
message('Resolution: ',str(xf[1]-xf[0]),' eV.')
message('Max Energy: ',str(xf[math.floor(N/2)-1]),' eV.')
fid.close()
else:
error('Perform program: "',perform,'" not recognised.')
| gpl-2.0 |
cduff4464/2016_summer_XPD | out_of_date/roi/plotting_interface.py | 2 | 4085 | """
This file is meant to be a prototype for defining ROIs (regions of interest)
to be used in later applications.
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, RadioButtons, Button
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize
import numpy as np
from roi_definer import ROI
from file_finder import FileFinder
start = FileFinder()
start.get_name()
plt.figure(1)
axpic = plt.subplot2grid((20, 20), (0, 0), rowspan=14, colspan=14)
axps = plt.subplot2grid((20, 20), (19, 10), rowspan=1, colspan=10)
axrb = plt.subplot2grid((20, 20), (15, 10), rowspan=1, colspan=10)
axre = plt.subplot2grid((20, 20), (16, 10), rowspan=1, colspan=10)
axcb = plt.subplot2grid((20, 20), (17, 10), rowspan=1, colspan=10)
axce = plt.subplot2grid((20, 20), (18, 10), rowspan=1, colspan=10)
axgray = plt.subplot2grid((20, 20), (0, 15), rowspan=6, colspan=5)
axskipf = plt.subplot2grid((20, 20), (19, 5), rowspan=1, colspan=2)
axskipb = plt.subplot2grid((20, 20), (19, 3), rowspan=1, colspan=2)
axvmin = plt.subplot2grid((20, 20), (15, 0), rowspan=2, colspan=5)
axvmax = plt.subplot2grid((20, 20), (17, 0), rowspan=2, colspan=5)
axbar = plt.subplot2grid((20, 20), (7, 14), rowspan=3, colspan=8)
axzoom = plt.subplot2grid((20, 20), (11, 14), rowspan=3, colspan=8)
pic_swap = Slider(axps, 'Pic Index', 0, len(start.pic_list)-1, valinit=0)
rb = Slider(axrb, 'Row Begin', 0, 2047, valinit=100)
re = Slider(axre, 'Row End', 0, 2047, valinit=1900)
cb = Slider(axcb, 'Col Begin', 0, 2047, valinit=100)
ce = Slider(axce, 'Col End', 0, 2047, valinit=1900)
gray = RadioButtons(axgray, ('RdBu', 'BrBG', 'RdYlGn', 'Greys_r'))
skipf = Button(axskipf, '>')
skipb = Button(axskipb, '<')
abs_min_V = np.min(start.pic_list[int(pic_swap.val)])
abs_max_V = np.max(start.pic_list[int(pic_swap.val)])
vmin = Slider(axvmin, 'Vmin', abs_min_V, abs_max_V, abs_min_V)
vmax = Slider(axvmax, 'Vmax', abs_min_V, abs_max_V, abs_max_V)
zoom = RadioButtons(axzoom, ('Home', 'Zoom'))
im = None
def pic_switch(event):
bounds = roi1.export()
if zoom.value_selected == 'Zoom':
axpic.cla()
axpic.imshow(start.pic_list[int(pic_swap.val)], vmin=vmin.val, vmax=vmax.val, cmap=gray.value_selected)
axpic.set_title(start.file_list[int(pic_swap.val)])
axpic.set_xlim(bounds[2], bounds[3])
axpic.set_ylim(bounds[1], bounds[0])
axpic.axvline(x=bounds[2])
axpic.axvline(x=bounds[3])
axpic.axhline(y=bounds[0])
axpic.axhline(y=bounds[1])
axbar.cla()
norm = Normalize(vmin=vmin.val, vmax=vmax.val)
col = ColorbarBase(axbar, cmap=gray.value_selected, norm=norm, orientation='horizontal')
col.set_ticks([vmin.val, vmax.val], update_ticks=True)
else:
axpic.cla()
axpic.imshow(start.pic_list[int(pic_swap.val)], vmin=vmin.val, vmax=vmax.val, cmap=gray.value_selected)
axpic.set_title(start.file_list[int(pic_swap.val)])
axpic.axvline(x=bounds[2])
axpic.axvline(x=bounds[3])
axpic.axhline(y=bounds[0])
axpic.axhline(y=bounds[1])
axbar.cla()
norm = Normalize(vmin=vmin.val, vmax=vmax.val)
col = ColorbarBase(axbar, cmap=gray.value_selected, norm=norm, orientation='horizontal')
col.set_ticks([vmin.val, vmax.val], update_ticks=True)
def forward(event):
if pic_swap.val + 1 > len(start.pic_list) - 1:
pass
else:
x = pic_swap.val + 1
pic_swap.set_val(x)
def backward(event):
if pic_swap.val - 1 < 0:
pass
else:
x = pic_swap.val - 1
pic_swap.set_val(x)
roi1 = ROI(start.pic_list[0])
def update_values(event):
roi1.update(rb.val, re.val, cb.val, ce.val)
pic_switch(None)
rb.on_changed(update_values)
re.on_changed(update_values)
cb.on_changed(update_values)
ce.on_changed(update_values)
pic_swap.on_changed(pic_switch)
gray.on_clicked(pic_switch)
skipf.on_clicked(forward)
skipb.on_clicked(backward)
vmin.on_changed(pic_switch)
vmax.on_changed(pic_switch)
zoom.on_clicked(pic_switch)
update_values(None)
pic_switch(None)
plt.show()
| bsd-2-clause |
yunque/sms-tools | lectures/05-Sinusoidal-model/plots-code/sineModelAnal-bendir.py | 24 | 1245 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
w = np.hamming(2001)
N = 2048
H = 200
t = -80
minSineDur = .02
maxnSines = 150
freqDevOffset = 10
freqDevSlope = 0.001
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
plt.figure(1, figsize=(9.5, 7))
maxplotfreq = 800.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (bendir.wav)')
plt.tight_layout()
plt.savefig('sineModelAnal-bendir.png')
plt.show() | agpl-3.0 |
pekkaroi/bldc-drive | gui/ServoGui.py | 1 | 10906 | #!/usr/bin/python
from Tkinter import Tk, Label, BOTH, Y, LEFT, RIGHT, X, RAISED, SUNKEN, StringVar, END, DISABLED, NORMAL, TOP
from ttk import Frame, Button, Entry, OptionMenu
import serial
import serial.tools.list_ports
import threading
import Queue
import time
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
matplotlib.use('TkAgg')
drivesettings = [
"commutationMethod",
"inputMethod",
"encoder_PPR",
"encoder_poles",
"encoder_counts_per_step",
"pid_Kp",
"pid_Ki",
"pid_Kd",
"pid_FF1",
"pid_FF2",
"pid_deadband",
"usart_baud",
"max_current",
"max_error",
"invert_dirstepena",
"commutation_offset"
]
class SerialThread(threading.Thread):
def __init__(self, queue, writequeue, ser):
threading.Thread.__init__(self)
self.ser = ser
self.queue = queue
self.writequeue = writequeue
self.buffer = ""
def stop(self):
self.running = False
def run(self):
self.running = True
while self.running:
while self.ser.inWaiting():
ch = self.ser.read(1)
if ch == '\n' or ch == '\r':
self.queue.put(self.buffer)
self.buffer = ""
else:
self.buffer += ch
while self.writequeue.qsize():
try:
line = self.writequeue.get()
self.ser.write(line)
except Queue.Empty:
pass
class topFrame(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.setUI()
def setUI(self):
self.parent.title("ServoGui")
self.pack(fill=BOTH, expand=1)
self.comPort = StringVar(self)
self.laststrm = StringVar(self)
settingFrame = Frame(self, borderwidth=1, relief=RAISED)
settingFrame.pack(fill=Y, side=LEFT)
Label(settingFrame, width=50, text="Port Settings", bg="green", fg="black").pack(fill=X)
ports = self.getComPorts()
w = apply(OptionMenu, (settingFrame, self.comPort) + tuple(ports))
w.pack(fill=X)
BaudFrame = Frame(settingFrame)
BaudFrame.pack(fill=X)
Label(BaudFrame, text="Baud:").pack(side=LEFT)
self.baud_entry = Entry(BaudFrame,
width=15,
validate="focusout",
validatecommand=self.baudValidate)
self.baud_entry.pack(side=LEFT, expand=True)
self.baud_entry.insert(0, "115200")
Button(settingFrame, text="Open Port", command=self.openPort). pack(fill=X)
Button(settingFrame, text="Close Port", command=self.closePort). pack(fill=X)
StreamFrame = Frame(settingFrame)
StreamFrame.pack()
self.btnStartStream = Button(StreamFrame,
text="Start Stream",
command=self.startStream,
state=DISABLED)
self.btnStopStream = Button(StreamFrame,
text="Stop Stream",
command=self.stopStream,
state=DISABLED)
self.btnGetConfig = Button(StreamFrame,
text="Get Config",
command=self.getConfig,
state=DISABLED)
self.btnStartStream.pack(side=LEFT)
self.btnStopStream.pack(side=LEFT)
self.btnGetConfig.pack(side=LEFT)
self.queue = Queue.Queue()
self.writequeue = Queue.Queue()
Label(settingFrame, width=50, text="Drive Settings", bg="green", fg="black").pack(fill=X)
DriveSettingsFrame = Frame(settingFrame, relief=SUNKEN)
DriveSettingsFrame.pack(fill=X)
driveSettingsFrames = []
self.driveSettingsEntries = []
for drivesetting in drivesettings:
driveSettingsFrames.append(Frame(DriveSettingsFrame))
driveSettingsFrames[-1].pack(fill=X)
Label(driveSettingsFrames[-1], text=drivesetting).pack(side=LEFT)
self.driveSettingsEntries.append(Entry(driveSettingsFrames[-1]))
self.driveSettingsEntries[-1].pack(side=RIGHT)
Button(DriveSettingsFrame, text="Send to drive", command=self.sendConfig).pack(fill=X)
Button(DriveSettingsFrame, text="Save config in drive", command=self.saveConfig).pack(fill=X)
Label(settingFrame, width=50, textvariable=self.laststrm, bg="green", fg="black").pack(fill=X)
# MatplotLib stuff
f = Figure(figsize=(5, 4), dpi=100)
self.a = f.add_subplot(411)
self.a.set_title("Requested and actual position")
self.b = f.add_subplot(412)
self.b.set_title("Error")
self.c = f.add_subplot(413)
self.c.set_title("Current meas ADC value")
self.d = f.add_subplot(414)
self.d.set_title("HALL")
self.canvas = FigureCanvasTkAgg(f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg(self.canvas, self)
toolbar.update()
self.canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
self.hall = []
self.encoder_count = []
self.pos_error = []
self.requested_position = []
self.requested_delta = []
self.adc_value = []
self.pid_output = []
self.a.set_autoscaley_on(True)
self.encoder_line, = self.a.plot([], [])
self.error_line, = self.d.plot([], [])
self.reqpos_line, = self.a.plot([], [])
self.ADC_line, = self.c.plot([], [])
self.hall_line, = self.d.plot([], [])
self.pwm_line, = self.b.plot([], [])
self.updateCanvas()
def baudValidate(self):
sVal = self.baud_entry.get()
try:
iVal = int(sVal)
except ValueError:
print "Illegal baud value"
self.baud_entry.delete(0, END)
self.baud_entry.insert(0, "115200")
return False
return True
def openPort(self):
try:
self.ser = serial.Serial(self.comPort.get(), int(self.baud_entry.get()), timeout=0)
except serial.SerialException:
print "unable to open"
return
self.btnStartStream['state'] = NORMAL
self.btnStopStream['state'] = NORMAL
self.btnGetConfig['state'] = NORMAL
self.thread = SerialThread(self.queue, self.writequeue, self.ser)
self.thread.daemon = True
self.thread.start()
self.process_serial()
def closePort(self):
self.thread.stop()
self.thread.join()
        self.ser.close()
self.btnStartStream['state'] = DISABLED
self.btnStopStream['state'] = DISABLED
self.btnGetConfig['state'] = DISABLED
def process_serial(self):
while self.queue.qsize():
try:
line = self.queue.get()
self.handleLine(line)
except Queue.Empty:
pass
self.after(100, self.process_serial)
def startStream(self):
self.writequeue.put(b"STREAM START \r")
def stopStream(self):
self.writequeue.put(b"STREAM DIE \r")
def getConfig(self):
self.writequeue.put(b"GET\r")
def saveConfig(self):
self.writequeue.put(b"SAVE \r")
def sendConfig(self):
for setting in drivesettings:
dataToSend = b"SET " + setting + " " + self.driveSettingsEntries[drivesettings.index(setting)].get() + "\r"
print dataToSend
self.writequeue.put(dataToSend)
time.sleep(0.2)
def getComPorts(self):
ports = serial.tools.list_ports.comports()
portNames = []
for port in ports:
portNames.append(port[0])
return portNames
def handleLine(self, line):
line = line.replace(" ", "")
line = line.replace("/n", "")
line = line.replace("/r", "")
parts = line.split(":")
if len(parts) > 1:
if parts[0] == "STR":
self.handleStr(parts[1])
return
if parts[0] in drivesettings:
self.driveSettingsEntries[drivesettings.index(parts[0])].delete(0, END)
self.driveSettingsEntries[drivesettings.index(parts[0])].insert(0, parts[1])
def handleStr(self, strm):
        # format of the stream line (as parsed below):
        # STR:hall;count;requestedPosition;requestedDelta;error;adcValue;pidOutput
parts = strm.split(";")
self.laststrm.set(strm)
self.hall.append(int(parts[0]))
if len(self.hall) > 5000:
self.hall.pop(0)
self.encoder_count.append(parts[1])
if len(self.encoder_count) > 5000:
self.encoder_count.pop(0)
self.requested_position.append(parts[2])
if len(self.requested_position) > 5000:
self.requested_position.pop(0)
self.requested_delta.append(parts[3])
if len(self.requested_delta) > 5000:
self.requested_delta.pop(0)
self.pos_error.append(parts[4])
if len(self.pos_error) > 5000:
self.pos_error.pop(0)
self.adc_value.append(parts[5])
if len(self.adc_value) > 5000:
self.adc_value.pop(0)
self.pid_output.append(parts[6])
if len(self.pid_output) > 5000:
self.pid_output.pop(0)
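    # Illustrative example (hypothetical values): a serial line such as
    #   "STR:3;1024;1030;2;6;512;87"
    # reaches handleStr as "3;1024;1030;2;6;512;87" and is split on ';' into
    # hall=3, encoder count=1024, requested position=1030, requested delta=2,
    # position error=6, ADC value=512 and PID output=87; each value is kept in
    # a 5000-sample rolling buffer for plotting.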
def updateCanvas(self):
self.encoder_line.set_xdata(range(len(self.encoder_count)))
self.encoder_line.set_ydata(self.encoder_count)
self.error_line.set_xdata(range(len(self.pos_error)))
self.error_line.set_ydata(self.pos_error)
self.reqpos_line.set_xdata(range(len(self.requested_position)))
self.reqpos_line.set_ydata(self.requested_position)
self.ADC_line.set_xdata(range(len(self.adc_value)))
self.ADC_line.set_ydata(self.adc_value)
self.hall_line.set_xdata(range(len(self.hall)))
self.hall_line.set_ydata(self.hall)
self.pwm_line.set_xdata(range(len(self.pid_output)))
self.pwm_line.set_ydata(self.pid_output)
self.a.relim()
self.a.autoscale_view()
self.b.relim()
self.b.autoscale_view()
self.c.relim()
self.c.autoscale_view()
self.d.relim()
self.d.autoscale_view()
self.canvas.draw()
self.after(100, self.updateCanvas)
def main():
root = Tk()
root.geometry("300x280+300+300")
app = topFrame(root)
root.mainloop()
if __name__ == '__main__':
main()
| gpl-2.0 |
ebilionis/variational-reformulation-of-inverse-problems | unittests/test_optimize_diffusion_leftboundarydata.py | 1 | 5954 | """
A first test for the ELBO on the diffusion problem.
The target consists of a prior and a Gaussian likelihood.
The approximating distribution is a Gaussian mixture with a single component.
Author:
Panagiotis Tsilifis
Date:
6/16/2014
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import cPickle as pickle
from scipy.stats.distributions import norm
import math
from vuq import GammaPDF
from vuq import UniformND
from vuq import PDFCollection
from vuq import IsotropicGaussianLikelihood
from vuq import MultivariateNormal
from vuq import Joint
from vuq import MixturePDF
from vuq import MixtureOfMultivariateNormals
from vuq import FirstOrderEntropyApproximation
from vuq import ThirdOrderExpectationFunctional
from vuq import EvidenceLowerBound
from vuq import Optimizer
import sys
sys.path.insert(0,'demos/')
from diffusion import ContaminantTransportModelLeft
# Number of dimensions
num_dim = 3
# The number of components to use for the mixture
num_comp = 1
#-------- The (hypothetical) joint distribution ----------------
# The prior
collection = [UniformND(1), UniformND(1), GammaPDF(1,0.05,1)]
prior = PDFCollection(collection)
# The data
data = np.load('data_concentrations_left_corners.npy')
# The forward model
diff_model = ContaminantTransportModelLeft()
print 'Num_input'
print str(diff_model.num_input) + '\n'
# The isotropic Likelihood
IsotropicL = IsotropicGaussianLikelihood(data[:], diff_model)
# The joint
log_p = Joint(IsotropicL, prior)
print 'Target:'
print str(log_p)
# The approximating distribution
comp = [MultivariateNormal(np.random.gamma(10,1,num_dim))]#, MultivariateNormal(np.random.gamma(10,1,num_dim))]
log_q = MixtureOfMultivariateNormals(comp)
log_q.comp[0].mu = np.ones(log_q.comp[0].mu.shape) * 0.5
#log_q.comp[1].mu = np.ones(log_q.comp[0].mu.shape) * 0.5
log_q.comp[0].C = np.eye(num_dim) * 1e-4
#log_q.comp[1].C = np.eye(num_dim) * 1e-4
print 'Initial:'
print log_q
# Pick an entropy approximation
entropy = FirstOrderEntropyApproximation()
# Pick an approximation for the expectation of the joint
expectation_functional = ThirdOrderExpectationFunctional(log_p)
# Restrictions for mu
mu_bounds = (tuple((0., 1.) for i in xrange(log_q.num_dim - 1))
+ ((1e-6, None), ))
C_bounds = tuple((1e-32, None) for i in xrange(log_q.num_comp * log_q.num_dim))
# Build the ELBO
elbo = EvidenceLowerBound(entropy, expectation_functional)
print 'ELBO:'
print str(elbo)
# Optimize the elbo
optimizer = Optimizer(elbo)
results_file = os.path.join('demos', 'diffusion_left_new_2_cali.pcl')
if os.path.exists(results_file):
print 'I found:', results_file
print 'I am skipping the experiment.'
print 'Delete the file if you want to repeat it.'
with open(results_file, 'rb') as fd:
results = pickle.load(fd)
L = results['L']
log_q = results['log_q']
else:
L = optimizer.optimize(log_q, tol=1e-3, max_it=10, mu_bounds=mu_bounds,
mu_constraints=None, C_bounds=C_bounds)
result = {}
result['L'] = L
result['log_q'] = log_q
with open(os.path.join('demos', 'diffusion_left_new_2_cali.pcl'), 'wb') as fd:
pickle.dump(result, fd)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(L, linewidth=2)
ax.set_xlabel('Iteration', fontsize=16)
ax.set_ylabel('ELBO', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'diffusion_left_new_2_elbo.png')
print 'Writing:', png_file
plt.savefig(png_file)
for i in xrange(log_q.num_dim):
mu = log_q.comp[0].mu[i]
s = math.sqrt(log_q.comp[0].C[i, i])
if i < 2:
name = 'x_{%s}' % (i+1)
else:
name = 'sigma^2'
print name, '=', mu, '+-', s
# Plot the calibration result
t = np.array([ 0.075, 0.15, 0.225, 0.3])
fig = plt.figure()
ax = fig.add_subplot(111)
f = diff_model._eval_u(log_q.comp[0].mu[:2])
Y = f.reshape(4, 2)
data = data.reshape(4, 2)
styles = ['b', 'r']
for i in xrange(2):
ax.plot(t, Y[:, i], styles[i], linewidth=2)
ax.plot(t, data[:,i], '+' + styles[i], markersize=10, markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'diffusion_left_new_2_cali_output.png')
print 'Writing:', png_file
plt.savefig(png_file)
# Do an uncertainty propagation test.
uq_file = os.path.join('demos', 'diffusion_left_new_2_cali_uq.pcl')
if os.path.exists(uq_file):
with open(uq_file, 'rb') as fd:
uq_results = pickle.load(fd)
Y_m = uq_results['Y_m']
Y_p05 = uq_results['Y_p05']
Y_p95 = uq_results['Y_p95']
else:
num_mcmc = 100
Y_s = []
for i in xrange(num_mcmc):
print 'taking sample', i + 1
omega = log_q.sample().flatten()
x = omega[:2]
sigma = omega[2]
y = diff_model._eval_u(x)
Y_s.append(y + sigma * np.random.randn(*y.shape))
Y_s = np.vstack(Y_s)
Y_m = np.percentile(Y_s, 50, axis=0).reshape(Y.shape)
Y_p05 = np.percentile(Y_s, 5, axis=0).reshape(Y.shape)
Y_p95 = np.percentile(Y_s, 95, axis=0).reshape(Y.shape)
uq_results = {}
uq_results['Y_m'] = Y_m
uq_results['Y_p05'] = Y_p05
uq_results['Y_p95'] = Y_p95
with open(uq_file, 'wb') as fd:
pickle.dump(uq_results, fd)
fig = plt.figure()
ax = fig.add_subplot(111)
for i in xrange(2):
ax.plot(t, Y_m[:, i], styles[i], linewidth=2)
ax.fill_between(t, Y_p05[:, i], Y_p95[:, i], color=styles[i], alpha=0.5)
ax.plot(t, data[:, i], '+' + styles[i], markersize=10,
markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'diffusion_left_new_2_cali_uq.png')
print 'Writing:', png_file
plt.savefig(png_file)
print str(log_q) + '\n' | gpl-2.0 |
vkscool/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py | 69 | 27260 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.collections as collections
import matplotlib.contour as contour
make_axes_kw_doc = '''
========== ====================================================
Property Description
========== ====================================================
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
========== ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g. '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contour.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
''' % (make_axes_kw_doc, colormap_kw_doc)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
        norm=colors.NoNorm()
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
_slice_dict = {'neither': slice(0,1000000),
'both': slice(1,-1),
'min': slice(1,1000000),
'max': slice(0,-1)}
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self._inside = self._slice_dict[extend]
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
self.solids = None
self.lines = None
self.dividers = None
self.set_label('')
if cbook.iterable(ticks):
self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
else:
self.locator = ticks # Handle default in _ticker()
if format is None:
if isinstance(self.norm, colors.LogNorm):
self.formatter = ticker.LogFormatter()
else:
self.formatter = ticker.ScalarFormatter()
elif cbook.is_string_like(format):
self.formatter = ticker.FormatStrFormatter(format)
else:
self.formatter = format # Assume it is a Formatter
# The rest is in a method so we can recalculate when clim changes.
self.draw_all()
def draw_all(self):
'''
Calculate any free parameters based on the current cmap and norm,
and do all the drawing.
'''
self._process_values()
self._find_range()
X, Y = self._mesh()
C = self._values[:,np.newaxis]
self._config_axes(X, Y)
if self.filled:
self._add_solids(X, Y, C)
self._set_label()
def _config_axes(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = patches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_yticks([])
ax.xaxis.set_label_position('bottom')
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
def _set_label(self):
if self.orientation == 'vertical':
self.ax.set_ylabel(self._label, **self._labelkw)
else:
self.ax.set_xlabel(self._label, **self._labelkw)
def set_label(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label()
def _outline(self, X, Y):
'''
Return *x*, *y* arrays of colorbar bounding polygon,
taking orientation into account.
'''
N = X.shape[0]
ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0]
x = np.take(np.ravel(np.transpose(X)), ii)
y = np.take(np.ravel(np.transpose(Y)), ii)
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if self.orientation == 'horizontal':
return np.hstack((y, x))
return np.hstack((x, y))
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [zip(X[i], Y[i]) for i in range(1, N-1)]
else:
return [zip(Y[i], X[i]) for i in range(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha}
# Save, set, and restore hold state to keep pcolor from
# clearing the axes. Ordinarily this will not be needed,
# since the axes object should already have hold set.
_hold = self.ax.ishold()
self.ax.hold(True)
col = self.ax.pcolor(*args, **kw)
self.ax.hold(_hold)
#self.add_observer(col) # We should observe, not be observed...
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],)
)
self.ax.add_collection(self.dividers)
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar.
'''
N = len(levels)
dummy, y = self._locate(levels)
        if len(y) != N:
raise ValueError("levels are outside colorbar range")
x = np.array([0.0, 1.0])
X, Y = np.meshgrid(x,y)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _ticker(self):
'''
Return two sequences: ticks (colorbar data locations)
and ticklabels (strings).
'''
locator = self.locator
formatter = self.formatter
if locator is None:
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator()
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b, nbins=10)
if isinstance(self.norm, colors.NoNorm):
intv = self._values[0], self._values[-1]
else:
intv = self.vmin, self.vmax
locator.create_dummy_axis()
formatter.create_dummy_axis()
locator.set_view_interval(*intv)
locator.set_data_interval(*intv)
formatter.set_view_interval(*intv)
formatter.set_data_interval(*intv)
b = np.array(locator())
b, ticks = self._locate(b)
formatter.set_locs(b)
ticklabels = [formatter(t, i) for i, t in enumerate(b)]
offset_string = formatter.get_offset()
return ticks, ticklabels, offset_string
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
                # boundaries lie midway between adjacent values
                b[1:-1] = 0.5*(self._values[:-1] + self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
if self.extend in ('both', 'min'):
v[0] = -1
if self.extend in ('both', 'max'):
v[-1] = self.cmap.N
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = list(self.norm.boundaries)
if self.extend in ('both', 'min'):
b = [b[0]-1] + b
if self.extend in ('both', 'max'):
b = b + [b[-1] + 1]
b = np.array(b)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v[self._inside] = 0.5*(bi[:-1] + bi[1:])
if self.extend in ('both', 'min'):
v[0] = b[0] - 1
if self.extend in ('both', 'max'):
v[-1] = b[-1] + 1
self._boundaries = b
self._values = v
return
else:
if not self.norm.scaled():
self.norm.vmin = 0
self.norm.vmax = 1
b = self.norm.inverse(self._uniform_y(self.cmap.N+1))
if self.extend in ('both', 'min'):
b[0] = b[0] - 1
if self.extend in ('both', 'max'):
b[-1] = b[-1] + 1
self._process_values(b)
def _find_range(self):
'''
Set :attr:`vmin` and :attr:`vmax` attributes to the first and
last boundary excluding extended end boundaries.
'''
b = self._boundaries[self._inside]
self.vmin = b[0]
self.vmax = b[-1]
def _central_N(self):
'''number of boundaries **before** extension of ends'''
nb = len(self._boundaries)
if self.extend == 'both':
nb -= 2
elif self.extend in ('min', 'max'):
nb -= 1
return nb
def _extended_N(self):
'''
Based on the colormap and extend variable, return the
number of boundaries.
'''
N = self.cmap.N + 1
if self.extend == 'both':
N += 2
elif self.extend in ('min', 'max'):
N += 1
return N
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries, plus ends if required.
'''
if self.extend == 'neither':
y = np.linspace(0, 1, N)
else:
if self.extend == 'both':
y = np.zeros(N + 2, 'd')
y[0] = -0.05
y[-1] = 1.05
elif self.extend == 'min':
y = np.zeros(N + 1, 'd')
y[0] = -0.05
else:
y = np.zeros(N + 1, 'd')
y[-1] = 1.05
y[self._inside] = np.linspace(0, 1, N)
return y
def _proportional_y(self):
'''
Return colorbar data coordinates for the boundaries of
a proportional colorbar.
'''
if isinstance(self.norm, colors.BoundaryNorm):
b = self._boundaries[self._inside]
y = (self._boundaries - self._boundaries[0])
y = y / (self._boundaries[-1] - self._boundaries[0])
else:
y = self.norm(self._boundaries.copy())
if self.extend in ('both', 'min'):
y[0] = -0.05
if self.extend in ('both', 'max'):
y[-1] = 1.05
yi = y[self._inside]
norm = colors.Normalize(yi[0], yi[-1])
y[self._inside] = norm(yi)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([0.0, 1.0])
if self.spacing == 'uniform':
y = self._uniform_y(self._central_N())
else:
y = self._proportional_y()
self._y = y
X, Y = np.meshgrid(x,y)
if self.extend in ('min', 'both'):
X[0,:] = 0.5
if self.extend in ('max', 'both'):
X[-1,:] = 0.5
return X, Y
def _locate(self, x):
'''
Given a possible set of color data values, return the ones
within range, together with their corresponding colorbar
data coordinates.
'''
if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
b = self._boundaries
xn = x
xout = x
else:
# Do calculations using normalized coordinates so
# as to make the interpolation more accurate.
b = self.norm(self._boundaries, clip=False).filled()
# We do our own clipping so that we can allow a tiny
# bit of slop in the end point ticks to allow for
# floating point errors.
xn = self.norm(x, clip=False).filled()
in_cond = (xn > -0.001) & (xn < 1.001)
xn = np.compress(in_cond, xn)
xout = np.compress(in_cond, x)
# The rest is linear interpolation with clipping.
y = self._y
N = len(b)
ii = np.minimum(np.searchsorted(b, xn), N-1)
i0 = np.maximum(ii - 1, 0)
#db = b[ii] - b[i0]
db = np.take(b, ii) - np.take(b, i0)
db = np.where(i0==ii, 1.0, db)
#dy = y[ii] - y[i0]
dy = np.take(y, ii) - np.take(y, i0)
z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db
return xout, z
def set_alpha(self, alpha):
self.alpha = alpha
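
# Hedged usage sketch (added for illustration; not part of the original
# module).  As the ColorbarBase docstring notes, the class can be used on its
# own to display a colormap without any mappable.  The figure size and axes
# rectangle below are arbitrary choices.
def _example_standalone_colorbar():
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(6, 1))
    cax = fig.add_axes([0.05, 0.4, 0.9, 0.4])
    # Show the full 0-1 range of the default colormap as a horizontal bar.
    cb = ColorbarBase(cax, cmap=cm.get_cmap(),
                      norm=colors.Normalize(vmin=0.0, vmax=1.0),
                      orientation='horizontal')
    cb.set_label('normalized value')
    return cb
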
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
'''
Manually change any contour line colors. This is called
when the image or contour plot to which this colorbar belongs
is changed.
'''
# We are using an ugly brute-force method: clearing and
# redrawing the whole thing. The problem is that if any
# properties have been changed by methods other than the
# colorbar methods, those changes will be lost.
self.ax.cla()
self.draw_all()
#if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax:
# self.ax.cla()
# self.draw_all()
if isinstance(self.mappable, contour.ContourSet):
CS = self.mappable
if not CS.filled:
self.add_lines(CS)
#if self.lines is not None:
# tcolors = [c[0] for c in CS.tcolors]
# self.lines.set_color(tcolors)
#Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
#Fixme: Some refactoring may be needed; we should not
# be recalculating everything if there was a simple alpha
# change.
def make_axes(parent, **kw):
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
make_axes.__doc__ ='''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
''' % make_axes_kw_doc
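
# Hedged usage sketch (illustrative, not from the original source): steal a
# strip of space from an existing parent axes and draw a standalone colorbar
# into it.  The parent axes passed in is assumed to exist already.
def _example_make_axes(parent_ax):
    cax, kw = make_axes(parent_ax, orientation='vertical', shrink=0.8)
    # kw now holds only the keywords ColorbarBase understands
    # (here just 'orientation'); the sizing keywords were stripped.
    cb = ColorbarBase(cax, cmap=cm.get_cmap(),
                      norm=colors.Normalize(vmin=0.0, vmax=1.0), **kw)
    return cb
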
| gpl-3.0 |
vbalderdash/LMAsimulation | read_logs.py | 1 | 6370 | import pandas as pd
import numpy as np
import subprocess
import os
def hex2page(value):
new_value = -111+int(value, 16)*0.488
return new_value
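
# Hedged example (added for illustration): the LMA logs store the trigger
# threshold as a hexadecimal page value, which hex2page maps to dBm, e.g.
#   hex2page('00') -> -111.0
#   hex2page('80') -> -111 + 128*0.488 = -48.536
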
def check_log(filename,form='wtreg'):
"""Checks to make sure the lines in the file have the correct length.
Files with less than 26 errored lines will be copied to
[filename]_original and [filename] will be rewritten without the
errored lines.
This function will not run on a Windows platform!
"""
i=0
j=0
f=open(filename)
if form=='wtreg':
len_arrays = np.array([47, 83])
if form=='old7':
len_arrays = np.array([42, 78])
if form=='newok':
len_arrays = np.array([47, 88])
for line_no, line in enumerate(f):
if np.all(len(line.strip()) != len_arrays):
print (line)
i+=1
f.close()
    if i >= 26:
print ("%s may be in a different format!" %(filename))
print ("Moving to '_original' and ignoring!")
subprocess.call(['mv', filename, filename+'_original'])
if (i>0) & (i<26):
if os.path.isfile(filename+'_original'):
print ("Original copies already exists for %s!" %(filename))
else:
f = open(filename)
print ("%s has some bad lines," %(filename))
print ("original copy will be made and bad lines will be removed" )
subprocess.call(['cp', filename, filename+'_original'])
for line_no, line in enumerate(f):
if np.all(len(line.strip()) != len_arrays):
subprocess.call(['sed', '-i.bak', "%s d" %(line_no+1-j),
filename])
j+=1
subprocess.call(['rm', filename+'.bak'])
f.close()
def parsing(filename, T_set='False',form='wtreg'):
"""filename must be in path/TxYYMMDD format. Returns Pandas dataframe
The log file will be run through a checker to make sure that there are
no bad lines.
Thresholds will be converted from hex format to dBm
If T_set is set to 'True' only the thresholds, latitudes, longitudes and
altitudes will be returned with the station identifier as a suffix,
otherwise the entire log file will be parsed.
"""
check_log(filename,form)
if os.path.isfile(filename):
dateparse = lambda x: pd.datetime.strptime(x, '%m/%d/%y %H:%M:%S')
namelist = ['ID','Datetime','Version','Threshold','?',
'Triggers','GPS_Number','GPS_Mode','Temp',
'Lat','Lon','Alt']
if form=='wtreg':
widths_list = [1,18,4,5,12,7,3,3,3,9,10,8]
collist = [1,3,9,10,11]
if form=='old7':
widths_list = [1,18,4,5,7,7,3,3,3,9,10,8]
collist = [1,3,9,10,11]
if form=='newok':
widths_list = [1,18,4,5,12,7,3,3,4,4,9,10,8]
collist = [1,3,10,11,12]
namelist = ['ID','Datetime','Version','Threshold','???',
'Triggers','GPS_Number','GPS_Mode','Temp','Batt',
'Lat','Lon','Alt']
if T_set=='True':
df = pd.read_fwf(filename,
widths=widths_list,
names=namelist,
usecols=collist,
parse_dates = [0],
date_parser = dateparse,
na_values='\n')
station=filename[-7]
df['Threshold'] = df['Threshold'].apply(hex2page)
df=df.rename(columns = {'Threshold':'Threshold_%s'%station,
'Lat':'Lat_%s'%station,
'Lon':'Lon_%s'%station,
'Alt':'Alt_%s'%station})
else:
df = pd.read_fwf(filename,
widths=widths_list,
names=namelist,
parse_dates = [1],
date_parser = dateparse,
na_values='\n')
df['Threshold'] = df['Threshold'].apply(hex2page)
df=df.set_index('Datetime')
return df
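
# Hedged usage sketch (illustrative; the path below is a hypothetical example,
# not a real data file).  Station logs are named Tx followed by a YYMMDD date,
# so a log for station 'A' on 2014-05-21 would be read as shown here.
def _example_parse_station_log(path='logs/TA140521'):
    df_full = parsing(path)                # entire log, indexed by Datetime
    df_thr = parsing(path, T_set='True')   # thresholds/positions only,
                                           # columns suffixed with '_A'
    return df_full, df_thr
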
def parsing_variable(filename, T_set='False'):
"""filename must be in path/TxYYMMDD format. Returns Pandas dataframe
The log file will NOT be run through a checker to make sure that there are
no bad lines as not all files will be the same widths for the quick check
used in the check_log function.
Thresholds will be converted from hex format to dBm
If T_set is set to 'True' only the thresholds, latitudes, longitudes and
altitudes will be returned with the station identifier as a suffix,
otherwise the entire log file will be parsed.
"""
if os.path.isfile(filename):
dateparse = lambda x: pd.datetime.strptime(x, '%m/%d/%y %H:%M:%S')
        collist = [1,2,4,12,13,14]
namelist = ['ID','Date','time','Version','Threshold','?','??',
'Triggers','GPS_Number','GPS_Mode','Temp','Batt',
'Lat','Lon','Alt']
if T_set=='True':
df = pd.read_fwf(filename,
names=namelist,
usecols=collist,
parse_dates = [[0,1]],
date_parser = dateparse,
na_values='\n')
station=filename[-7]
df['Threshold'] = df['Threshold'].apply(hex2page)
df=df.rename(columns = {'Threshold':'Threshold_%s'%station,
'Lat':'Lat_%s'%station,
'Lon':'Lon_%s'%station,
'Alt':'Alt_%s'%station})
else:
df = pd.read_fwf(filename,
names=namelist,
parse_dates = [[0,1]],
date_parser = dateparse,
na_values='\n')
df['Threshold'] = df['Threshold'].apply(hex2page)
df=df.set_index('Date_time')
return df | mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/delaunay/testfuncs.py | 21 | 21168 | """Some test functions for bivariate interpolation.
Most of these have been yoinked from ACM TOMS 792.
http://netlib.org/toms/792
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from .triangulate import Triangulation
class TestData(dict):
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self.__dict__ = self
class TestDataSet(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
data = TestData(
franke100=TestDataSet(
x=np.array([0.0227035, 0.0539888, 0.0217008, 0.0175129, 0.0019029,
-0.0509685, 0.0395408, -0.0487061, 0.0315828, -0.0418785,
0.1324189, 0.1090271, 0.1254439, 0.093454, 0.0767578,
0.1451874, 0.0626494, 0.1452734, 0.0958668, 0.0695559,
0.2645602, 0.2391645, 0.208899, 0.2767329, 0.1714726,
0.2266781, 0.1909212, 0.1867647, 0.2304634, 0.2426219,
0.3663168, 0.3857662, 0.3832392, 0.3179087, 0.3466321,
0.3776591, 0.3873159, 0.3812917, 0.3795364, 0.2803515,
0.4149771, 0.4277679, 0.420001, 0.4663631, 0.4855658,
0.4092026, 0.4792578, 0.4812279, 0.3977761, 0.4027321,
0.5848691, 0.5730076, 0.6063893, 0.5013894, 0.5741311,
0.6106955, 0.5990105, 0.5380621, 0.6096967, 0.5026188,
0.6616928, 0.6427836, 0.6396475, 0.6703963, 0.7001181,
0.633359, 0.6908947, 0.6895638, 0.6718889, 0.6837675,
0.7736939, 0.7635332, 0.7410424, 0.8258981, 0.7306034,
0.8086609, 0.8214531, 0.729064, 0.8076643, 0.8170951,
0.8424572, 0.8684053, 0.8366923, 0.9418461, 0.8478122,
0.8599583, 0.91757, 0.8596328, 0.9279871, 0.8512805,
1.044982, 0.9670631, 0.9857884, 0.9676313, 1.0129299,
0.965704, 1.0019855, 1.0359297, 1.0414677, 0.9471506]),
y=np.array([-0.0310206, 0.1586742, 0.2576924, 0.3414014, 0.4943596,
0.5782854, 0.6993418, 0.7470194, 0.9107649, 0.996289,
0.050133, 0.0918555, 0.2592973, 0.3381592, 0.4171125,
0.5615563, 0.6552235, 0.7524066, 0.9146523, 0.9632421,
0.0292939, 0.0602303, 0.2668783, 0.3696044, 0.4801738,
0.5940595, 0.6878797, 0.8185576, 0.9046507, 0.9805412,
0.0396955, 0.0684484, 0.2389548, 0.3124129, 0.4902989,
0.5199303, 0.6445227, 0.8203789, 0.8938079, 0.9711719,
-0.0284618, 0.1560965, 0.2262471, 0.3175094, 0.3891417,
0.5084949, 0.6324247, 0.7511007, 0.8489712, 0.9978728,
-0.0271948, 0.127243, 0.2709269, 0.3477728, 0.4259422,
0.6084711, 0.6733781, 0.7235242, 0.9242411, 1.0308762,
0.0255959, 0.0707835, 0.2008336, 0.3259843, 0.4890704,
0.5096324, 0.669788, 0.7759569, 0.9366096, 1.0064516,
0.0285374, 0.1021403, 0.1936581, 0.3235775, 0.4714228,
0.6091595, 0.6685053, 0.8022808, 0.847679, 1.0512371,
0.0380499, 0.0902048, 0.2083092, 0.3318491, 0.4335632,
0.5910139, 0.6307383, 0.8144841, 0.904231, 0.969603,
-0.01209, 0.1334114, 0.2695844, 0.3795281, 0.4396054,
0.5044425, 0.6941519, 0.7459923, 0.8682081, 0.9801409])),
franke33=TestDataSet(
x=np.array([5.00000000e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.00000000e-01, 1.00000000e-01,
1.50000000e-01, 2.00000000e-01, 2.50000000e-01,
3.00000000e-01, 3.50000000e-01, 5.00000000e-01,
5.00000000e-01, 5.50000000e-01, 6.00000000e-01,
6.00000000e-01, 6.00000000e-01, 6.50000000e-01,
7.00000000e-01, 7.00000000e-01, 7.00000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.00000000e-01, 8.00000000e-01, 8.50000000e-01,
9.00000000e-01, 9.00000000e-01, 9.50000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([4.50000000e-01, 5.00000000e-01, 1.00000000e+00,
0.00000000e+00, 1.50000000e-01, 7.50000000e-01,
3.00000000e-01, 1.00000000e-01, 2.00000000e-01,
3.50000000e-01, 8.50000000e-01, 0.00000000e+00,
1.00000000e+00, 9.50000000e-01, 2.50000000e-01,
6.50000000e-01, 8.50000000e-01, 7.00000000e-01,
2.00000000e-01, 6.50000000e-01, 9.00000000e-01,
1.00000000e-01, 3.50000000e-01, 8.50000000e-01,
4.00000000e-01, 6.50000000e-01, 2.50000000e-01,
3.50000000e-01, 8.00000000e-01, 9.00000000e-01,
0.00000000e+00, 5.00000000e-01, 1.00000000e+00])),
lawson25=TestDataSet(
x=np.array([0.1375, 0.9125, 0.7125, 0.225, -0.05, 0.475, 0.05,
0.45, 1.0875, 0.5375, -0.0375, 0.1875, 0.7125, 0.85,
0.7, 0.275, 0.45, 0.8125, 0.45, 1., 0.5,
0.1875, 0.5875, 1.05, 0.1]),
y=np.array([0.975, 0.9875, 0.7625, 0.8375, 0.4125, 0.6375,
-0.05, 1.0375, 0.55, 0.8, 0.75, 0.575,
0.55, 0.4375, 0.3125, 0.425, 0.2875, 0.1875,
-0.0375, 0.2625, 0.4625, 0.2625, 0.125, -0.06125,
0.1125])),
random100=TestDataSet(
x=np.array([0.0096326, 0.0216348, 0.029836, 0.0417447, 0.0470462,
0.0562965, 0.0646857, 0.0740377, 0.0873907, 0.0934832,
0.1032216, 0.1110176, 0.1181193, 0.1251704, 0.132733,
0.1439536, 0.1564861, 0.1651043, 0.1786039, 0.1886405,
0.2016706, 0.2099886, 0.2147003, 0.2204141, 0.2343715,
0.240966, 0.252774, 0.2570839, 0.2733365, 0.2853833,
0.2901755, 0.2964854, 0.3019725, 0.3125695, 0.3307163,
0.3378504, 0.3439061, 0.3529922, 0.3635507, 0.3766172,
0.3822429, 0.3869838, 0.3973137, 0.4170708, 0.4255588,
0.4299218, 0.4372839, 0.4705033, 0.4736655, 0.4879299,
0.494026, 0.5055324, 0.5162593, 0.5219219, 0.5348529,
0.5483213, 0.5569571, 0.5638611, 0.5784908, 0.586395,
0.5929148, 0.5987839, 0.6117561, 0.6252296, 0.6331381,
0.6399048, 0.6488972, 0.6558537, 0.6677405, 0.6814074,
0.6887812, 0.6940896, 0.7061687, 0.7160957, 0.7317445,
0.7370798, 0.746203, 0.7566957, 0.7699998, 0.7879347,
0.7944014, 0.8164468, 0.8192794, 0.8368405, 0.8500993,
0.8588255, 0.8646496, 0.8792329, 0.8837536, 0.8900077,
0.8969894, 0.9044917, 0.9083947, 0.9203972, 0.9347906,
0.9434519, 0.9490328, 0.9569571, 0.9772067, 0.9983493]),
y=np.array([0.3083158, 0.2450434, 0.8613847, 0.0977864, 0.3648355,
0.7156339, 0.5311312, 0.9755672, 0.1781117, 0.5452797,
0.1603881, 0.7837139, 0.9982015, 0.6910589, 0.104958,
0.8184662, 0.7086405, 0.4456593, 0.1178342, 0.3189021,
0.9668446, 0.7571834, 0.2016598, 0.3232444, 0.4368583,
0.8907869, 0.064726, 0.5692618, 0.2947027, 0.4332426,
0.3347464, 0.7436284, 0.1066265, 0.8845357, 0.515873,
0.9425637, 0.4799701, 0.1783069, 0.114676, 0.8225797,
0.2270688, 0.4073598, 0.887508, 0.7631616, 0.9972804,
0.4959884, 0.3410421, 0.249812, 0.6409007, 0.105869,
0.5411969, 0.0089792, 0.8784268, 0.5515874, 0.4038952,
0.1654023, 0.2965158, 0.3660356, 0.0366554, 0.950242,
0.2638101, 0.9277386, 0.5377694, 0.7374676, 0.4674627,
0.9186109, 0.0416884, 0.1291029, 0.6763676, 0.8444238,
0.3273328, 0.1893879, 0.0645923, 0.0180147, 0.8904992,
0.4160648, 0.4688995, 0.2174508, 0.5734231, 0.8853319,
0.8018436, 0.6388941, 0.8931002, 0.1000558, 0.2789506,
0.9082948, 0.3259159, 0.8318747, 0.0508513, 0.970845,
0.5120548, 0.2859716, 0.9581641, 0.6183429, 0.3779934,
0.4010423, 0.9478657, 0.7425486, 0.8883287, 0.549675])),
uniform9=TestDataSet(
x=np.array([1.25000000e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00])),
)
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x * 9
y = y * 9
x1 = x + 1.0
x2 = x - 2.0
x4 = x - 4.0
x7 = x - 7.0
    y1 = y + 1.0   # (9y + 1) term of Franke's exponential test function
y2 = y - 2.0
y3 = y - 3.0
y7 = y - 7.0
f = (0.75 * np.exp(-(x2 * x2 + y2 * y2) / 4.0) +
0.75 * np.exp(-x1 * x1 / 49.0 - y1 / 10.0) +
0.5 * np.exp(-(x7 * x7 + y3 * y3) / 4.0) -
0.2 * np.exp(-x4 * x4 - y7 * y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0 * (y - x) + 1.0) / 9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4 * y)) / (6.0 + 6.0 * (3 * x - 1.0) ** 2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle, 0, 100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y) + np.sin(10.0 * x * y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0 - 10.0 * x
y = 5.0 - 10.0 * y
g1 = np.exp(-x * x / 2)
g2 = np.exp(-y * y / 2)
f = g1 + 0.75 * g2 * (1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0 - 20.0 * x) / 3.0)
ey = np.exp((10.0 - 20.0 * y) / 3.0)
logitx = 1.0 / (1.0 + ex)
logity = 1.0 / (1.0 + ey)
f = (((20.0 / 3.0) ** 3 * ex * ey) ** 2 * (logitx * logity) ** 5 *
(ex - 2.0 * logitx) * (ey - 2.0 * logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80 * x - 40.0, 90 * y - 45.)
f = np.exp(-0.04 * circle) * np.cos(0.15 * circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss,
cloverleaf, cosine_peak]
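
# Hedged example (added for illustration): each test function above is
# vectorized over numpy arrays, so a reference surface can be evaluated on a
# regular grid directly, without the Triangulation machinery used below.
def _example_reference_grid(func=exponential, n=33):
    # n-by-n grid on the unit square, matching the conventions of quality().
    y, x = np.mgrid[0:1:complex(0, n), 0:1:complex(0, n)]
    return func(x, y)
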
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0),
nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange + self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
import matplotlib as mpl
from matplotlib import pylab as pl
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
else:
y, x = np.mgrid[
self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
pl.ioff()
pl.clf()
pl.hot() # Some like it hot
if plotter == 'imshow':
pl.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent,
origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[
self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
pl.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(
np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]),
colors=[(0, 0, 0, 0.2)])
ax = pl.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
pl.title('%s: %s' % (func.title, title))
else:
pl.title(title)
pl.show()
pl.ion()
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange + self.yrange)
def plotallfuncs(allfuncs=allfuncs):
from matplotlib import pylab as pl
pl.ioff()
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
print(func.title)
nnt.plot(func, interp=False, plotter='imshow')
pl.savefig('%s-ref-img.png' % func.__name__)
nnt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-nn-img.png' % func.__name__)
lpt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-lin-img.png' % func.__name__)
nnt.plot(func, interp=False, plotter='contour')
pl.savefig('%s-ref-con.png' % func.__name__)
nnt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-nn-con.png' % func.__name__)
lpt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-lin-con.png' % func.__name__)
pl.ion()
def plot_dt(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0, 0, 0, 0.2)]
lc = mpl.collections.LineCollection(
np.array([((tri.x[i], tri.y[i]), (tri.x[j], tri.y[j]))
for i, j in tri.edge_db]),
colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_vo(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0, 1, 0, 0.2)]
lc = mpl.collections.LineCollection(np.array(
[(tri.circumcenters[i], tri.circumcenters[j])
for i in xrange(len(tri.circumcenters))
for j in tri.triangle_neighbors[i] if j != -1]),
colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_cc(tri, edgecolor=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if edgecolor is None:
edgecolor = (0, 0, 1, 0.2)
dxy = (np.array([(tri.x[i], tri.y[i]) for i, j, k in tri.triangle_nodes])
- tri.circumcenters)
r = np.hypot(dxy[:, 0], dxy[:, 1])
ax = pl.gca()
for i in xrange(len(r)):
p = mpl.patches.Circle(tri.circumcenters[i], r[i],
resolution=100, edgecolor=edgecolor,
facecolor=(1, 1, 1, 0), linewidth=0.2)
ax.add_patch(p)
pl.draw_if_interactive()
def quality(func, mesh, interpolator='nn', n=33):
"""Compute a quality factor (the quantity r**2 from TOMS792).
interpolator must be in ('linear', 'nn').
"""
fz = func(mesh.x, mesh.y)
tri = Triangulation(mesh.x, mesh.y)
intp = getattr(tri,
interpolator + '_extrapolator')(fz, bbox=(0., 1., 0., 1.))
Y, X = np.mgrid[0:1:complex(0, n), 0:1:complex(0, n)]
Z = func(X, Y)
iz = intp[0:1:complex(0, n), 0:1:complex(0, n)]
#nans = np.isnan(iz)
#numgood = n*n - np.sum(np.array(nans.flat, np.int32))
numgood = n * n
SE = (Z - iz) ** 2
SSE = np.sum(SE.flat)
meanZ = np.sum(Z.flat) / numgood
SM = (Z - meanZ) ** 2
SSM = np.sum(SM.flat)
r2 = 1.0 - SSE / SSM
print(func.__name__, r2, SSE, SSM, numgood)
return r2
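
# Hedged usage sketch (illustrative): score the natural-neighbor and linear
# interpolants on one of the bundled node sets; values near 1.0 indicate a
# close reconstruction of the reference surface.
def _example_quality():
    r2_nn = quality(exponential, data.franke100, interpolator='nn')
    r2_lin = quality(exponential, data.franke100, interpolator='linear')
    return r2_nn, r2_lin
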
def allquality(interpolator='nn', allfuncs=allfuncs, data=data, n=33):
results = {}
kv = list(six.iteritems(data))
kv.sort()
for name, mesh in kv:
reslist = results.setdefault(name, [])
for func in allfuncs:
reslist.append(quality(func, mesh, interpolator, n))
return results
def funky():
x0 = np.array([0.25, 0.3, 0.5, 0.6, 0.6])
y0 = np.array([0.2, 0.35, 0.0, 0.25, 0.65])
tx = 0.46
ty = 0.23
t0 = Triangulation(x0, y0)
t1 = Triangulation(np.hstack((x0, [tx])), np.hstack((y0, [ty])))
return t0, t1
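
# Hedged usage sketch (illustrative): draw the Delaunay edges, Voronoi edges
# and circumcircles of the small point set returned by funky().
def _example_plot_funky():
    t0, t1 = funky()
    plot_dt(t0)   # Delaunay triangulation of the original five points
    plot_vo(t0)   # corresponding Voronoi edges
    plot_cc(t0)   # circumcircle of each triangle
    return t0, t1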
| mit |
gamahead/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backend_bases.py | 69 | 69740 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
    handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
Open a grouping element with label *s*. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc = self.new_gc()
gc.set_clip_rectangle(cliprect)
if clippath is not None:
clippath = transforms.TransformedPath(clippath, clippath_trans)
gc.set_clip_path(clippath)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
gc.set_foreground(edgecolors[i % Nedgecolors])
if Nlinewidths:
gc.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc.set_dashes(*linestyles[i % Nlinestyles])
if rgbFace is not None and len(rgbFace)==4:
gc.set_alpha(rgbFace[-1])
rgbFace = rgbFace[:3]
gc.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc, rgbFace
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes;
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
*bbox*
a :class:`matplotlib.transforms.Bbox` instance for clipping, or
None
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
overwrite this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
raise NotImplementedError
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
a :class:`matplotlib.text.Text` instance
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
raise NotImplementedError
def flipy(self):
"""
Return true if y small numbers are top for renderer Is used
for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`) only
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
raise NotImplementedError
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
pass
def stop_rasterizing(self):
pass
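
# Hedged sketch (added for illustration; not part of the original module): the
# smallest useful renderer implements the four methods the RendererBase
# docstring marks as required.  This "null" backend simply discards all
# drawing commands; a real backend would emit output here.  The placeholder
# text metrics are an assumption, not how any real backend measures text.
class _NullRenderer(RendererBase):
    def __init__(self, width=640, height=480, dpi=72):
        RendererBase.__init__(self)
        self.width, self.height, self.dpi = width, height, dpi

    def draw_path(self, gc, path, transform, rgbFace=None):
        pass  # a real backend would stroke/fill the transformed path

    def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
        pass  # a real backend would composite the image at (x, y)

    def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
        pass  # a real backend would lay out and render the string

    def get_text_width_height_descent(self, s, prop, ismath):
        # crude placeholder metrics based on the font size in points
        size = prop.get_size_in_points()
        return len(s) * size, size, 0.0

    def get_canvas_width_height(self):
        return self.width, self.height
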
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'miter'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even-length list that gives the ink on, ink
off in pixels.
See p107 of to PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
        returns a tuple of three floats from 0-1. color can be a
        matlab format string, an html hex color string, or an rgb tuple
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
self._alpha = alpha
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGB=False):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
The :class:`GraphicsContextBase` converts colors to rgb
internally. If you know the color is rgb already, you can set
        ``isRGB=True`` to avoid the performance hit of the conversion
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
try:
offset, dashes = self.dashd[style]
except:
raise ValueError('Unrecognized linestyle: %s' % style)
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
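
# Hedged usage sketch (illustrative): a graphics context is normally obtained
# from RendererBase.new_gc(), configured, and then passed to the renderer's
# draw_* methods along with the geometry to draw.
def _example_configure_gc():
    gc = GraphicsContextBase()
    gc.set_foreground('red')     # stored internally as an rgba tuple
    gc.set_linewidth(2.0)        # points
    gc.set_linestyle('dashed')   # also sets the corresponding dash pattern
    gc.set_alpha(0.5)
    return gc
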
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas,guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class LocationEvent(Event):
"""
An event that has a screen location.
In addition to the :class:`Event` attributes, the following
attributes are defined and shown with their default values:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
y = None      # y position - pixels from bottom of canvas
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y,guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas,guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif len(axes_list) > 1: # Overlap, get the highest zorder
axes_list.sort(key=lambda ax: ax.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
A pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attributes: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase:
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry(self.events)
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'button_press_event'
using::
canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def key_press_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
the canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when entering
the canvas
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
:meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
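# Illustrative shape of the dict returned by get_supported_filetypes_grouped()
# for the default ``filetypes`` mapping above -- extensions grouped by their
# description string, each list sorted:
#
#     {'Scalable Vector Graphics': ['svg', 'svgz'],
#      'Raw RGBA bitmap': ['raw', 'rgba'],
#      'Portable Network Graphics': ['png'],
#      ...}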
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
*orientation*
'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
method_name = 'print_%s' % format
if (format not in self.filetypes or
not hasattr(self, method_name)):
formats = sorted(self.filetypes)
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
try:
result = getattr(self, method_name)(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
**kwargs)
finally:
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
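# Typical call, normally reached indirectly via Figure.savefig; 'figure.png'
# is just an example filename and every keyword shown is optional:
#
#     canvas.print_figure('figure.png', dpi=150, facecolor='w', edgecolor='w')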
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` instance the event occurred
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.FigureCanvasBase.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it raises a DeprecationWarning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str,DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
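# Rough sketch (an assumption, not code from this module) of how a blocking
# helper such as ginput might drive the start/stop pair defined above:
#
#     clicks = []
#     def on_click(event):
#         clicks.append((event.xdata, event.ydata))
#         if len(clicks) == 3:
#             canvas.stop_event_loop()
#     cid = canvas.mpl_connect('button_press_event', on_click)
#     canvas.start_event_loop(timeout=30)   # blocks until 3 clicks or timeout
#     canvas.mpl_disconnect(cid)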
class FigureManagerBase:
"""
Helper class for matlab mode, wraps everything up into a neat bundle
Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
def full_screen_toggle (self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key == 'f':
self.full_screen_toggle()
# *h*ome or *r*eset mnemonic
elif event.key == 'h' or event.key == 'r' or event.key == "home":
self.canvas.toolbar.home()
# c and v to enable left handed quick navigation
elif event.key == 'left' or event.key == 'c' or event.key == 'backspace':
self.canvas.toolbar.back()
elif event.key == 'right' or event.key == 'v':
self.canvas.toolbar.forward()
# *p*an mnemonic
elif event.key == 'p':
self.canvas.toolbar.pan()
# z*o*om mnemonic
elif event.key == 'o':
self.canvas.toolbar.zoom()
elif event.key == 's':
self.canvas.toolbar.save_figure(self.canvas.toolbar)
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
if event.key == 'g':
event.inaxes.grid()
self.canvas.draw()
elif event.key == 'l':
ax = event.inaxes
scale = ax.get_yscale()
if scale=='log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale=='linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
elif event.key is not None and ((event.key.isdigit() and event.key != '0') or event.key == 'a'):
# 'a' enables all axes
if event.key!='a':
n=int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
if event.x is not None and event.y is not None and a.in_axes(event):
if event.key=='a':
a.set_navigate(True)
else:
a.set_navigate(i==n)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2:
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# stack of views; each entry is a list of view limits, one per axes
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
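# Hedged sketch of what a concrete backend's _init_toolbar could look like;
# ``self._add_button`` is a hypothetical helper, not part of this base class:
#
#     def _init_toolbar(self):
#         self._add_button('home.xpm', callback=self.home)
#         self._add_button('back.xpm', callback=self.back)
#         self._add_button('forward.xpm', callback=self.forward)
#         self._add_button('hand.xpm', callback=self.pan)
#         self._add_button('zoom_to_rect.xpm', callback=self.zoom)
#         self._add_button('filesave.xpm', callback=self.save_figure)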
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s : %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
'the press mouse button in pan/zoom mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) and a.get_navigate():
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.press(event)
def press_zoom(self, event):
'the press mouse button in zoom to rect mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) \
and a.get_navigate() and a.can_zoom():
self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))
self.press(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
'this will be called whenever a mouse button is released'
pass
def release_pan(self, event):
'the release mouse button callback in pan/zoom mode'
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
# safer to use the button recorded at the press than the current button:
# multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def release_zoom(self, event):
'the release mouse button callback in zoom to rect mode'
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self.push_current()
self.release(event)
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
| gpl-3.0 |
corochann/chainer-hands-on-tutorial | src/05_ptb_rnn/ptb/predict_ptb.py | 2 | 3356 | """Inference/predict code for the PTB (Penn Treebank) dataset.
The model must be trained before inference;
the corresponding training script must be executed beforehand.
"""
from __future__ import print_function
import argparse
import os
import sys
import matplotlib
import numpy as np
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training, iterators, serializers, optimizers, Variable, cuda
from chainer.training import extensions
sys.path.append(os.pardir)
from RNN import RNN
from RNN2 import RNN2
from RNN3 import RNN3
from RNNForLM import RNNForLM
def main():
archs = {
'rnn': RNN,
'rnn2': RNN2,
'rnn3': RNN3,
'lstm': RNNForLM
}
parser = argparse.ArgumentParser(description='PTB RNN predict code')
parser.add_argument('--arch', '-a', choices=archs.keys(),
default='rnn', help='Net architecture')
#parser.add_argument('--batchsize', '-b', type=int, default=64,
# help='Number of images in each mini-batch')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of LSTM units in each layer')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--primeindex', '-p', type=int, default=1,
help='base index data, used for sequence generation')
parser.add_argument('--length', '-l', type=int, default=100,
help='length of the generated sequence')
parser.add_argument('--modelpath', '-m', default='',
help='Model path to be loaded')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
#print('# Minibatch-size: {}'.format(args.batchsize))
print('')
train, val, test = chainer.datasets.get_ptb_words()
n_vocab = max(train) + 1 # train is just an array of integers
print('#vocab =', n_vocab)
print('')
# load vocabulary
ptb_word_id_dict = chainer.datasets.get_ptb_words_vocabulary()
ptb_id_word_dict = dict((v, k) for k, v in ptb_word_id_dict.items())
# Model Setup
model = archs[args.arch](n_vocab=n_vocab, n_units=args.unit)
classifier_model = L.Classifier(model)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current
classifier_model.to_gpu() # Copy the model to the GPU
xp = np if args.gpu < 0 else cuda.cupy
if args.modelpath:
serializers.load_npz(args.modelpath, model)
else:
serializers.load_npz('result/{}_ptb.model'.format(args.arch), model)
# Dataset preparation
prev_index = args.primeindex
# Predict
predicted_sequence = [prev_index]
for i in range(args.length):
prev = chainer.Variable(xp.array([prev_index], dtype=xp.int32))
current = model(prev)
current_index = np.argmax(cuda.to_cpu(current.data))
predicted_sequence.append(current_index)
prev_index = current_index
predicted_text_list = [ptb_id_word_dict[i] for i in predicted_sequence]
print('Predicted sequence: ', predicted_sequence)
print('Predicted text: ', ' '.join(predicted_text_list))
if __name__ == '__main__':
main()
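# Example invocation (assumes a model has been trained beforehand and saved as
# result/<arch>_ptb.model, the default path loaded above):
#
#     python predict_ptb.py --arch lstm --unit 100 --primeindex 10 --length 50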
| mit |
dsm054/pandas | pandas/io/formats/csvs.py | 3 | 11465 | # -*- coding: utf-8 -*-
"""
Module for formatting output data into CSV files.
"""
from __future__ import print_function
import csv as csvlib
import os
import warnings
from zipfile import ZipFile
import numpy as np
from pandas._libs import writers as libwriters
from pandas.compat import StringIO, range, zip
from pandas.core.dtypes.generic import (
ABCDatetimeIndex, ABCIndexClass, ABCMultiIndex, ABCPeriodIndex)
from pandas.core.dtypes.missing import notna
from pandas import compat
from pandas.io.common import (
UnicodeWriter, _get_handle, _infer_compression, get_filepath_or_buffer)
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
float_format=None, cols=None, header=True, index=True,
index_label=None, mode='w', nanRep=None, encoding=None,
compression='infer', quoting=None, line_terminator='\n',
chunksize=None, tupleize_cols=False, quotechar='"',
date_format=None, doublequote=True, escapechar=None,
decimal='.'):
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf, _, _, _ = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression, mode=mode
)
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
if encoding is None:
encoding = 'ascii' if compat.PY2 else 'utf-8'
self.encoding = encoding
self.compression = _infer_compression(self.path_or_buf, compression)
if quoting is None:
quoting = csvlib.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csvlib.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator or os.linesep
self.date_format = date_format
self.tupleize_cols = tupleize_cols
self.has_mi_columns = (isinstance(obj.columns, ABCMultiIndex) and
not self.tupleize_cols)
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, ABCIndexClass):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
# and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, ABCIndexClass):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if (isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and
date_format is not None):
from pandas import Index
self.data_index = Index([x.strftime(date_format) if notna(x) else
'' for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
def save(self):
"""
Create the writer & save
"""
# GH21227 internal compression is not used when file-like passed.
if self.compression and hasattr(self.path_or_buf, 'write'):
msg = ("compression has no effect when passing file-like "
"object as input.")
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# when zip compression is called.
is_zip = isinstance(self.path_or_buf, ZipFile) or (
not hasattr(self.path_or_buf, 'write')
and self.compression == 'zip')
if is_zip:
# zipfile doesn't support writing string to archive. uses string
# buffer to receive csv writing and dump into zip compression
# file handle. GH21241, GH21118
f = StringIO()
close = False
elif hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding == 'ascii':
self.writer = csvlib.writer(f, **writer_kwargs)
else:
writer_kwargs['encoding'] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
self._save()
finally:
if is_zip:
# GH17778 handles zip compression separately.
buf = f.getvalue()
if hasattr(self.path_or_buf, 'write'):
self.path_or_buf.write(buf)
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
f.write(buf)
close = True
if close:
f.close()
for _fh in handles:
_fh.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray,
ABCIndexClass))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing {ncols} cols but got {nalias} '
'aliases'.format(ncols=len(cols),
nalias=len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, ABCMultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, ABCIndexClass)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns or has_aliases:
encoded_labels += list(write_cols)
writer.writerow(encoded_labels)
else:
# write out the mi
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns._get_level_values(i))
writer.writerow(col_line)
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
if encoded_labels and set(encoded_labels) != {''}:
encoded_labels.extend([''] * len(columns))
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
libwriters.write_csv_rows(self.data, ix, self.nlevels,
self.cols, self.writer)
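# Minimal usage sketch -- CSVFormatter is normally driven internally by
# DataFrame.to_csv; calling it directly like this is only for illustration:
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.5]})
#     CSVFormatter(df, "out.csv", sep=";", index=False).save()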
| bsd-3-clause |
andyh616/mne-python | mne/viz/tests/test_topo.py | 5 | 4767 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
from collections import namedtuple
import numpy as np
from numpy.testing import assert_raises
from mne import io, read_events, Epochs
from mne import pick_channels_evoked
from mne.channels import read_layout
from mne.time_frequency.tfr import AverageTFR
from mne.utils import run_tests_if_main
from mne.viz import (plot_topo, plot_topo_image_epochs, _get_presser,
mne_analyze_colormap, plot_evoked_topo)
from mne.viz.topo import _plot_update_evoked_topo
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return [0, 1, 2, 6, 7, 8, 340, 341, 342] # take a only few channels
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_topo():
"""Test plotting of ERP topography
"""
import matplotlib.pyplot as plt
# Show topography
evoked = _get_epochs().average()
plot_evoked_topo(evoked) # should auto-find layout
warnings.simplefilter('always', UserWarning)
picked_evoked = evoked.pick_channels(evoked.ch_names[:3], copy=True)
picked_evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
# test scaling
with warnings.catch_warnings(record=True):
for ylim in [dict(mag=[-600, 600]), None]:
plot_topo([picked_evoked] * 2, layout, ylim=ylim)
for evo in [evoked, [evoked, picked_evoked]]:
assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
ch_names = evoked_delayed_ssp.ch_names[:3] # make it faster
picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
ch_names)
fig = plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
func = _get_presser(fig)
event = namedtuple('Event', 'inaxes')
func(event(inaxes=fig.axes[0]))
params = dict(evokeds=[picked_evoked_delayed_ssp],
times=picked_evoked_delayed_ssp.times,
fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
bools = [True] * len(params['projs'])
_plot_update_evoked_topo(params, bools)
# should auto-generate layout
plot_evoked_topo(picked_evoked_eeg.copy(),
fig_background=np.zeros((4, 3, 3)), proj=True)
plt.close('all')
def test_plot_topo_image_epochs():
"""Test plotting of epochs image topography
"""
import matplotlib.pyplot as plt
title = 'ERF images - MNE sample data'
epochs = _get_epochs()
cmap = mne_analyze_colormap(format='matplotlib')
plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title, cmap=cmap)
plt.close('all')
def test_plot_tfr_topo():
"""Test plotting of TFR data
"""
epochs = _get_epochs()
n_freqs = 3
nave = 1
data = np.random.RandomState(0).randn(len(epochs.ch_names),
n_freqs, len(epochs.times))
tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
tfr.plot_topo(baseline=(None, 0), mode='ratio', title='Average power',
vmin=0., vmax=14., show=False)
tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
run_tests_if_main()
| bsd-3-clause |
IssamLaradji/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 15 | 2130 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 4))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i+1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree %d" % degrees[i])
plt.show()
| bsd-3-clause |
hchauvet/beampy | beampy/modules/animatesvg.py | 1 | 5180 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 19:05:18 2015
@author: hugo
Class to manage text for beampy
"""
from beampy import document
from beampy.modules.figure import figure
from beampy.modules.core import beampy_module
from beampy.functions import convert_unit, gcs
import glob
import re
import sys
class animatesvg(beampy_module):
"""
Create svg animation from a folder containing svg files (or any files that
figure function can handle) or a list of matplotlib figures.
Parameters
----------
files_in : str or list of matplotlib figures or list of file names
List of figures to animate. List could be generated using a string
containing UNIX willcard like '/my/folder/*.svg', or using a list of
file names or matplotlib figure object.
x : int or float or {'center', 'auto'} or str, optional
Horizontal position for the animation (the default theme sets this to
'center'). See positioning system of Beampy.
y : int or float or {'center', 'auto'} or str, optional
Vertical position for the animation (the default theme sets this to
'auto'). See positioning system of Beampy.
start : integer, optional
Start position of the image sequence (the default theme sets this to
0).
end : int or 'end', optional
End position of the image sequence (the default theme sets this to
'end', which implies that the animation ends at the last item of
files_in).
width : int or float or None, optional
Width of the figure (the default is None, which implies that the width
is width of the image).
fps : int, optional
Animation frame-rate (the default theme sets this to 25).
autoplay : boolean, optional
Automatically start the animation when the slide is shown on screen
(the default theme sets this to False).
"""
def __init__(self, files_in, **kwargs):
# Add type
self.type = 'animatesvg'
# Check input args for this module
self.check_args_from_theme(kwargs)
# Cache is useless because we call the figure function, which handles the cache for each figure
self.cache = False
slide = document._slides[gcs()]
# Add +1 to counter
self.anim_num = slide.cpt_anim
slide.cpt_anim += 1
input_width = self.width # Save the input width for mpl figures
if self.width is None:
self.width = slide.curwidth
# Read all files from a given wildcard
if isinstance(files_in, str):
svg_files = glob.glob(files_in)
# Need to sort using the first digits found in the name
svg_files = sorted(svg_files, key=lambda x: int(''.join(re.findall(r'\d+', x))))
# If the input is a list of names or mpl figures or other compatible with figure
elif isinstance(files_in, list):
svg_files = files_in
if input_width is None:
width_inch, height_inch = files_in[0].get_size_inches()
self.width = convert_unit("%fin"%(width_inch))
else:
print('Unknown input type for files_in')
sys.exit(0)
# check how many images we want
if self.end == 'end':
self.end = len(svg_files)
# Add content
self.content = svg_files[self.start:self.end]
# Register the module
self.register()
def render(self):
"""
Render several images as an animation in html
"""
# Read all files and store their content
svgcontent = []
# Render each figure in a group
output = []
fig_args = {"width": self.width.value,
"height": self.height.value,
"x": 0, "y": 0}
if len(self.content)>0:
# Test if the output format supports video
if document._output_format=='html5':
for iframe, svgfile in enumerate(self.content):
#print iframe
img = figure(svgfile, **fig_args)
img.positionner = self.positionner
img.call_cmd = str(iframe)+'->'+self.call_cmd.strip()
img.call_lines = self.call_lines
img.run_render()
if iframe == 0:
self.update_size(img.width, img.height)
# parse the svg
tmpout = '''<g id="frame_%i">%s</g>'''%(iframe, img.svgout)
output += [tmpout]
img.delete()
self.animout = output
else:
# Check if pdf_animations is True
img = figure(self.content[0], **fig_args)
img.positionner = self.positionner
img.render()
self.update_size(img.width, img.height)
self.svgout = img.svgout
img.delete()
# return output
# Update the rendered state of the module
self.rendered = True
else:
print('nothing found')
| gpl-3.0 |
terhorstd/nest-simulator | pynest/examples/hh_psc_alpha.py | 1 | 2263 | # -*- coding: utf-8 -*-
#
# hh_psc_alpha.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Example using hh_psc_alpha
-------------------------------
This example produces a rate-response (FI) curve of the Hodgkin-Huxley
neuron in response to a range of different current (DC) stimulations.
The result is plotted using matplotlib.
Since a DC input affects only the neuron's channel dynamics, this routine
does not yet check correctness of synaptic response.
References
~~~~~~~~~~~
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
nest.hl_api.set_verbosity('M_WARNING')
nest.ResetKernel()
simtime = 1000
# Amplitude range, in pA
dcfrom = 0
dcstep = 20
dcto = 2000
h = 0.1 # simulation step size in ms
neuron = nest.Create('hh_psc_alpha')
sd = nest.Create('spike_detector')
nest.SetStatus(sd, {'to_memory': False})
nest.Connect(neuron, sd, syn_spec={'weight': 1.0, 'delay': h})
# Simulation loop
n_data = int(dcto / float(dcstep))
amplitudes = np.zeros(n_data)
event_freqs = np.zeros(n_data)
for i, amp in enumerate(range(dcfrom, dcto, dcstep)):
nest.SetStatus(neuron, {'I_e': float(amp)})
print("Simulating with current I={} pA".format(amp))
nest.Simulate(1000) # one second warm-up time for equilibrium state
nest.SetStatus(sd, {'n_events': 0}) # then reset spike counts
nest.Simulate(simtime) # another simulation call to record firing rate
n_events = nest.GetStatus(sd, keys={'n_events'})[0][0]
amplitudes[i] = amp
event_freqs[i] = n_events / (simtime / 1000.)
plt.plot(amplitudes, event_freqs)
plt.show()
| gpl-2.0 |