repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
gnowledge/ncert_nroer | gstudio/spam_checker/backends/automattic.py | 3 | 4825 | # Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Akismet spam checker backend for Gstudio"""
from django.conf import settings
from django.utils.encoding import smart_str
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from gstudio.settings import PROTOCOL
try:
from akismet import Akismet
from akismet import APIKeyError
except ImportError:
raise ImproperlyConfigured('akismet module is not available')
if not getattr(settings, 'AKISMET_SECRET_API_KEY', ''):
raise ImproperlyConfigured('You have to set AKISMET_SECRET_API_KEY')
AKISMET_API_KEY = settings.AKISMET_SECRET_API_KEY
def backend(comment, content_object, request):
"""Akismet spam checker backend for Gstudio"""
blog_url = '%s://%s/' % (PROTOCOL, Site.objects.get_current().domain)
akismet = Akismet(key=AKISMET_API_KEY, blog_url=blog_url)
if not akismet.verify_key():
raise APIKeyError('Your Akismet API key is invalid.')
akismet_data = {
'user_ip': request.META.get('REMOTE_ADDR', ''),
'user_agent': request.META.get('HTTP_USER_AGENT', ''),
'referrer': request.META.get('HTTP_REFERER', 'unknown'),
'permalink': content_object.get_absolute_url(),
'comment_type': 'comment',
'comment_author': smart_str(comment.userinfo.get('name', '')),
'comment_author_email': smart_str(comment.userinfo.get('email', '')),
'comment_author_url': smart_str(comment.userinfo.get('url', '')),
}
is_spam = akismet.comment_check(smart_str(comment.comment),
data=akismet_data, build_data=True)
return is_spam
| agpl-3.0 |
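The `backend` function above is this row's whole payload; as orientation, here is a minimal, hypothetical driver for it. The import path is taken from this row's `path` column, and the moderation action (`comment.is_public = False`) is an illustrative assumption, not something the gstudio source specifies.

```python
# Hypothetical usage sketch -- not part of the gstudio source above.
from gstudio.spam_checker.backends.automattic import backend

def moderate_comment(comment, content_object, request):
    """Flag a comment when the Akismet backend reports it as spam."""
    is_spam = backend(comment, content_object, request)
    if is_spam:
        comment.is_public = False  # illustrative action; the real project decides
        comment.save()
    return is_spam
```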
darkleons/odoo | addons/mrp_repair/wizard/cancel_repair.py | 384 | 3683 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
class repair_cancel(osv.osv_memory):
_name = 'mrp.repair.cancel'
_description = 'Cancel Repair'
def cancel_repair(self, cr, uid, ids, context=None):
""" Cancels the repair
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
assert record_id, _('Active ID not Found')
repair_order_obj = self.pool.get('mrp.repair')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_order = repair_order_obj.browse(cr, uid, record_id, context=context)
if repair_order.invoiced or repair_order.invoice_method == 'none':
repair_order_obj.action_cancel(cr, uid, [record_id], context=context)
else:
raise osv.except_osv(_('Warning!'),_('Repair order is not invoiced.'))
return {'type': 'ir.actions.act_window_close'}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if context is None:
context = {}
res = super(repair_cancel, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if not record_id or (active_model and active_model != 'mrp.repair'):
return res
repair_order = self.pool.get('mrp.repair').browse(cr, uid, record_id, context=context)
if not repair_order.invoiced:
res['arch'] = """
<form string="Cancel Repair" version="7.0">
<header>
<button name="cancel_repair" string="_Yes" type="object" class="oe_highlight"/>
or
<button string="Cancel" class="oe_link" special="cancel"/>
</header>
<label string="Do you want to continue?"/>
</form>
"""
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
petteyg/intellij-community | python/lib/Lib/distutils/command/build_scripts.py | 97 | 5528 | """distutils.command.build_scripts
Implements the Distutils 'build_scripts' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_scripts.py 59668 2008-01-02 18:59:36Z guido.van.rossum $"
import sys, os, re
from stat import ST_MODE
from distutils import sysconfig
from distutils.core import Command
from distutils.dep_util import newer
from distutils.util import convert_path
from distutils import log
# check if Python is called on the first line with this expression
first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
class build_scripts (Command):
description = "\"build\" scripts (copy and fixup #! line)"
user_options = [
('build-dir=', 'd', "directory to \"build\" (copy) to"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
('executable=', 'e', "specify final destination interpreter path"),
]
boolean_options = ['force']
def initialize_options (self):
self.build_dir = None
self.scripts = None
self.force = None
self.executable = None
self.outfiles = None
def finalize_options (self):
self.set_undefined_options('build',
('build_scripts', 'build_dir'),
('force', 'force'),
('executable', 'executable'))
self.scripts = self.distribution.scripts
def get_source_files(self):
return self.scripts
def run (self):
if not self.scripts:
return
self.copy_scripts()
def copy_scripts (self):
"""Copy each script listed in 'self.scripts'; if it's marked as a
Python script in the Unix way (first line matches 'first_line_re',
ie. starts with "\#!" and contains "python"), then adjust the first
line to refer to the current Python interpreter as we copy.
"""
self.mkpath(self.build_dir)
outfiles = []
for script in self.scripts:
adjust = 0
script = convert_path(script)
outfile = os.path.join(self.build_dir, os.path.basename(script))
outfiles.append(outfile)
if not self.force and not newer(script, outfile):
log.debug("not copying %s (up-to-date)", script)
continue
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, "r")
except IOError:
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line:
self.warn("%s is an empty file (skipping)" % script)
continue
match = first_line_re.match(first_line)
if match:
adjust = 1
post_interp = match.group(1) or ''
if adjust:
log.info("copying and adjusting %s -> %s", script,
self.build_dir)
if not sysconfig.python_build:
executable = self.executable
else:
executable = os.path.join(
sysconfig.get_config_var("BINDIR"),
"python" + sysconfig.get_config_var("EXE"))
executable = fix_jython_executable(executable, post_interp)
if not self.dry_run:
outf = open(outfile, "w")
outf.write("#!%s%s\n" %
(executable,
post_interp))
outf.writelines(f.readlines())
outf.close()
if f:
f.close()
else:
if f:
f.close()
self.copy_file(script, outfile)
if hasattr(os, 'chmod'):
for file in outfiles:
if self.dry_run:
log.info("changing mode of %s", file)
else:
oldmode = os.stat(file)[ST_MODE] & 07777
newmode = (oldmode | 0555) & 07777
if newmode != oldmode:
log.info("changing mode of %s from %o to %o",
file, oldmode, newmode)
os.chmod(file, newmode)
# copy_scripts ()
# class build_scripts
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (IOError, OSError):
return executable
return magic == '#!'
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn("WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
| apache-2.0 |
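The core of `copy_scripts` above is the shebang rewrite driven by `first_line_re`. The snippet below is a standalone sketch (not part of distutils) showing what that regex captures: the captured group is exactly the `post_interp` options re-attached after the interpreter path is swapped.

```python
# Standalone sketch of the shebang matching used by copy_scripts() above.
import re

first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')

for first_line in ("#!/usr/local/bin/python2.5 -u\n", "#!/bin/sh\n"):
    match = first_line_re.match(first_line)
    if match:
        post_interp = match.group(1) or ''      # interpreter options, e.g. ' -u'
        print("adjusting shebang, options=%r" % post_interp)
    else:
        print("copying unchanged: %r" % first_line)
```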
pyparallel/numpy | numpy/polynomial/polynomial.py | 126 | 48268 | """
Objects for dealing with polynomials.
This module provides a number of objects (mostly functions) useful for
dealing with polynomials, including a `Polynomial` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with polynomial objects is in
the docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `polydomain` -- Polynomial default domain, [-1,1].
- `polyzero` -- (Coefficients of the) "zero polynomial."
- `polyone` -- (Coefficients of the) constant polynomial 1.
- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``.
Arithmetic
----------
- `polyadd` -- add two polynomials.
- `polysub` -- subtract one polynomial from another.
- `polymul` -- multiply two polynomials.
- `polydiv` -- divide one polynomial by another.
- `polypow` -- raise a polynomial to a positive integer power.
- `polyval` -- evaluate a polynomial at given points.
- `polyval2d` -- evaluate a 2D polynomial at given points.
- `polyval3d` -- evaluate a 3D polynomial at given points.
- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product.
- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product.
Calculus
--------
- `polyder` -- differentiate a polynomial.
- `polyint` -- integrate a polynomial.
Misc Functions
--------------
- `polyfromroots` -- create a polynomial with specified roots.
- `polyroots` -- find the roots of a polynomial.
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
- `polycompanion` -- companion matrix in power series form.
- `polyfit` -- least-squares fit returning a polynomial.
- `polytrim` -- trim leading coefficients from a polynomial.
- `polyline` -- polynomial representing given straight line.
Classes
-------
- `Polynomial` -- polynomial class.
See Also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit',
'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
polytrim = pu.trimcoef
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Polynomial default domain.
polydomain = np.array([-1, 1])
# Polynomial coefficients representing zero.
polyzero = np.array([0])
# Polynomial coefficients representing one.
polyone = np.array([1])
# Polynomial coefficients representing the identity x.
polyx = np.array([0, 1])
#
# Polynomial series functions
#
def polyline(off, scl):
"""
Returns an array representing a linear polynomial.
Parameters
----------
off, scl : scalars
The "y-intercept" and "slope" of the line, respectively.
Returns
-------
y : ndarray
This module's representation of the linear polynomial ``off +
scl*x``.
See Also
--------
chebline
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polyline(1,-1)
array([ 1, -1])
>>> P.polyval(1, P.polyline(1,-1)) # should be 0
0.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def polyfromroots(roots):
"""
Generate a monic polynomial with given roots.
Return the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
where the `r_n` are the roots specified in `roots`. If a zero has
multiplicity n, then it must appear in `roots` n times. For instance,
if 2 is a root of multiplicity three and 3 is a root of multiplicity 2,
then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear
in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * x + ... + x^n
The coefficient of the last term is 1 for monic polynomials in this
form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of the polynomial's coefficients. If all the roots are
real, then `out` is also real, otherwise it is complex. (see
Examples below).
See Also
--------
chebfromroots, legfromroots, lagfromroots, hermfromroots
hermefromroots
Notes
-----
The coefficients are determined by multiplying together linear factors
of the form `(x - r_i)`, i.e.
.. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n)
where ``n == len(roots) - 1``; note that this implies that `1` is always
returned for :math:`a_n`.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x
array([ 0., -1., 0., 1.])
>>> j = complex(0,1)
>>> P.polyfromroots((-j,j)) # complex returned, though values are real
array([ 1.+0.j, 0.+0.j, 1.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [polyline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [polymul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = polymul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def polyadd(c1, c2):
"""
Add one polynomial to another.
Returns the sum of two polynomials `c1` + `c2`. The arguments are
sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
out : ndarray
The coefficient array representing their sum.
See Also
--------
polysub, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> sum = P.polyadd(c1,c2); sum
array([ 4., 4., 4.])
>>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
28.0
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polysub(c1, c2):
"""
Subtract one polynomial from another.
Returns the difference of two polynomials `c1` - `c2`. The arguments
are sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of coefficients representing their difference.
See Also
--------
polyadd, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polysub(c1,c2)
array([-2., 0., 2.])
>>> P.polysub(c2,c1) # -P.polysub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polymulx(c):
"""Multiply a polynomial by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1:] = c
return prd
def polymul(c1, c2):
"""
Multiply one polynomial by another.
Returns the product of two polynomials `c1` * `c2`. The arguments are
sequences of coefficients, from lowest order term to highest, e.g.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.``
Parameters
----------
c1, c2 : array_like
1-D arrays of coefficients representing a polynomial, relative to the
"standard" basis, and ordered from lowest order term to highest.
Returns
-------
out : ndarray
Of the coefficients of their product.
See Also
--------
polyadd, polysub, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polymul(c1,c2)
array([ 3., 8., 14., 8., 3.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
ret = np.convolve(c1, c2)
return pu.trimseq(ret)
def polydiv(c1, c2):
"""
Divide one polynomial by another.
Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
The arguments are sequences of coefficients, from lowest order term
to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
[quo, rem] : ndarrays
Of coefficient series representing the quotient and remainder.
See Also
--------
polyadd, polysub, polymul, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polydiv(c1,c2)
(array([ 3.]), array([-8., -4.]))
>>> P.polydiv(c2,c1)
(array([ 0.33333333]), array([ 2.66666667, 1.33333333]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
len1 = len(c1)
len2 = len(c2)
if len2 == 1:
return c1/c2[-1], c1[:1]*0
elif len1 < len2:
return c1[:1]*0, c1
else:
dlen = len1 - len2
scl = c2[-1]
c2 = c2[:-1]/scl
i = dlen
j = len1 - 1
while i >= 0:
c1[i:j] -= c2*c1[j]
i -= 1
j -= 1
return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
def polypow(c, pow, maxpower=None):
"""Raise a polynomial to a power.
Returns the polynomial `c` raised to the power `pow`. The argument
`c` is a sequence of coefficients ordered from low to high. i.e.,
[1,2,3] is the series ``1 + 2*x + 3*x**2.``
Parameters
----------
c : array_like
1-D array of series coefficients ordered from low to
high degree.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Power series of power.
See Also
--------
polyadd, polysub, polymul, polydiv
Examples
--------
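>>> from numpy.polynomial import polynomial as P
>>> P.polypow([1, 2, 3], 2) # (1 + 2*x + 3*x**2)**2
array([ 1., 4., 10., 12., 9.])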
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = np.convolve(prd, c)
return prd
def polyder(c, m=1, scl=1, axis=0):
"""
Differentiate a polynomial.
Returns the polynomial coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The
argument `c` is an array of coefficients from low to high degree along
each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``
while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is
``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of polynomial coefficients. If c is multidimensional the
different axis correspond to different variables with the degree
in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change
of variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Polynomial coefficients of the derivative.
See Also
--------
polyint
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
>>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
array([ 2., 6., 12.])
>>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
array([ 24.])
>>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
array([ -2., -6., -12.])
>>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
array([ 6., 24.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
cdt = c.dtype
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=cdt)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a polynomial.
Returns the polynomial coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients, from low to high degree along each axis, e.g., [1,2,3]
represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
1-D array of polynomial coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Coefficient array of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``.
See Also
--------
polyder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3)
>>> P.polyint(c) # should return array([0, 1, 1, 1])
array([ 0., 1., 1., 1.])
>>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
array([ 0. , 0. , 0. , 0.16666667, 0.08333333,
0.05 ])
>>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
array([ 3., 1., 1., 1.])
>>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
array([ 6., 1., 1., 1.])
>>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
array([ 0., -2., -2., -2.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype doesn't preserve mask attribute.
c = c + 0.0
cdt = c.dtype
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
k = list(k) + [0]*(cnt - len(k))
c = np.rollaxis(c, iaxis)
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - polyval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyval(x, c, tensor=True):
"""
Evaluate a polynomial at points x.
If `c` is of length `n + 1`, this function returns the value
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, compatible object
The shape of the returned array is described above.
See Also
--------
polyval2d, polygrid2d, polyval3d, polygrid3d
Notes
-----
The evaluation uses Horner's method.
Examples
--------
>>> from numpy.polynomial.polynomial import polyval
>>> polyval(1, [1,2,3])
6.0
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> polyval(a, [1,2,3])
array([[ 1., 6.],
[ 17., 34.]])
>>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients
>>> coef
array([[0, 1],
[2, 3]])
>>> polyval([1,2], coef, tensor=True)
array([[ 2., 4.],
[ 4., 7.]])
>>> polyval([1,2], coef, tensor=False)
array([ 2., 7.])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
c0 = c[-1] + x*0
for i in range(2, len(c) + 1):
c0 = c[-i] + c0*x
return c0
def polyval2d(x, y, c):
"""
Evaluate a 2-D polynomial at points (x, y).
This function returns the value
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in `c[i,j]`. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
polyval, polygrid2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
return c
def polygrid2d(x, y, c):
"""
Evaluate a 2-D polynomial on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
polyval, polyval2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = polyval(x, c)
c = polyval(y, c)
return c
def polyval3d(x, y, z, c):
"""
Evaluate a 3-D polynomial at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
polyval, polyval2d, polygrid2d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
c = polyval(z, c, tensor=False)
return c
def polygrid3d(x, y, z, c):
"""
Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the Cartesian
product of `x`, `y`, and `z`.
See Also
--------
polyval, polyval2d, polygrid2d, polyval3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = polyval(x, c)
c = polyval(y, c)
c = polyval(z, c)
return c
def polyvander(x, deg):
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points
`x`. The Vandermonde matrix is defined by
.. math:: V[..., i] = x^i,
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the power of `x`.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
``polyval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of polynomials of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray.
The Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the power of `x`.
The dtype will be the same as the converted `x`.
See Also
--------
polyvander2d, polyvander3d
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x
return np.rollaxis(v, 0, v.ndim)
def polyvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j,
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the powers of
`x` and `y`.
If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
polyvander, polyvander3d, polyval2d, polyval3d
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
v = vx[..., None]*vy[..., None,:]
# einsum bug
#v = np.einsum("...i,...j->...ij", vx, vy)
return v.reshape(v.shape[:-2] + (-1,))
def polyvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the powers of `x`, `y`, and `z`.
If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
polyvander, polyvander3d, polyval2d, polyval3d
Notes
-----
.. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
vz = polyvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
# einsum bug
#v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz)
return v.reshape(v.shape[:-3] + (-1,))
def polyfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least-squares fit of a polynomial to data.
Return the coefficients of a polynomial of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
where `n` is `deg`.
Parameters
----------
x : array_like, shape (`M`,)
x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
y : array_like, shape (`M`,) or (`M`, `K`)
y-coordinates of the sample points. Several sets of sample points
sharing the same x-coordinates can be (independently) fit with one
call to `polyfit` by passing in for `y` a 2-D array that contains
one data set per column.
deg : int
Degree of the polynomial(s) to be fit.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than `rcond`, relative to the largest singular value, will be
ignored. The default value is ``len(x)*eps``, where `eps` is the
relative precision of the platform's float type, about 2e-16 in
most cases.
full : bool, optional
Switch determining the nature of the return value. When ``False``
(the default) just the coefficients are returned; when ``True``,
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
Polynomial coefficients ordered from low to high. If `y` was 2-D,
the coefficients in column `k` of `coef` represent the polynomial
fit to the data in `y`'s `k`-th column.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Raises
------
RankWarning
Raised if the matrix in the least-squares fit is rank deficient.
The warning is only raised if `full` == False. The warnings can
be turned off by:
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, hermfit, hermefit
polyval : Evaluates a polynomial.
polyvander : Vandermonde matrix for powers.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the polynomial `p` that minimizes
the sum of the weighted squared errors
.. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) over-determined matrix equation:
.. math :: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected (and `full` == ``False``), a `RankWarning` will be raised.
This means that the coefficient values may be poorly determined.
Fitting to a lower order polynomial will usually get rid of the warning
(but may not be what you want, of course; if you have independent
reason(s) for choosing the degree which isn't working, you may have to:
a) reconsider those reasons, and/or b) reconsider the quality of your
data). The `rcond` parameter can also be set to a value smaller than
its default, but the resulting fit may be spurious and have large
contributions from roundoff error.
Polynomial fits using double precision tend to "fail" at about
(polynomial) degree 20. Fits using Chebyshev or Legendre series are
generally better conditioned, but much can still depend on the
distribution of the sample points and the smoothness of the data. If
the quality of the fit is inadequate, splines may be a good
alternative.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
>>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])
>>> stats # note the large SSR, explaining the rather poor results
[array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,
0.28853036]), 1.1324274851176597e-014]
Same thing without the added noise
>>> y = x**3 - x
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,
1.00000000e+00])
>>> stats # note the minuscule SSR
[array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,
0.50443316, 0.28853036]), 1.1324274851176597e-014]
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = polyvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def polycompanion(c):
"""
Return the companion matrix of c.
The companion matrix for power series cannot be made symmetric by
scaling the basis, so this function differs from those for the
orthogonal polynomials.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
bot = mat.reshape(-1)[n::n+1]
bot[...] = 1
mat[:, -1] -= c[:-1]/c[-1]
return mat
def polyroots(c):
"""
Compute the roots of a polynomial.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * x^i.
Parameters
----------
c : 1-D array_like
1-D array of polynomial coefficients.
Returns
-------
out : ndarray
Array of the roots of the polynomial. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the power series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
Examples
--------
>>> import numpy.polynomial.polynomial as poly
>>> poly.polyroots(poly.polyfromroots((-1,0,1)))
array([-1., 0., 1.])
>>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
dtype('float64')
>>> j = complex(0,1)
>>> poly.polyroots(poly.polyfromroots((-j,0,j)))
array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = polycompanion(c)
r = la.eigvals(m)
r.sort()
return r
#
# polynomial class
#
class Polynomial(ABCPolyBase):
"""A power series class.
The Polynomial class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Polynomial coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(polyadd)
_sub = staticmethod(polysub)
_mul = staticmethod(polymul)
_div = staticmethod(polydiv)
_pow = staticmethod(polypow)
_val = staticmethod(polyval)
_int = staticmethod(polyint)
_der = staticmethod(polyder)
_fit = staticmethod(polyfit)
_line = staticmethod(polyline)
_roots = staticmethod(polyroots)
_fromroots = staticmethod(polyfromroots)
# Virtual properties
nickname = 'poly'
domain = np.array(polydomain)
window = np.array(polydomain)
| bsd-3-clause |
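Since this row is the full `numpy.polynomial.polynomial` module, a short round trip through its public entry points may help orient the reader. The snippet below is a usage sketch, not part of the numpy source, and the residual printed at the end is only indicative.

```python
# Usage sketch for the module above: fit noisy samples of x**2 - 1, then
# evaluate the fit through both the functional and the object interface.
import numpy as np
from numpy.polynomial import polynomial as P

x = np.linspace(-1, 1, 51)
y = x**2 - 1 + 0.01 * np.random.randn(x.size)

coef = P.polyfit(x, y, 2)                    # coefficients ordered low -> high
approx = P.polyval(x, coef)                  # evaluate the fitted series at x
print(np.max(np.abs(approx - (x**2 - 1))))   # small residual expected

p = P.Polynomial.fit(x, y, 2)                # same fit via the Polynomial class
print(p.convert().coef)                      # roughly [-1, 0, 1]
```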
RedHatQE/cfme_tests | cfme/utils/tests/test_soft_get.py | 4 | 1361 | import pytest
from cfme.utils.soft_get import MultipleResultsException
from cfme.utils.soft_get import soft_get
def test_soft_get():
class TestObj(object):
a = 1
b = 2
c = 3
aa = 11
bb = 22
bbb = 222
container_image = 'container_image'
image_registry = 'image_registry'
test_dict = {'a': 1, 'b': 2, 'c': 3, 'aa': 11, 'bb': 22,
'container_image': 'container_image',
'image_registry': 'image_registry'}
for tested in (TestObj, test_dict):
is_dict = (type(tested) is dict)
with pytest.raises(AttributeError):
soft_get(tested, 'no_such_attr', dict_=is_dict)
with pytest.raises(MultipleResultsException):
soft_get(tested, 'a', dict_=is_dict, best_match=False)
with pytest.raises(AttributeError):
soft_get(tested, 'Aa', dict_=is_dict, case_sensitive=True)
if not is_dict:
with pytest.raises(TypeError):
soft_get(tested, 'a', dict_=True)
assert soft_get(tested, 'a', dict_=is_dict) == 1
assert soft_get(tested, 'bb', dict_=is_dict) == 22
assert soft_get(tested, 'image', dict_=is_dict) == 'image_registry'
assert soft_get(tested, 'image', dict_=is_dict, dont_include=['registry']) \
== 'container_image'
| gpl-2.0 |
Leibniz137/testinfra | testinfra/backend/ssh.py | 1 | 2940 | # coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import base64
from testinfra.backend import base
class SshBackend(base.BaseBackend):
"""Run command through ssh command"""
NAME = "ssh"
def __init__(self, hostspec, ssh_config=None, *args, **kwargs):
self.host, self.user, self.port = self.parse_hostspec(hostspec)
self.ssh_config = ssh_config
super(SshBackend, self).__init__(self.host, *args, **kwargs)
def run(self, command, *args, **kwargs):
return self.run_ssh(self.get_command(command, *args))
def run_ssh(self, command):
cmd = ["ssh"]
cmd_args = []
if self.ssh_config:
cmd.append("-F %s")
cmd_args.append(self.ssh_config)
if self.user:
cmd.append("-o User=%s")
cmd_args.append(self.user)
if self.port:
cmd.append("-o Port=%s")
cmd_args.append(self.port)
cmd.append("%s %s")
cmd_args.extend([self.host, command])
out = self.run_local(
" ".join(cmd), *cmd_args)
out.command = self.encode(command)
return out
class SafeSshBackend(SshBackend):
"""Run command using ssh command but try to get a more sane output
When using ssh (or a potentially bugged wrapper) additional output can be
added to stdout/stderr and the exit status may not be propagated correctly.
To avoid that kind of bug, we wrap the command to have an output like
this:
TESTINFRA_START;EXIT_STATUS;STDOUT;STDERR;TESTINFRA_END
where STDOUT/STDERR are base64 encoded, then we parse that magic string to
get sane variables
"""
NAME = "safe-ssh"
def run(self, command, *args, **kwargs):
orig_command = self.get_command(command, *args)
out = self.run_ssh((
'''of=$(mktemp)&&ef=$(mktemp)&&%s >$of 2>$ef; r=$?;'''
'''echo "TESTINFRA_START;$r;$(base64 < $of);$(base64 < $ef);'''
'''TESTINFRA_END";rm -f $of $ef''') % (orig_command,))
start = out.stdout.find("TESTINFRA_START;") + len("TESTINFRA_START;")
end = out.stdout.find("TESTINFRA_END") - 1
rc, stdout, stderr = out.stdout[start:end].split(";")
rc = int(rc)
stdout = base64.b64decode(stdout)
stderr = base64.b64decode(stderr)
return self.result(rc, self.encode(orig_command), stdout, stderr)
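if __name__ == "__main__":
    # Hedged illustration, not part of the original backend: a hypothetical
    # sample of the magic string described in the SafeSshBackend docstring,
    # parsed the same way as SafeSshBackend.run above.
    sample = "TESTINFRA_START;0;b2sK;;TESTINFRA_END"  # "b2sK" == base64("ok\n")
    start = sample.find("TESTINFRA_START;") + len("TESTINFRA_START;")
    end = sample.find("TESTINFRA_END") - 1
    rc, stdout_b64, stderr_b64 = sample[start:end].split(";")
    assert int(rc) == 0
    assert base64.b64decode(stdout_b64) == b"ok\n"
    assert stderr_b64 == ""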
| apache-2.0 |
forpster/dotfiles | home/.emacs.d/.python-environments/default/lib/python2.7/site-packages/pip/_vendor/requests/compat.py | 1039 | 1469 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
| mit |
alexandrebarachant/decoding-brain-challenge-2016 | models/pyriemann/utils/distance.py | 1 | 4099 | """Distance utils."""
import numpy
from scipy.linalg import eigvalsh
from .base import logm, sqrtm
def distance_kullback(A, B):
"""Kullback leibler divergence between two covariance matrices A and B.
:param A: First covariance matrix
:param B: Second covariance matrix
:returns: Kullback-Leibler divergence between A and B
"""
dim = A.shape[0]
logdet = numpy.log(numpy.linalg.det(B) / numpy.linalg.det(A))
kl = numpy.trace(numpy.dot(numpy.linalg.inv(B), A)) - dim + logdet
return 0.5 * kl
def distance_kullback_right(A, B):
"""wrapper for right kullblack leibler div."""
return distance_kullback(B, A)
def distance_kullback_sym(A, B):
"""Symetrized kullback leibler divergence."""
return distance_kullback(A, B) + distance_kullback_right(A, B)
def distance_euclid(A, B):
"""Euclidean distance between two covariance matrices A and B.
The Euclidean distance is defined by the Frobenius norm between the two
matrices.
.. math::
d = \Vert \mathbf{A} - \mathbf{B} \Vert_F
:param A: First covariance matrix
:param B: Second covariance matrix
:returns: Euclidean distance between A and B
"""
return numpy.linalg.norm(A - B, ord='fro')
def distance_logeuclid(A, B):
"""Log Euclidean distance between two covariance matrices A and B.
.. math::
d = \Vert \log(\mathbf{A}) - \log(\mathbf{B}) \Vert_F
:param A: First covariance matrix
:param B: Second covariance matrix
:returns: Log-Euclidean distance between A and B
"""
return distance_euclid(logm(A), logm(B))
def distance_riemann(A, B):
"""Riemannian distance between two covariance matrices A and B.
.. math::
d = {\left( \sum_i \log(\lambda_i)^2 \\right)}^{1/2}
where :math:`\lambda_i` are the joint eigenvalues of A and B
:param A: First covariance matrix
:param B: Second covariance matrix
:returns: Riemannian distance between A and B
"""
return numpy.sqrt((numpy.log(eigvalsh(A, B))**2).sum())
def distance_logdet(A, B):
"""Log-det distance between two covariance matrices A and B.
.. math::
d = \sqrt{\left(\log(\det(\\frac{\mathbf{A}+\mathbf{B}}{2})) - 0.5 \\times \log(\det(\mathbf{A}) \det(\mathbf{B}))\\right)}
:param A: First covariance matrix
:param B: Second covariance matrix
:returns: Log-det distance between A and B
"""
return numpy.sqrt(numpy.log(numpy.linalg.det(
(A + B) / 2.0)) - 0.5 * numpy.log(numpy.linalg.det(A)*numpy.linalg.det(B)))
def distance_wasserstein(A, B):
"""Wasserstein distance between two covariances matrices.
.. math::
d = \left( {tr(A + B - 2(A^{1/2}BA^{1/2})^{1/2})}\\right )^{1/2}
:param A: First covariance matrix
:param B: Second covariance matrix
:returns: Wasserstein distance between A and B
"""
B12 = sqrtm(B)
C = sqrtm(numpy.dot(numpy.dot(B12, A), B12))
return numpy.sqrt(numpy.trace(A + B - 2*C))
def distance(A, B, metric='riemann'):
"""Distance between two covariance matrices A and B according to the metric.
:param A: First covariance matrix
:param B: Second covariance matrix
:param metric: the metric (Default value 'riemann'), can be : 'riemann' ,
'logeuclid' , 'euclid' , 'logdet', 'kullback', 'kullback_right',
'kullback_sym'.
:returns: the distance between A and B
"""
distance_methods = {'riemann': distance_riemann,
'logeuclid': distance_logeuclid,
'euclid': distance_euclid,
'logdet': distance_logdet,
'kullback': distance_kullback,
'kullback_right': distance_kullback_right,
'kullback_sym': distance_kullback_sym,
'wasserstein': distance_wasserstein}
if len(A.shape) == 3:
d = numpy.empty((len(A), 1))
for i in range(len(A)):
d[i] = distance_methods[metric](A[i], B)
else:
d = distance_methods[metric](A, B)
return d
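if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. Because of the
    # relative import of .base above, this file is normally imported as part
    # of the package; the values below only illustrate the formulas
    # documented in the docstrings.
    A = numpy.eye(3)
    B = 2.0 * numpy.eye(3)
    # Generalized eigenvalues of (A, B) are all 0.5, so the Riemannian
    # distance is sqrt(3 * log(2) ** 2) = sqrt(3) * log(2) ~= 1.2.
    print(distance_riemann(A, B))
    # The Euclidean distance is the Frobenius norm of A - B, i.e. sqrt(3).
    print(distance(A, B, metric='euclid'))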
| bsd-3-clause |
hugegreenbug/libgestures | include/build/protoc_java.py | 90 | 1317 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate java source files from protobufs
Usage:
protoc_java.py {protoc} {proto_path} {java_out} {stamp_file} {proto_files}
This is a helper file for the genproto_java action in protoc_java.gypi.
It performs the following steps:
1. Deletes all old sources (ensures deleted classes are not part of new jars).
2. Creates source directory.
3. Generates Java files using protoc.
4. Creates a new stamp file.
"""
import os
import shutil
import subprocess
import sys
def main(argv):
if len(argv) < 5:
usage()
return 1
protoc_path, proto_path, java_out, stamp_file = argv[1:5]
proto_files = argv[5:]
# Delete all old sources
if os.path.exists(java_out):
shutil.rmtree(java_out)
# Create source directory
os.makedirs(java_out)
# Generate Java files using protoc
ret = subprocess.call(
[protoc_path, '--proto_path', proto_path, '--java_out', java_out]
+ proto_files)
if ret == 0:
# Create a new stamp file
with file(stamp_file, 'a'):
os.utime(stamp_file, None)
return ret
def usage():
print(__doc__)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
orvi2014/kitsune | kitsune/customercare/api.py | 11 | 4324 | import json
from rest_framework import generics, serializers, status, decorators
from rest_framework.response import Response
from kitsune.customercare.models import TwitterAccount
from kitsune.sumo.api import GenericAPIException, GenericDjangoPermission
class TwitterAccountBanPermission(GenericDjangoPermission):
permissions = ['customercare.ban_account']
class TwitterAccountIgnorePermission(GenericDjangoPermission):
permissions = ['customercare.ignore_account']
class TwitterAccountSerializer(serializers.ModelSerializer):
"""Serializer for the TwitterAccount model."""
class Meta:
model = TwitterAccount
class BannedList(generics.ListAPIView):
"""Get all banned users."""
queryset = TwitterAccount.objects.filter(banned=True)
serializer_class = TwitterAccountSerializer
permission_classes = (TwitterAccountBanPermission,)
class IgnoredList(generics.ListAPIView):
"""Get all banned users."""
queryset = TwitterAccount.objects.filter(ignored=True)
serializer_class = TwitterAccountSerializer
permission_classes = (TwitterAccountIgnorePermission,)
@decorators.api_view(['POST'])
@decorators.permission_classes([TwitterAccountBanPermission])
def ban(request):
"""Bans a twitter account from using the AoA tool."""
username = json.loads(request.body).get('username')
if not username:
raise GenericAPIException(status.HTTP_400_BAD_REQUEST,
'Username not provided.')
username = username[1:] if username.startswith('@') else username
account, created = TwitterAccount.objects.get_or_create(
username=username,
defaults={'banned': True}
)
if not created and account.banned:
raise GenericAPIException(
status.HTTP_409_CONFLICT,
'This account is already banned!'
)
else:
account.banned = True
account.save()
return Response({'success': 'Account banned successfully!'})
@decorators.api_view(['POST'])
@decorators.permission_classes([TwitterAccountBanPermission])
def unban(request):
"""Unbans a twitter account from using the AoA tool."""
usernames = json.loads(request.body).get('usernames')
if not usernames:
raise GenericAPIException(status.HTTP_400_BAD_REQUEST,
'Usernames not provided.')
accounts = TwitterAccount.objects.filter(username__in=usernames)
for account in accounts:
if account and account.banned:
account.banned = False
account.save()
message = {'success': '{0} users unbanned successfully.'
.format(len(accounts))}
return Response(message)
@decorators.api_view(['POST'])
@decorators.permission_classes([TwitterAccountIgnorePermission])
def ignore(request):
"""Ignores a twitter account from showing up in the AoA tool."""
username = json.loads(request.body).get('username')
if not username:
raise GenericAPIException(status.HTTP_400_BAD_REQUEST,
'Username not provided.')
username = username[1:] if username.startswith('@') else username
account, created = TwitterAccount.objects.get_or_create(
username=username,
defaults={'ignored': True}
)
if not created and account.ignored:
raise GenericAPIException(
status.HTTP_409_CONFLICT,
'This account is already in the ignore list!'
)
else:
account.ignored = True
account.save()
return Response({'success': 'Account is now being ignored!'})
@decorators.api_view(['POST'])
@decorators.permission_classes([TwitterAccountIgnorePermission])
def unignore(request):
"""Unignores a twitter account from showing up in the AoA tool."""
usernames = json.loads(request.body).get('usernames')
if not usernames:
raise GenericAPIException(status.HTTP_400_BAD_REQUEST,
'Usernames not provided.')
accounts = TwitterAccount.objects.filter(username__in=usernames)
for account in accounts:
if account and account.ignored:
account.ignored = False
account.save()
message = {'success': '{0} users unignored successfully.'
.format(len(accounts))}
return Response(message)
| bsd-3-clause |
xxsergzzxx/python-for-android | python3-alpha/python3-src/Lib/sre_compile.py | 48 | 16292 | #
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import _sre, sys
import sre_parse
from sre_constants import *
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFF
def _identityfunction(x):
return x
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
for op, av in pattern:
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
emit(_sre.getlower(av, flags))
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = _identityfunction
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error("internal: unsupported template operator")
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error("look-behind requires fixed-width pattern")
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
for tail in tail:
code[tail] = _len(code) - tail
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None):
# compile charset subprogram
emit = code.append
if fixup is None:
fixup = _identityfunction
for op, av in _optimize_charset(charset, fixup):
emit(OPCODES[op])
if op is NEGATE:
pass
elif op is LITERAL:
emit(fixup(av))
elif op is RANGE:
emit(fixup(av[0]))
emit(fixup(av[1]))
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CHCODES[CH_LOCALE[av]])
elif flags & SRE_FLAG_UNICODE:
emit(CHCODES[CH_UNICODE[av]])
else:
emit(CHCODES[av])
else:
raise error("internal: unsupported set operator")
emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup):
# internal: optimize character set
out = []
outappend = out.append
charmap = [0]*256
try:
for op, av in charset:
if op is NEGATE:
outappend((op, av))
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could append to charmap tail
return charset # cannot compress
except IndexError:
# character set contains unicode characters
return _optimize_unicode(charset, fixup)
# compress character map
i = p = n = 0
runs = []
runsappend = runs.append
for c in charmap:
if c:
if n == 0:
p = i
n = n + 1
elif n:
runsappend((p, n))
n = 0
i = i + 1
if n:
runsappend((p, n))
if len(runs) <= 2:
# use literal/range
for p, n in runs:
if n == 1:
outappend((LITERAL, p))
else:
outappend((RANGE, (p, p+n-1)))
if len(out) < len(charset):
return out
else:
# use bitmap
data = _mk_bitmap(charmap)
outappend((CHARSET, data))
return out
return charset
def _mk_bitmap(bits):
data = []
dataappend = data.append
if _sre.CODESIZE == 2:
start = (1, 0)
else:
start = (1, 0)
m, v = start
for c in bits:
if c:
v = v + m
m = m + m
if m > MAXCODE:
dataappend(v)
m, v = start
return data
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 16-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (128 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of chunks (16 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of UTF-16 has not yet been developed. This means,
# in particular, that negated charsets cannot be represented as
# bigcharsets.
def _optimize_unicode(charset, fixup):
try:
import array
except ImportError:
return charset
charmap = [0]*65536
negate = 0
try:
for op, av in charset:
if op is NEGATE:
negate = 1
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could expand category
return charset # cannot compress
except IndexError:
# non-BMP characters
return charset
if negate:
if sys.maxunicode != 65535:
# XXX: negation does not work with big charsets
return charset
for i in range(65536):
charmap[i] = not charmap[i]
comps = {}
mapping = [0]*256
block = 0
data = []
for i in range(256):
chunk = tuple(charmap[i*256:(i+1)*256])
new = comps.setdefault(chunk, block)
mapping[i] = new
if new == block:
block = block + 1
data = data + _mk_bitmap(chunk)
header = [block]
if _sre.CODESIZE == 2:
code = 'H'
else:
code = 'I'
# Convert block indices to byte array of 256 bytes
mapping = array.array('b', mapping).tobytes()
# Convert byte array to word array
mapping = array.array(code, mapping)
assert mapping.itemsize == _sre.CODESIZE
assert len(mapping) * mapping.itemsize == 256
header = header + mapping.tolist()
data[0:0] = header
return [(BIGCHARSET, data)]
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
if lo == 0 and hi == MAXREPEAT:
raise error("nothing to repeat")
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version,
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
if lo == 0:
return # not worth it
# look for a literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = 0
charset = [] # not used
charsetappend = charset.append
if not (flags & SRE_FLAG_IGNORECASE):
# look for literal prefix
for op, av in pattern.data:
if op is LITERAL:
if len(prefix) == prefix_skip:
prefix_skip = prefix_skip + 1
prefixappend(av)
elif op is SUBPATTERN and len(av[1]) == 1:
op, av = av[1][0]
if op is LITERAL:
prefixappend(av)
else:
break
else:
break
# if no prefix, look for charset prefix
if not prefix and pattern.data:
op, av = pattern.data[0]
if op is SUBPATTERN and av[1]:
op, av = av[1][0]
if op is LITERAL:
charsetappend((op, av))
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is IN:
charset = av
## if prefix:
## print "*** PREFIX", prefix, prefix_skip
## if charset:
## print "*** CHARSET", charset
# add an info block
emit = code.append
emit(OPCODES[INFO])
skip = len(code); emit(0)
# literal flag
mask = 0
if prefix:
mask = SRE_INFO_PREFIX
if len(prefix) == prefix_skip == len(pattern.data):
mask = mask + SRE_INFO_LITERAL
elif charset:
mask = mask + SRE_INFO_CHARSET
emit(mask)
# pattern length
if lo < MAXCODE:
emit(lo)
else:
emit(MAXCODE)
prefix = prefix[:MAXCODE]
if hi < MAXCODE:
emit(hi)
else:
emit(0)
# add literal prefix
if prefix:
emit(len(prefix)) # length
emit(prefix_skip) # skip
code.extend(prefix)
# generate overlap table
table = [-1] + ([0]*len(prefix))
for i in range(len(prefix)):
table[i+1] = table[i]+1
while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
table[i+1] = table[table[i+1]-1]+1
code.extend(table[1:]) # don't store first entry
elif charset:
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
def isstring(obj):
return isinstance(obj, (str, bytes))
def _code(p, flags):
flags = p.pattern.flags | flags
code = []
# compile info block
_compile_info(code, p, flags)
# compile the pattern
_compile(code, p.data, flags)
code.append(OPCODES[SUCCESS])
return code
def compile(p, flags=0):
# internal: convert pattern list to internal format
if isstring(p):
pattern = p
p = sre_parse.parse(p, flags)
else:
pattern = None
code = _code(p, flags)
# print code
# XXX: <fl> get rid of this limitation!
if p.pattern.groups > 100:
raise AssertionError(
"sorry, but this version only supports 100 named groups"
)
# map in either direction
groupindex = p.pattern.groupdict
indexgroup = [None] * p.pattern.groups
for k, i in groupindex.items():
indexgroup[i] = k
return _sre.compile(
pattern, flags | p.pattern.flags, code,
p.pattern.groups-1,
groupindex, indexgroup
)
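if __name__ == "__main__":
    # Minimal illustrative sketch, not part of the stdlib module: compiling a
    # pattern through this internal entry point mirrors what re.compile()
    # ultimately does with the parsed pattern.
    pattern = compile("a(b+)c")
    match = pattern.match("abbbc")
    print(match.group(1))  # -> 'bbb'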
| apache-2.0 |
phihag/youtube-dl | youtube_dl/extractor/foxsports.py | 31 | 1391 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
smuggle_url,
update_url_query,
)
class FoxSportsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?foxsports\.com/(?:[^/]+/)*(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.foxsports.com/tennessee/video/432609859715',
'md5': 'b49050e955bebe32c301972e4012ac17',
'info_dict': {
'id': 'bwduI3X_TgUB',
'ext': 'mp4',
'title': 'Courtney Lee on going up 2-0 in series vs. Blazers',
'description': 'Courtney Lee talks about Memphis being focused.',
'upload_date': '20150423',
'timestamp': 1429761109,
'uploader': 'NEWA-FNG-FOXSPORTS',
},
'add_ie': ['ThePlatform'],
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
config = self._parse_json(
self._html_search_regex(
r"""class="[^"]*(?:fs-player|platformPlayer-wrapper)[^"]*".+?data-player-config='([^']+)'""",
webpage, 'data player config'),
video_id)
return self.url_result(smuggle_url(update_url_query(
config['releaseURL'], {
'mbr': 'true',
'switch': 'http',
}), {'force_smil_url': True}))
| unlicense |
leafclick/intellij-community | plugins/hg4idea/testData/bin/hgext/extdiff.py | 93 | 12642 | # extdiff.py - external diff program support for mercurial
#
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to allow external programs to compare revisions
The extdiff Mercurial extension allows you to use external programs
to compare revisions, or revision with working directory. The external
diff programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
The extdiff extension also allows you to configure new diff commands, so
you do not need to type :hg:`extdiff -p kdiff3` always. ::
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called vdiff, runs kdiff3
vdiff = kdiff3
# add new command called meld, runs meld (no need to name twice)
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
# (see http://www.vim.org/scripts/script.php?script_id=102) Non
# English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
vimdiff = gvim -f "+next" \\
"+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
Tool arguments can include variables that are expanded at runtime::
$parent1, $plabel1 - filename, descriptive label of first parent
$child, $clabel - filename, descriptive label of child revision
$parent2, $plabel2 - filename, descriptive label of second parent
$root - repository root
$parent is an alias for $parent1.
The extdiff extension will look in your [diff-tools] and [merge-tools]
sections for diff tool arguments, when none are specified in [extdiff].
::
[extdiff]
kdiff3 =
[diff-tools]
kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
You can use -I/-X and list of file or directory names like normal
:hg:`diff` command. The extdiff extension makes snapshots of only
needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
from mercurial.i18n import _
from mercurial.node import short, nullid
from mercurial import scmutil, util, commands, encoding
import os, shlex, shutil, tempfile, re
testedwith = 'internal'
def snapshot(ui, repo, files, node, tmproot):
'''snapshot files as of some revision
if not using snapshot, -I/-X does not work and recursive diff
in tools like kdiff3 and meld displays too many files.'''
dirname = os.path.basename(repo.root)
if dirname == "":
dirname = "root"
if node is not None:
dirname = '%s.%s' % (dirname, short(node))
base = os.path.join(tmproot, dirname)
os.mkdir(base)
if node is not None:
ui.note(_('making snapshot of %d files from rev %s\n') %
(len(files), short(node)))
else:
ui.note(_('making snapshot of %d files from working directory\n') %
(len(files)))
wopener = scmutil.opener(base)
fns_and_mtime = []
ctx = repo[node]
for fn in files:
wfn = util.pconvert(fn)
if wfn not in ctx:
# File doesn't exist; could be a bogus modify
continue
ui.note(' %s\n' % wfn)
dest = os.path.join(base, wfn)
fctx = ctx[wfn]
data = repo.wwritedata(wfn, fctx.data())
if 'l' in fctx.flags():
wopener.symlink(data, wfn)
else:
wopener.write(wfn, data)
if 'x' in fctx.flags():
util.setflags(dest, False, True)
if node is None:
fns_and_mtime.append((dest, repo.wjoin(fn),
os.lstat(dest).st_mtime))
return dirname, fns_and_mtime
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
'''Do the actual diff:
- copy to a temp structure if diffing 2 internal revisions
- copy to a temp structure if diffing working revision with
another one and more than 1 file is changed
- just invoke the diff for a single file in the working dir
'''
revs = opts.get('rev')
change = opts.get('change')
args = ' '.join(diffopts)
do3way = '$parent2' in args
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise util.Abort(msg)
elif change:
node2 = scmutil.revsingle(repo, change, None).node()
node1a, node1b = repo.changelog.parents(node2)
else:
node1a, node2 = scmutil.revpair(repo, revs)
if not revs:
node1b = repo.dirstate.p2()
else:
node1b = nullid
# Disable 3-way merge if there is only one parent
if do3way:
if node1b == nullid:
do3way = False
matcher = scmutil.match(repo[node2], pats, opts)
mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
if do3way:
mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
else:
mod_b, add_b, rem_b = set(), set(), set()
modadd = mod_a | add_a | mod_b | add_b
common = modadd | rem_a | rem_b
if not common:
return 0
tmproot = tempfile.mkdtemp(prefix='extdiff.')
try:
# Always make a copy of node1a (and node1b, if applicable)
dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
rev1a = '@%d' % repo[node1a].rev()
if do3way:
dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
rev1b = '@%d' % repo[node1b].rev()
else:
dir1b = None
rev1b = ''
fns_and_mtime = []
# If node2 in not the wc or there is >1 change, copy it
dir2root = ''
rev2 = ''
if node2:
dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
rev2 = '@%d' % repo[node2].rev()
elif len(common) > 1:
#we only actually need to get the files to copy back to
#the working dir in this case (because the other cases
#are: diffing 2 revisions or single file -- in which case
#the file is already directly passed to the diff tool).
dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
else:
# This lets the diff tool open the changed file directly
dir2 = ''
dir2root = repo.root
label1a = rev1a
label1b = rev1b
label2 = rev2
# If only one change, diff the files instead of the directories
# Handle bogus modifies correctly by checking if the files exist
if len(common) == 1:
common_file = util.localpath(common.pop())
dir1a = os.path.join(tmproot, dir1a, common_file)
label1a = common_file + rev1a
if not os.path.isfile(dir1a):
dir1a = os.devnull
if do3way:
dir1b = os.path.join(tmproot, dir1b, common_file)
label1b = common_file + rev1b
if not os.path.isfile(dir1b):
dir1b = os.devnull
dir2 = os.path.join(dir2root, dir2, common_file)
label2 = common_file + rev2
# Function to quote file/dir names in the argument string.
# When not operating in 3-way mode, an empty string is
# returned for parent2
replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
plabel1=label1a, plabel2=label1b,
clabel=label2, child=dir2,
root=repo.root)
def quote(match):
key = match.group()[1:]
if not do3way and key == 'parent2':
return ''
return util.shellquote(replace[key])
# Match parent2 first, so 'parent1?' will match both parent1 and parent
regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
if not do3way and not re.search(regex, args):
args += ' $parent1 $child'
args = re.sub(regex, quote, args)
cmdline = util.shellquote(diffcmd) + ' ' + args
ui.debug('running %r in %s\n' % (cmdline, tmproot))
util.system(cmdline, cwd=tmproot, out=ui.fout)
for copy_fn, working_fn, mtime in fns_and_mtime:
if os.lstat(copy_fn).st_mtime != mtime:
ui.debug('file changed while diffing. '
'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
util.copyfile(copy_fn, working_fn)
return 1
finally:
ui.note(_('cleaning up temp directory\n'))
shutil.rmtree(tmproot)
def extdiff(ui, repo, *pats, **opts):
'''use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using
an external program. The default program used is diff, with
default options "-Npru".
To select a different program, use the -p/--program option. The
program will be passed the names of two directories to compare. To
pass additional options to the program, use -o/--option. These
will be passed before the names of the directories to compare.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.'''
program = opts.get('program')
option = opts.get('option')
if not program:
program = 'diff'
option = option or ['-Npru']
return dodiff(ui, repo, program, option, pats, opts)
cmdtable = {
"extdiff":
(extdiff,
[('p', 'program', '',
_('comparison program to run'), _('CMD')),
('o', 'option', [],
_('pass option to comparison program'), _('OPT')),
('r', 'rev', [],
_('revision'), _('REV')),
('c', 'change', '',
_('change made by revision'), _('REV')),
] + commands.walkopts,
_('hg extdiff [OPT]... [FILE]...')),
}
def uisetup(ui):
for cmd, path in ui.configitems('extdiff'):
if cmd.startswith('cmd.'):
cmd = cmd[4:]
if not path:
path = cmd
diffopts = ui.config('extdiff', 'opts.' + cmd, '')
diffopts = diffopts and [diffopts] or []
elif cmd.startswith('opts.'):
continue
else:
# command = path opts
if path:
diffopts = shlex.split(path)
path = diffopts.pop(0)
else:
path, diffopts = cmd, []
# look for diff arguments in [diff-tools] then [merge-tools]
if diffopts == []:
args = ui.config('diff-tools', cmd+'.diffargs') or \
ui.config('merge-tools', cmd+'.diffargs')
if args:
diffopts = shlex.split(args)
def save(cmd, path, diffopts):
'''use closure to save diff command to use'''
def mydiff(ui, repo, *pats, **opts):
return dodiff(ui, repo, path, diffopts + opts['option'],
pats, opts)
doc = _('''\
use %(path)s to diff repository (or selected files)
Show differences between revisions for the specified files, using
the %(path)s program.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.\
''') % dict(path=util.uirepr(path))
# We must translate the docstring right away since it is
# used as a format string. The string will unfortunately
# be translated again in commands.helpcmd and this will
# fail when the docstring contains non-ASCII characters.
# Decoding the string to a Unicode string here (using the
# right encoding) prevents that.
mydiff.__doc__ = doc.decode(encoding.encoding)
return mydiff
cmdtable[cmd] = (save(cmd, path, diffopts),
cmdtable['extdiff'][1][1:],
_('hg %s [OPTION]... [FILE]...') % cmd)
commands.inferrepo += " extdiff"
| apache-2.0 |
r351574nc3/kualigan-courseware | modules/review/stats.py | 7 | 5775 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for displaying peer review analytics."""
__author__ = 'Sean Lip ([email protected])'
import os
from common import safe_dom
from controllers.utils import ApplicationHandler
from controllers.utils import HUMAN_READABLE_TIME_FORMAT
import jinja2
from models import courses
from models import jobs
from models import transforms
from models import utils
from modules.review import peer
class ReviewStatsAggregator(object):
"""Aggregates peer review statistics."""
def __init__(self):
# This dict records, for each unit, how many submissions have a given
# number of completed reviews. The format of each key-value pair is
# unit_id: {num_reviews: count_of_submissions}
self.counts_by_completed_reviews = {}
def visit(self, review_summary):
unit_id = review_summary.unit_id
if unit_id not in self.counts_by_completed_reviews:
self.counts_by_completed_reviews[unit_id] = {}
count = review_summary.completed_count
if count not in self.counts_by_completed_reviews[unit_id]:
self.counts_by_completed_reviews[unit_id][count] = 1
else:
self.counts_by_completed_reviews[unit_id][count] += 1
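if __name__ == '__main__':
    # Hedged sketch, not part of the original module: _FakeSummary is a
    # hypothetical stand-in for peer.ReviewSummary, used only to show the
    # shape of counts_by_completed_reviews described in __init__ above.
    class _FakeSummary(object):
        def __init__(self, unit_id, completed_count):
            self.unit_id = unit_id
            self.completed_count = completed_count

    aggregator = ReviewStatsAggregator()
    for summary in [_FakeSummary('unit-1', 0), _FakeSummary('unit-1', 2),
                    _FakeSummary('unit-1', 2)]:
        aggregator.visit(summary)
    assert aggregator.counts_by_completed_reviews == {'unit-1': {0: 1, 2: 2}}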
class ComputeReviewStats(jobs.DurableJob):
"""A job for computing peer review statistics."""
def run(self):
"""Computes peer review statistics."""
stats = ReviewStatsAggregator()
mapper = utils.QueryMapper(
peer.ReviewSummary.all(), batch_size=500, report_every=1000)
mapper.run(stats.visit)
completed_arrays_by_unit = {}
for unit_id in stats.counts_by_completed_reviews:
max_completed_reviews = max(
stats.counts_by_completed_reviews[unit_id].keys())
completed_reviews_array = []
for i in range(max_completed_reviews + 1):
if i in stats.counts_by_completed_reviews[unit_id]:
completed_reviews_array.append(
stats.counts_by_completed_reviews[unit_id][i])
else:
completed_reviews_array.append(0)
completed_arrays_by_unit[unit_id] = completed_reviews_array
return {'counts_by_completed_reviews': completed_arrays_by_unit}
class PeerReviewStatsHandler(ApplicationHandler):
"""Shows peer review analytics on the dashboard."""
# The key used in the statistics dict that generates the dashboard page.
# Must be unique.
name = 'peer_review_stats'
# The class that generates the data to be displayed.
stats_computer = ComputeReviewStats
def get_markup(self, job):
"""Returns Jinja markup for peer review statistics."""
errors = []
stats_calculated = False
update_message = safe_dom.Text('')
course = courses.Course(self)
serialized_units = []
if not job:
update_message = safe_dom.Text(
'Peer review statistics have not been calculated yet.')
else:
if job.status_code == jobs.STATUS_CODE_COMPLETED:
stats = transforms.loads(job.output)
stats_calculated = True
for unit in course.get_peer_reviewed_units():
if unit.unit_id in stats['counts_by_completed_reviews']:
unit_stats = (
stats['counts_by_completed_reviews'][unit.unit_id])
serialized_units.append({
'stats': unit_stats,
'title': unit.title,
'unit_id': unit.unit_id,
})
update_message = safe_dom.Text("""
Peer review statistics were last updated at
%s in about %s second(s).""" % (
job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT),
job.execution_time_sec))
elif job.status_code == jobs.STATUS_CODE_FAILED:
update_message = safe_dom.NodeList().append(
safe_dom.Text("""
There was an error updating peer review statistics.
Here is the message:""")
).append(
safe_dom.Element('br')
).append(
safe_dom.Element('blockquote').add_child(
safe_dom.Element('pre').add_text('\n%s' % job.output)))
else:
update_message = safe_dom.Text("""
Peer review statistics update started at %s and is running
now. Please come back shortly.""" % job.updated_on.strftime(
HUMAN_READABLE_TIME_FORMAT))
return jinja2.utils.Markup(self.get_template(
'stats.html', [os.path.dirname(__file__)]
).render({
'errors': errors,
'serialized_units': serialized_units,
'serialized_units_json': transforms.dumps(serialized_units),
'stats_calculated': stats_calculated,
'update_message': update_message,
}, autoescape=True))
| apache-2.0 |
ZachMassia/platformio | platformio/pkgmanager.py | 1 | 5564 | # Copyright 2014-2016 Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import makedirs, remove
from os.path import basename, isdir, isfile, join
from shutil import rmtree
from time import time
import click
import requests
from platformio import exception, telemetry, util
from platformio.app import get_state_item, set_state_item
from platformio.downloader import FileDownloader
from platformio.unpacker import FileUnpacker
class PackageManager(object):
def __init__(self):
self._package_dir = join(util.get_home_dir(), "packages")
if not isdir(self._package_dir):
makedirs(self._package_dir)
assert isdir(self._package_dir)
@classmethod
@util.memoized
def get_manifest(cls):
return util.get_api_result("/packages/manifest")
@staticmethod
def download(url, dest_dir, sha1=None):
fd = FileDownloader(url, dest_dir)
fd.start()
fd.verify(sha1)
return fd.get_filepath()
@staticmethod
def unpack(pkgpath, dest_dir):
fu = FileUnpacker(pkgpath, dest_dir)
return fu.start()
@staticmethod
def get_installed():
return get_state_item("installed_packages", {})
def get_outdated(self):
outdated = []
for name, data in self.get_installed().items():
if data['version'] != self.get_info(name)['version']:
outdated.append(name)
return outdated
def is_installed(self, name):
return name in self.get_installed()
def get_info(self, name, version=None):
manifest = self.get_manifest()
if name not in manifest:
raise exception.UnknownPackage(name)
# check system platform
systype = util.get_systype()
builds = ([b for b in manifest[name] if b['system'] == "all" or systype
in b['system']])
if not builds:
raise exception.NonSystemPackage(name, systype)
if version:
for b in builds:
if b['version'] == version:
return b
raise exception.InvalidPackageVersion(name, version)
else:
return sorted(builds, key=lambda s: s['version'])[-1]
def install(self, name):
click.echo("Installing %s package:" % click.style(name, fg="cyan"))
if self.is_installed(name):
click.secho("Already installed", fg="yellow")
return False
info = self.get_info(name)
pkg_dir = join(self._package_dir, name)
if not isdir(pkg_dir):
makedirs(pkg_dir)
dlpath = None
try:
dlpath = self.download(info['url'], pkg_dir, info['sha1'])
except (requests.exceptions.ConnectionError,
requests.exceptions.ChunkedEncodingError,
exception.FDUnrecognizedStatusCode, StopIteration):
if info['url'].startswith("http://sourceforge.net"):
dlpath = self.download(
"http://dl.platformio.org/packages/%s" %
basename(info['url']), pkg_dir, info['sha1'])
assert isfile(dlpath)
if self.unpack(dlpath, pkg_dir):
self._register(name, info['version'])
# remove archive
remove(dlpath)
telemetry.on_event(
category="PackageManager", action="Install", label=name)
def uninstall(self, name):
click.echo("Uninstalling %s package: \t" %
click.style(name, fg="cyan"), nl=False)
if not self.is_installed(name):
click.secho("Not installed", fg="yellow")
return False
rmtree(join(self._package_dir, name))
self._unregister(name)
click.echo("[%s]" % click.style("OK", fg="green"))
# report usage
telemetry.on_event(
category="PackageManager", action="Uninstall", label=name)
def update(self, name):
click.echo("Updating %s package:" % click.style(name, fg="yellow"))
installed = self.get_installed()
current_version = installed[name]['version']
latest_version = self.get_info(name)['version']
click.echo("Versions: Current=%d, Latest=%d \t " %
(current_version, latest_version), nl=False)
if current_version == latest_version:
click.echo("[%s]" % (click.style("Up-to-date", fg="green")))
return True
else:
click.echo("[%s]" % (click.style("Out-of-date", fg="red")))
self.uninstall(name)
self.install(name)
telemetry.on_event(
category="PackageManager", action="Update", label=name)
def _register(self, name, version):
data = self.get_installed()
data[name] = {
"version": version,
"time": int(time())
}
set_state_item("installed_packages", data)
def _unregister(self, name):
data = self.get_installed()
del data[name]
set_state_item("installed_packages", data)
| apache-2.0 |
chjw8016/GreenOdoo7-haibao | openerp/addons/auth_oauth/controllers/main.py | 20 | 4484 | import functools
import logging
import simplejson
import werkzeug.utils
from werkzeug.exceptions import BadRequest
import openerp
from openerp import SUPERUSER_ID
import openerp.addons.web.http as oeweb
from openerp.addons.web.controllers.main import db_monodb, set_cookie_and_redirect, login_and_redirect
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# helpers
#----------------------------------------------------------
def fragment_to_query_string(func):
@functools.wraps(func)
def wrapper(self, req, **kw):
if not kw:
return """<html><head><script>
var l = window.location;
var q = l.hash.substring(1);
var r = '/' + l.search;
if(q.length !== 0) {
var s = l.search ? (l.search === '?' ? '' : '&') : '?';
r = l.pathname + l.search + s + q;
}
window.location = r;
</script></head><body></body></html>"""
return func(self, req, **kw)
return wrapper
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class OAuthController(oeweb.Controller):
_cp_path = '/auth_oauth'
@oeweb.jsonrequest
def list_providers(self, req, dbname):
try:
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
providers = registry.get('auth.oauth.provider')
l = providers.read(cr, SUPERUSER_ID, providers.search(cr, SUPERUSER_ID, [('enabled', '=', True)]))
except Exception:
l = []
return l
@oeweb.httprequest
@fragment_to_query_string
def signin(self, req, **kw):
state = simplejson.loads(kw['state'])
dbname = state['d']
provider = state['p']
context = state.get('c', {})
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
try:
u = registry.get('res.users')
credentials = u.auth_oauth(cr, SUPERUSER_ID, provider, kw, context=context)
cr.commit()
action = state.get('a')
menu = state.get('m')
url = '/'
if action:
url = '/#action=%s' % action
elif menu:
url = '/#menu_id=%s' % menu
return login_and_redirect(req, *credentials, redirect_url=url)
except AttributeError:
# auth_signup is not installed
_logger.error("auth_signup not installed on database %s: oauth sign up cancelled." % (dbname,))
url = "/#action=login&oauth_error=1"
except openerp.exceptions.AccessDenied:
# oauth credentials not valid, user could be on a temporary session
_logger.info('OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies')
url = "/#action=login&oauth_error=3"
redirect = werkzeug.utils.redirect(url, 303)
redirect.autocorrect_location_header = False
return redirect
except Exception, e:
# signup error
_logger.exception("OAuth2: %s" % str(e))
url = "/#action=login&oauth_error=2"
return set_cookie_and_redirect(req, url)
@oeweb.httprequest
def oea(self, req, **kw):
"""login user via OpenERP Account provider"""
dbname = kw.pop('db', None)
if not dbname:
dbname = db_monodb(req)
if not dbname:
return BadRequest()
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
IMD = registry['ir.model.data']
try:
model, provider_id = IMD.get_object_reference(cr, SUPERUSER_ID, 'auth_oauth', 'provider_openerp')
except ValueError:
return set_cookie_and_redirect(req, '/?db=%s' % dbname)
assert model == 'auth.oauth.provider'
state = {
'd': dbname,
'p': provider_id,
'c': {'no_user_creation': True},
}
kw['state'] = simplejson.dumps(state)
return self.signin(req, **kw)
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| mit |
vlachoudis/sl4a | python/src/Mac/Modules/res/resscan.py | 34 | 2701 | # Scan Resources.h header file, generate resgen.py and Resources.py files.
# Then run ressupport to generate Resmodule.c.
# (Should learn how to tell the compiler to compile it as well.)
import sys
import MacOS
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
def main():
input = "Resources.h"
output = "resgen.py"
defsoutput = TOOLBOXDIR + "Resources.py"
scanner = ResourcesScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now doing 'import ressupport' ==="
import ressupport
print "=== Done 'import ressupport'. It's up to you to compile Resmodule.c ==="
class ResourcesScanner(Scanner):
def destination(self, type, name, arglist):
classname = "ResFunction"
listname = "functions"
if arglist:
t, n, m = arglist[0]
if t == "Handle" and m == "InMode":
classname = "ResMethod"
listname = "resmethods"
return classname, listname
def makeblacklistnames(self):
return [
"ReadPartialResource",
"WritePartialResource",
"TempInsertROMMap",
## "RmveResource", # RemoveResource
## "SizeResource", # GetResourceSizeOnDisk
## "MaxSizeRsrc", # GetMaxResourceSize
# OS8 only
'RGetResource',
'OpenResFile',
'CreateResFile',
'RsrcZoneInit',
'InitResources',
'RsrcMapEntry',
]
def makeblacklisttypes(self):
return [
]
def makerepairinstructions(self):
return [
([("Str255", "*", "InMode")],
[("*", "*", "OutMode")]),
([("void_ptr", "*", "InMode"), ("long", "*", "InMode")],
[("InBuffer", "*", "*")]),
([("void", "*", "OutMode"), ("long", "*", "InMode")],
[("InOutBuffer", "*", "*")]),
([("void", "*", "OutMode"), ("long", "*", "InMode"),
("long", "*", "OutMode")],
[("OutBuffer", "*", "InOutMode")]),
([("SInt8", "*", "*")],
[("SignedByte", "*", "*")]),
([("UniCharCount", "*", "InMode"), ("UniChar_ptr", "*", "InMode")],
[("UnicodeReverseInBuffer", "*", "*")]),
]
if __name__ == "__main__":
main()
| apache-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/scipy/signal/lti_conversion.py | 41 | 13971 | """
ltisys -- a collection of functions to convert linear time invariant systems
from one representation to another.
"""
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from numpy import (r_, eye, atleast_2d, poly, dot,
asarray, product, zeros, array, outer)
from scipy import linalg
from .filter_design import tf2zpk, zpk2tf, normalize
__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
'cont2discrete']
def tf2ss(num, den):
r"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree. The
denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
Examples
--------
Convert the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
to the state-space representation:
.. math::
\dot{\textbf{x}}(t) =
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
>>> from scipy.signal import tf2ss
>>> A, B, C, D = tf2ss(num, den)
>>> A
array([[-2., -1.],
[ 1., 0.]])
>>> B
array([[ 1.],
[ 0.]])
>>> C
array([[ 1., 2.]])
>>> D
array([[ 1.]])
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return (array([], float), array([], float), array([], float),
array([], float))
# pad numerator to have same number of columns has denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = atleast_2d(num[:, 0])
else:
# We don't assign it an empty array because this system
# is not 'null'. It just doesn't have a non-zero D
# matrix. Thus, it should have a non-zero shape so that
# it can be operated on by functions like 'ss2tf'
D = array([[0]], float)
if K == 1:
D = D.reshape(num.shape)
return (zeros((1, 1)), zeros((1, D.shape[1])),
zeros((D.shape[0], 1)), D)
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - outer(num[:, 0], den[1:])
D = D.reshape((C.shape[0], B.shape[1]))
return A, B, C, D
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are two-dimensional.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
See `ss2tf` for format.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
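if __name__ == "__main__":
    # Hedged illustration, not part of the original scipy module: given only
    # A and D, abcd_normalize infers compatibly shaped zero B and C matrices
    # (p = 2 states, q = 1 input, r = 1 output here).
    A = [[0.0, 1.0], [-1.0, 0.0]]
    D = [[0.0]]
    A2, B2, C2, D2 = abcd_normalize(A=A, D=D)
    print(B2.shape, C2.shape)  # (2, 1) (1, 2)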
def ss2tf(A, B, C, D, input=0):
r"""State-space to transfer function.
A, B, C, D defines a linear state-space system with `p` inputs,
`q` outputs, and `n` state variables.
Parameters
----------
A : array_like
State (or system) matrix of shape ``(n, n)``
B : array_like
Input matrix of shape ``(n, p)``
C : array_like
Output matrix of shape ``(q, n)``
D : array_like
Feedthrough (or feedforward) matrix of shape ``(q, p)``
input : int, optional
For multiple-input systems, the index of the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
Examples
--------
Convert the state-space representation:
.. math::
\dot{\textbf{x}}(t) =
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
>>> A = [[-2, -1], [1, 0]]
>>> B = [[1], [0]] # 2-dimensional column vector
>>> C = [[1, 2]] # 2-dimensional row vector
>>> D = 1
to the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy.signal import ss2tf
>>> ss2tf(A, B, C, D)
(array([[1, 3, 3]]), array([ 1., 2., 1.]))
"""
# transfer function is C (sI - A)**(-1) B + D
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make SIMO from possibly MIMO system.
B = B[:, input:input + 1]
D = D[:, input:input + 1]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
A, B, C, D defines a linear state-space system with `p` inputs,
`q` outputs, and `n` state variables.
Parameters
----------
A : array_like
State (or system) matrix of shape ``(n, n)``
B : array_like
Input matrix of shape ``(n, p)``
C : array_like
Output matrix of shape ``(q, n)``
D : array_like
Feedthrough (or feedforward) matrix of shape ``(q, p)``
input : int, optional
For multiple-input systems, the index of the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
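# Usage sketch (illustrative values, not from the original source): a
# zero-pole-gain description can be round-tripped through state space with
# zpk2ss and ss2zpk; zeros and poles come back up to ordering and float error.
# >>> A, B, C, D = zpk2ss([-1.0], [-2.0, -3.0], 1.0)  # H(s) = (s+1)/((s+2)(s+3))
# >>> z, p, k = ss2zpk(A, B, C, D)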
def cont2discrete(system, dt, method="zoh", alpha=None):
"""
Transform a continuous to a discrete state-space system.
Parameters
----------
system : a tuple describing the system or an instance of `lti`
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
dt : float
The discretization time step.
method : {"gbt", "bilinear", "euler", "backward_diff", "zoh"}, optional
Which method to use:
* gbt: generalized bilinear transformation
* bilinear: Tustin's approximation ("gbt" with alpha=0.5)
* euler: Euler (or forward differencing) method ("gbt" with alpha=0)
* backward_diff: Backwards differencing ("gbt" with alpha=1.0)
* zoh: zero-order hold (default)
alpha : float within [0, 1], optional
The generalized bilinear transformation weighting parameter, which
        should only be specified with method="gbt", and is ignored otherwise.
Returns
-------
sysd : tuple containing the discrete system
Based on the input type, the output will be of the form
* (num, den, dt) for transfer function input
* (zeros, poles, gain, dt) for zeros-poles-gain input
* (A, B, C, D, dt) for state-space system input
Notes
-----
By default, the routine uses a Zero-Order Hold (zoh) method to perform
the transformation. Alternatively, a generalized bilinear transformation
may be used, which includes the common Tustin's bilinear approximation,
an Euler's method technique, or a backwards differencing technique.
    The Zero-Order Hold (zoh) method is based on [1]_, while the generalized
    bilinear approximation is based on [2]_ and [3]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
.. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf
.. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized
bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754,
2009.
(http://www.ece.ualberta.ca/~gfzhang/research/ZCC07_preprint.pdf)
"""
if len(system) == 1:
return system.to_discrete()
if len(system) == 2:
sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method,
alpha=alpha)
return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
elif len(system) == 3:
sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt,
method=method, alpha=alpha)
return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
elif len(system) == 4:
a, b, c, d = system
else:
raise ValueError("First argument must either be a tuple of 2 (tf), "
"3 (zpk), or 4 (ss) arrays.")
if method == 'gbt':
if alpha is None:
raise ValueError("Alpha parameter must be specified for the "
"generalized bilinear transform (gbt) method")
elif alpha < 0 or alpha > 1:
raise ValueError("Alpha parameter must be within the interval "
"[0,1] for the gbt method")
if method == 'gbt':
# This parameter is used repeatedly - compute once here
ima = np.eye(a.shape[0]) - alpha*dt*a
ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)
bd = linalg.solve(ima, dt*b)
# Similarly solve for the output equation matrices
cd = linalg.solve(ima.transpose(), c.transpose())
cd = cd.transpose()
dd = d + alpha*np.dot(c, bd)
elif method == 'bilinear' or method == 'tustin':
return cont2discrete(system, dt, method="gbt", alpha=0.5)
elif method == 'euler' or method == 'forward_diff':
return cont2discrete(system, dt, method="gbt", alpha=0.0)
elif method == 'backward_diff':
return cont2discrete(system, dt, method="gbt", alpha=1.0)
elif method == 'zoh':
# Build an exponential matrix
em_upper = np.hstack((a, b))
# Need to stack zeros under the a and b matrices
em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),
np.zeros((b.shape[1], b.shape[1]))))
em = np.vstack((em_upper, em_lower))
ms = linalg.expm(dt * em)
# Dispose of the lower rows
ms = ms[:a.shape[0], :]
ad = ms[:, 0:a.shape[1]]
bd = ms[:, a.shape[1]:]
cd = c
dd = d
else:
raise ValueError("Unknown transformation method '%s'" % method)
return ad, bd, cd, dd, dt
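# Usage sketch (illustrative, not part of the original source): discretize a
# first-order low-pass H(s) = 1/(s + 1) with a 0.1 s step, first with the
# default zero-order hold and then with Tustin's approximation.
# >>> numd, dend, dt = cont2discrete(([1.0], [1.0, 1.0]), 0.1)
# >>> numt, dent, dt = cont2discrete(([1.0], [1.0, 1.0]), 0.1, method='bilinear')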
| mit |
Brett55/moto | tests/test_autoscaling/test_elbv2.py | 4 | 4795 | from __future__ import unicode_literals
import boto3
import sure # noqa
from moto import mock_autoscaling, mock_ec2, mock_elbv2
from utils import setup_networking
@mock_elbv2
@mock_autoscaling
def test_attach_detach_target_groups():
mocked_networking = setup_networking()
INSTANCE_COUNT = 2
client = boto3.client('autoscaling', region_name='us-east-1')
elbv2_client = boto3.client('elbv2', region_name='us-east-1')
response = elbv2_client.create_target_group(
Name='a-target',
Protocol='HTTP',
Port=8080,
VpcId=mocked_networking['vpc'],
HealthCheckProtocol='HTTP',
HealthCheckPort='8080',
HealthCheckPath='/',
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={'HttpCode': '200'})
target_group_arn = response['TargetGroups'][0]['TargetGroupArn']
client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration')
# create asg, attach to target group on create
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
TargetGroupARNs=[target_group_arn],
VPCZoneIdentifier=mocked_networking['subnet1'])
# create asg without attaching to target group
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg2',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
VPCZoneIdentifier=mocked_networking['subnet2'])
response = client.describe_load_balancer_target_groups(
AutoScalingGroupName='test_asg')
list(response['LoadBalancerTargetGroups']).should.have.length_of(1)
response = elbv2_client.describe_target_health(
TargetGroupArn=target_group_arn)
list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT)
client.attach_load_balancer_target_groups(
AutoScalingGroupName='test_asg2',
TargetGroupARNs=[target_group_arn])
response = elbv2_client.describe_target_health(
TargetGroupArn=target_group_arn)
list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT * 2)
response = client.detach_load_balancer_target_groups(
AutoScalingGroupName='test_asg2',
TargetGroupARNs=[target_group_arn])
response = elbv2_client.describe_target_health(
TargetGroupArn=target_group_arn)
list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT)
@mock_elbv2
@mock_autoscaling
def test_detach_all_target_groups():
mocked_networking = setup_networking()
INSTANCE_COUNT = 2
client = boto3.client('autoscaling', region_name='us-east-1')
elbv2_client = boto3.client('elbv2', region_name='us-east-1')
response = elbv2_client.create_target_group(
Name='a-target',
Protocol='HTTP',
Port=8080,
VpcId=mocked_networking['vpc'],
HealthCheckProtocol='HTTP',
HealthCheckPort='8080',
HealthCheckPath='/',
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={'HttpCode': '200'})
target_group_arn = response['TargetGroups'][0]['TargetGroupArn']
client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration')
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
TargetGroupARNs=[target_group_arn],
VPCZoneIdentifier=mocked_networking['vpc'])
response = client.describe_load_balancer_target_groups(
AutoScalingGroupName='test_asg')
list(response['LoadBalancerTargetGroups']).should.have.length_of(1)
response = elbv2_client.describe_target_health(
TargetGroupArn=target_group_arn)
list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT)
response = client.detach_load_balancer_target_groups(
AutoScalingGroupName='test_asg',
TargetGroupARNs=[target_group_arn])
response = elbv2_client.describe_target_health(
TargetGroupArn=target_group_arn)
list(response['TargetHealthDescriptions']).should.have.length_of(0)
response = client.describe_load_balancer_target_groups(
AutoScalingGroupName='test_asg')
list(response['LoadBalancerTargetGroups']).should.have.length_of(0)
| apache-2.0 |
jbonofre/incubator-beam | sdks/python/apache_beam/examples/wordcount_minimal.py | 6 | 4919 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A minimalist word-counting workflow that counts words in Shakespeare.
This is the first in a series of successively more detailed 'word count'
examples.
Next, see the wordcount pipeline, then the wordcount_debugging pipeline, for
more detailed examples that introduce additional concepts.
Concepts:
1. Reading data from text files
2. Specifying 'inline' transforms
3. Counting a PCollection
4. Writing data to Cloud Storage as text files
To execute this pipeline locally, first edit the code to specify the output
location. Output location could be a local file path or an output prefix
on GCS. (Only update the output location marked with the first CHANGE comment.)
To execute this pipeline remotely, first edit the code to set your project ID,
runner type, the staging location, the temp location, and the output location.
The specified GCS bucket(s) must already exist. (Update all the places marked
with a CHANGE comment.)
Then, run the pipeline as described in the README. It will be deployed and run
using the Google Cloud Dataflow Service. No args are required to run the
pipeline. You can see the results in your output bucket in the GCS browser.
"""
from __future__ import absolute_import
import argparse
import logging
import re
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def run(argv=None):
"""Main entry point; defines and runs the wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
# CHANGE 1/5: The Google Cloud Storage path is required
# for outputting the results.
default='gs://YOUR_OUTPUT_BUCKET/AND_OUTPUT_PREFIX',
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_args.extend([
# CHANGE 2/5: (OPTIONAL) Change this to DataflowRunner to
# run your pipeline on the Google Cloud Dataflow Service.
'--runner=DirectRunner',
# CHANGE 3/5: Your project ID is required in order to run your pipeline on
# the Google Cloud Dataflow Service.
'--project=SET_YOUR_PROJECT_ID_HERE',
# CHANGE 4/5: Your Google Cloud Storage path is required for staging local
# files.
'--staging_location=gs://YOUR_BUCKET_NAME/AND_STAGING_DIRECTORY',
# CHANGE 5/5: Your Google Cloud Storage path is required for temporary
# files.
'--temp_location=gs://YOUR_BUCKET_NAME/AND_TEMP_DIRECTORY',
'--job_name=your-wordcount-job',
])
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file[pattern] into a PCollection.
lines = p | ReadFromText(known_args.input)
# Count the occurrences of each word.
counts = (
lines
| 'Split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
.with_output_types(unicode))
| 'PairWithOne' >> beam.Map(lambda x: (x, 1))
| 'GroupAndSum' >> beam.CombinePerKey(sum))
# Format the counts into a PCollection of strings.
def format_result(word_count):
(word, count) = word_count
return '%s: %s' % (word, count)
output = counts | 'Format' >> beam.Map(format_result)
# Write the output using a "Write" transform that has side effects.
# pylint: disable=expression-not-assigned
output | WriteToText(known_args.output)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| apache-2.0 |
zofuthan/edx-platform | common/test/acceptance/tests/test_ora.py | 118 | 15617 | """
Tests for ORA (Open Response Assessment) through the LMS UI.
"""
import json
from unittest import skip
from bok_choy.promise import Promise, BrokenPromise
from ..pages.lms.peer_confirm import PeerConfirmPage
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.open_response import OpenResponsePage
from ..pages.lms.peer_grade import PeerGradePage
from ..pages.lms.peer_calibrate import PeerCalibratePage
from ..pages.lms.progress import ProgressPage
from ..fixtures.course import XBlockFixtureDesc, CourseFixture
from ..fixtures.xqueue import XQueueResponseFixture
from .helpers import load_data_str, UniqueCourseTest
class OpenResponseTest(UniqueCourseTest):
"""
Tests that interact with ORA (Open Response Assessment) through the LMS UI.
This base class sets up a course with open response problems and defines
some helper functions used in the ORA tests.
"""
# Grade response (dict) to return from the XQueue stub
# in response to our unique submission text.
XQUEUE_GRADE_RESPONSE = None
def setUp(self):
"""
Install a test course with ORA problems.
Always start in the subsection with open response problems.
"""
super(OpenResponseTest, self).setUp()
# Create page objects
self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
self.course_nav = CourseNavPage(self.browser)
self.open_response = OpenResponsePage(self.browser)
self.peer_grade = PeerGradePage(self.browser)
self.peer_calibrate = PeerCalibratePage(self.browser)
self.peer_confirm = PeerConfirmPage(self.browser)
self.progress_page = ProgressPage(self.browser, self.course_id)
# Configure the test course
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
# Create a unique name for the peer assessed problem. This will show up
# in the list of peer problems, which is shared among tests running
# in parallel; it needs to be unique so we can find it.
        # It's also important that the problem has "Peer" in the name; otherwise,
# the ORA stub will ignore it.
self.peer_problem_name = "Peer-Assessed {}".format(self.unique_id[0:6])
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc(
'combinedopenended',
'Self-Assessed',
data=load_data_str('ora_self_problem.xml'),
metadata={
'graded': True,
},
),
XBlockFixtureDesc(
'combinedopenended',
'AI-Assessed',
data=load_data_str('ora_ai_problem.xml'),
metadata={
'graded': True,
},
),
XBlockFixtureDesc(
'combinedopenended',
self.peer_problem_name,
data=load_data_str('ora_peer_problem.xml'),
metadata={
'graded': True,
},
),
# This is the interface a student can use to grade his/her peers
XBlockFixtureDesc('peergrading', 'Peer Module'),
)
)
).install()
# Configure the XQueue stub's response for the text we will submit
# The submission text is unique so we can associate each response with a particular test case.
self.submission = "Test submission " + self.unique_id[0:4]
if self.XQUEUE_GRADE_RESPONSE is not None:
XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE).install()
# Log in and navigate to the essay problems
self.auth_page.visit()
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
def submit_essay(self, expected_assessment_type, expected_prompt):
"""
Submit an essay and verify that the problem uses
the `expected_assessment_type` ("self", "ai", or "peer") and
shows the `expected_prompt` (a string).
"""
# Check the assessment type and prompt
self.assertEqual(self.open_response.assessment_type, expected_assessment_type)
self.assertIn(expected_prompt, self.open_response.prompt)
# Enter a submission, which will trigger a pre-defined response from the XQueue stub.
self.open_response.set_response(self.submission)
# Save the response and expect some UI feedback
self.open_response.save_response()
self.assertEqual(
self.open_response.alert_message,
"Answer saved, but not yet submitted."
)
# Submit the response
self.open_response.submit_response()
def get_asynch_feedback(self, assessment_type):
"""
Wait for and retrieve asynchronous feedback
(e.g. from AI, instructor, or peer grading)
`assessment_type` is either "ai" or "peer".
"""
# Because the check function involves fairly complicated actions
# (navigating through several screens), we give it more time to complete
# than the default.
return Promise(
self._check_feedback_func(assessment_type),
'Got feedback for {0} problem'.format(assessment_type),
timeout=600, try_interval=5
).fulfill()
def _check_feedback_func(self, assessment_type):
"""
Navigate away from, then return to, the peer problem to
receive updated feedback.
        The returned function will return a tuple `(is_success, rubric_feedback)`,
        where `is_success` is True iff we have received feedback for the problem
        and `rubric_feedback` is a list of "correct" or "incorrect" strings.
"""
if assessment_type == 'ai':
section_name = 'AI-Assessed'
elif assessment_type == 'peer':
section_name = self.peer_problem_name
else:
raise ValueError('Assessment type not recognized. Must be either "ai" or "peer"')
def _inner_check():
self.course_nav.go_to_sequential('Self-Assessed')
self.course_nav.go_to_sequential(section_name)
try:
feedback = self.open_response.rubric.feedback
# Unsuccessful if the rubric hasn't loaded
except BrokenPromise:
return False, None
# Successful if `feedback` is a non-empty list
else:
return bool(feedback), feedback
return _inner_check
class SelfAssessmentTest(OpenResponseTest):
"""
Test ORA self-assessment.
"""
def test_self_assessment(self):
"""
Given I am viewing a self-assessment problem
When I submit an essay and complete a self-assessment rubric
Then I see a scored rubric
And I see my score in the progress page.
"""
# Navigate to the self-assessment problem and submit an essay
self.course_nav.go_to_sequential('Self-Assessed')
self.submit_essay('self', 'Censorship in the Libraries')
# Fill in the rubric and expect that we get feedback
rubric = self.open_response.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit('self')
self.assertEqual(rubric.feedback, ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# The first score is self-assessment, which we've answered, so it's 1/2
# The other scores are AI- and peer-assessment, which we haven't answered so those are 0/2
self.assertEqual(scores, [(1, 2), (0, 2), (0, 2)])
class AIAssessmentTest(OpenResponseTest):
"""
Test ORA AI-assessment.
"""
XQUEUE_GRADE_RESPONSE = {
'score': 1,
'feedback': json.dumps({"spelling": "Ok.", "grammar": "Ok.", "markup_text": "NA"}),
'grader_type': 'BC',
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_scores_complete': True,
'rubric_xml': load_data_str('ora_rubric.xml')
}
@skip('Intermittently failing, see ORA-342')
def test_ai_assessment(self):
"""
Given I am viewing an AI-assessment problem that has a trained ML model
When I submit an essay and wait for a response
Then I see a scored rubric
And I see my score in the progress page.
"""
# Navigate to the AI-assessment problem and submit an essay
self.course_nav.go_to_sequential('AI-Assessed')
self.submit_essay('ai', 'Censorship in the Libraries')
# Refresh the page to get the updated feedback
# then verify that we get the feedback sent by our stub XQueue implementation
self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# First score is the self-assessment score, which we haven't answered, so it's 0/2
# Second score is the AI-assessment score, which we have answered, so it's 1/2
# Third score is peer-assessment, which we haven't answered, so it's 0/2
self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class InstructorAssessmentTest(OpenResponseTest):
"""
Test an AI-assessment that has been graded by an instructor.
This runs the same test as the AI-assessment test, except
that the feedback comes from an instructor instead of the machine grader.
From the student's perspective, it should look the same.
"""
XQUEUE_GRADE_RESPONSE = {
'score': 1,
'feedback': json.dumps({"feedback": "Good job!"}),
'grader_type': 'IN',
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_scores_complete': True,
'rubric_xml': load_data_str('ora_rubric.xml')
}
@skip('Intermittently failing, see ORA-342')
def test_instructor_assessment(self):
"""
Given an instructor has graded my submission
When I view my submission
Then I see a scored rubric
And my progress page shows the problem score.
"""
# Navigate to the AI-assessment problem and submit an essay
# We have configured the stub to simulate that this essay will be staff-graded
self.course_nav.go_to_sequential('AI-Assessed')
self.submit_essay('ai', 'Censorship in the Libraries')
# Refresh the page to get the updated feedback
# then verify that we get the feedback sent by our stub XQueue implementation
self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# First score is the self-assessment score, which we haven't answered, so it's 0/2
# Second score is the AI-assessment score, which we have answered, so it's 1/2
# Third score is peer-assessment, which we haven't answered, so it's 0/2
self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class PeerAssessmentTest(OpenResponseTest):
"""
Test ORA peer-assessment, including calibration and giving/receiving scores.
"""
# Unlike other assessment types, peer assessment has multiple scores
XQUEUE_GRADE_RESPONSE = {
'score': [2, 2, 2],
'feedback': [json.dumps({"feedback": ""})] * 3,
'grader_type': 'PE',
'success': True,
'grader_id': [1, 2, 3],
'submission_id': 1,
'rubric_scores_complete': [True, True, True],
'rubric_xml': [load_data_str('ora_rubric.xml')] * 3
}
def test_peer_calibrate_and_grade(self):
"""
Given I am viewing a peer-assessment problem
And the instructor has submitted enough example essays
When I submit acceptable scores for enough calibration essays
Then I am able to peer-grade other students' essays.
Given I have submitted an essay for peer-assessment
        And I have peer-graded enough students' essays
And enough other students have scored my essay
Then I can view the scores and written feedback
And I see my score in the progress page.
"""
# Initially, the student should NOT be able to grade peers,
# because he/she hasn't submitted any essays.
self.course_nav.go_to_sequential('Peer Module')
self.assertIn("You currently do not have any peer grading to do", self.peer_calibrate.message)
# Submit an essay
self.course_nav.go_to_sequential(self.peer_problem_name)
self.submit_essay('peer', 'Censorship in the Libraries')
# Need to reload the page to update the peer grading module
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Select the problem to calibrate
self.course_nav.go_to_sequential('Peer Module')
self.assertIn(self.peer_problem_name, self.peer_grade.problem_list)
self.peer_grade.select_problem(self.peer_problem_name)
# Calibrate
self.peer_confirm.start(is_calibrating=True)
rubric = self.peer_calibrate.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit('peer')
self.peer_calibrate.continue_to_grading()
# Grade a peer
self.peer_confirm.start()
rubric = self.peer_grade.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit()
# Expect to receive essay feedback
# We receive feedback from all three peers, each of which
# provide 2 scores (one for each rubric item)
# Written feedback is a dummy value sent by the XQueue stub.
self.course_nav.go_to_sequential(self.peer_problem_name)
self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# First score is the self-assessment score, which we haven't answered, so it's 0/2
# Second score is the AI-assessment score, which we haven't answered, so it's 0/2
# Third score is peer-assessment, which we have answered, so it's 2/2
self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)])
| agpl-3.0 |
dermoth/gramps | gramps/gui/widgets/basicentry.py | 11 | 1586 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
__all__ = ["BasicEntry"]
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger(".widgets.basicentry")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# BasicEntry class
#
#-------------------------------------------------------------------------
class BasicEntry(Gtk.Entry):
def __init__(self):
Gtk.Entry.__init__(self)
self.set_width_chars(5)
self.show()
| gpl-2.0 |
ramadhane/odoo | addons/product_visible_discount/__init__.py | 433 | 1054 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_visible_discount
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
trafi/gyp | test/win/gyptest-cl-buffer-security-check.py | 344 | 1612 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure buffer security check setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
test.build('buffer-security-check.gyp', chdir=CHDIR)
def GetDisassemblyOfMain(exe):
# The standard library uses buffer security checks independent of our
# buffer security settings, so we extract just our code (i.e. main()) to
# check against.
full_path = test.built_file_path(exe, chdir=CHDIR)
output = test.run_dumpbin('/disasm', full_path)
result = []
in_main = False
for line in output.splitlines():
if line == '_main:':
in_main = True
elif in_main:
# Disassembly of next function starts.
if line.startswith('_'):
break
result.append(line)
return '\n'.join(result)
  # Buffer security checks are on by default, so make sure security_cookie
  # appears in the disassembly of our code.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_unset.exe'):
test.fail_test()
# Explicitly on.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_on.exe'):
test.fail_test()
# Explicitly off, shouldn't be a reference to the security cookie.
if 'security_cookie' in GetDisassemblyOfMain('test_bsc_off.exe'):
test.fail_test()
test.pass_test()
| bsd-3-clause |
amisrs/angular-flask | angular_flask/lib/python2.7/site-packages/requests/packages/charade/langhungarianmodel.py | 184 | 12761 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = {
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = {
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
# flake8: noqa
| mit |
Litiks/django_server_stats | server_stats/migrations/0001_initial.py | 1 | 6604 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PageViewLog'
db.create_table('server_stats_pageviewlog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('url', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
('gen_time', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal('server_stats', ['PageViewLog'])
# Adding model 'QueryStat'
db.create_table('server_stats_querystat', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('queries', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('server_stats', ['QueryStat'])
# Adding model 'BenchmarkMilestone'
db.create_table('server_stats_benchmarkmilestone', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('server_stats', ['BenchmarkMilestone'])
def backwards(self, orm):
# Deleting model 'PageViewLog'
db.delete_table('server_stats_pageviewlog')
# Deleting model 'QueryStat'
db.delete_table('server_stats_querystat')
# Deleting model 'BenchmarkMilestone'
db.delete_table('server_stats_benchmarkmilestone')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'server_stats.benchmarkmilestone': {
'Meta': {'object_name': 'BenchmarkMilestone'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'server_stats.pageviewlog': {
'Meta': {'object_name': 'PageViewLog'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'gen_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'server_stats.querystat': {
'Meta': {'object_name': 'QueryStat'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'queries': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['server_stats'] | mit |
vnsofthe/odoo | addons/l10n_pt/__openerp__.py | 380 | 1834 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Thinkopen Solutions, Lda. All Rights Reserved
# http://www.thinkopensolutions.com.
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portugal - Chart of Accounts',
'version': '0.011',
'author': 'ThinkOpen Solutions',
'website': 'http://www.thinkopensolutions.com/',
'category': 'Localization/Account Charts',
'description': 'Plano de contas SNC para Portugal',
'depends': ['base',
'base_vat',
'account',
'account_chart',
],
'data': [
'account_types.xml',
'account_chart.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'fiscal_position_templates.xml',
'account_taxes.xml',
'l10n_chart_pt_wizard.xml',
],
'demo': [],
'installable': True,
}
| agpl-3.0 |
tellesnobrega/storm_plugin | sahara/plugins/hdp/hadoopserver.py | 1 | 9095 | # Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from sahara.openstack.common import log as logging
from sahara.plugins.general import exceptions as ex
from sahara.plugins.hdp import saharautils
from sahara.utils import files as f
AMBARI_RPM = ('http://s3.amazonaws.com/public-repo-1.hortonworks.com/'
'ambari/centos6/1.x/updates/1.6.0/ambari.repo')
EPEL_RELEASE_PACKAGE_NAME = 'epel-release'
HADOOP_SWIFT_RPM = ('https://s3.amazonaws.com/public-repo-1.hortonworks.com/'
'sahara/swift/hadoop-swift-1.0-1.x86_64.rpm')
HADOOP_SWIFT_LOCAL_RPM = ('/opt/hdp-local-repos/hadoop-swift/'
'hadoop-swift-1.0-1.x86_64.rpm')
LOG = logging.getLogger(__name__)
class HadoopServer:
_master_ip = None
def __init__(self, instance, node_group, ambari_rpm=None):
self.instance = instance
self.node_group = node_group
self.ambari_rpm = ambari_rpm or AMBARI_RPM
def provision_ambari(self, ambari_info, cluster_spec):
self.install_rpms()
global_config = cluster_spec.configurations['global']
jdk_path = global_config.get('java64_home')
if 'AMBARI_SERVER' in self.node_group.components:
self._setup_and_start_ambari_server(ambari_info.port, jdk_path)
# all nodes must run Ambari agent
self._setup_and_start_ambari_agent(ambari_info.host.internal_ip)
@saharautils.inject_remote('r')
def rpms_installed(self, r):
yum_cmd = 'yum -q list installed %s' % EPEL_RELEASE_PACKAGE_NAME
ret_code, stdout = r.execute_command(yum_cmd,
run_as_root=True,
raise_when_error=False)
return ret_code == 0
@saharautils.inject_remote('r')
def install_rpms(self, r):
LOG.info(
"{0}: Installing rpm's ...".format(self.instance.hostname()))
# TODO(jspeidel): based on image type, use correct command
curl_cmd = ('curl -f -s -o /etc/yum.repos.d/ambari.repo %s' %
self.ambari_rpm)
ret_code, stdout = r.execute_command(curl_cmd,
run_as_root=True,
raise_when_error=False)
if ret_code == 0:
yum_cmd = 'yum -y install %s' % EPEL_RELEASE_PACKAGE_NAME
r.execute_command(yum_cmd, run_as_root=True)
else:
LOG.info("{0}: Unable to install rpm's from repo, "
"checking for local install."
.format(self.instance.hostname()))
if not self.rpms_installed():
raise ex.HadoopProvisionError(
'Failed to install Hortonworks Ambari')
@saharautils.inject_remote('r')
def install_swift_integration(self, r):
LOG.info(
"{0}: Installing swift integration ..."
.format(self.instance.hostname()))
base_rpm_cmd = 'rpm -U --quiet '
rpm_cmd = base_rpm_cmd + HADOOP_SWIFT_RPM
ret_code, stdout = r.execute_command(rpm_cmd,
run_as_root=True,
raise_when_error=False)
if ret_code != 0:
LOG.info("{0}: Unable to install swift integration from source, "
"checking for local rpm."
.format(self.instance.hostname()))
ret_code, stdout = r.execute_command(
'ls ' + HADOOP_SWIFT_LOCAL_RPM,
run_as_root=True,
raise_when_error=False)
if ret_code == 0:
rpm_cmd = base_rpm_cmd + HADOOP_SWIFT_LOCAL_RPM
r.execute_command(rpm_cmd, run_as_root=True)
else:
raise ex.HadoopProvisionError(
'Failed to install Hadoop Swift integration')
@saharautils.inject_remote('r')
def configure_topology(self, topology_str, r):
r.write_file_to(
'/etc/hadoop/conf/topology.sh',
f.get_file_text(
'plugins/hdp/versions/version_1_3_2/resources/topology.sh'))
r.execute_command(
'chmod +x /etc/hadoop/conf/topology.sh', run_as_root=True
)
r.write_file_to('/etc/hadoop/conf/topology.data', topology_str)
@saharautils.inject_remote('r')
def _setup_and_start_ambari_server(self, port, jdk_path, r):
LOG.info('{0}: Installing ambari-server ...'.format(
self.instance.hostname()))
r.execute_command('yum -y install ambari-server', run_as_root=True)
LOG.info('Running Ambari Server setup ...')
# remove postgres data directory as a precaution since its existence
# has prevented successful postgres installation
r.execute_command('rm -rf /var/lib/pgsql/data', run_as_root=True)
# determine if the JDK is installed on this image
# in the case of the plain image, no JDK will be available
return_code, stdout = r.execute_command('ls -l {jdk_location}'.format(
jdk_location=jdk_path), raise_when_error=False)
LOG.debug('Queried for JDK location on VM instance, return code = ' +
str(return_code))
# do silent setup since we only use default responses now
# only add -j command if the JDK is configured for the template,
# and if the JDK is present
# in all other cases, allow Ambari to install the JDK
r.execute_command(
'ambari-server setup -s {jdk_arg} > /dev/null 2>&1'.format(
jdk_arg='-j ' + jdk_path if jdk_path and (return_code == 0)
else ''),
run_as_root=True, timeout=1800
)
self._configure_ambari_server_api_port(port)
LOG.info('Starting Ambari ...')
# NOTE(dmitryme): Reading stdout from 'ambari-server start'
# hangs ssh. Redirecting output to /dev/null fixes that
r.execute_command(
'ambari-server start > /dev/null 2>&1', run_as_root=True
)
@saharautils.inject_remote('r')
def _configure_ambari_server_api_port(self, port, r):
# do nothing if port is not specified or is default
if port is None or port == 8080:
return
ambari_config_file = '/etc/ambari-server/conf/ambari.properties'
LOG.debug('Configuring Ambari Server API port: {0}'.format(port))
# read the current contents
data = r.read_file_from(ambari_config_file)
data = '{0}\nclient.api.port={1}\n'.format(data, port)
# write the file back
r.write_file_to(ambari_config_file, data, run_as_root=True)
@saharautils.inject_remote('r')
def _setup_and_start_ambari_agent(self, ambari_server_ip, r):
LOG.info('{0}: Installing Ambari Agent ...'.format(
self.instance.hostname()))
r.execute_command('yum -y install ambari-agent', run_as_root=True)
LOG.debug(
'{0}: setting master-ip: {1} in ambari-agent.ini'.format(
self.instance.hostname(), ambari_server_ip))
r.replace_remote_string(
'/etc/ambari-agent/conf/ambari-agent.ini', 'localhost',
ambari_server_ip)
LOG.info(
'{0}: Starting Ambari Agent ...'.format(self.instance.hostname()))
# If the HDP 2 ambari agent is pre-installed on an image, the agent
# will start up during instance launch and therefore the agent
# registration will fail. It is therefore more appropriate to call
# restart since it will either start (if stopped) or restart (if
# running)
r.execute_command('ambari-agent restart', run_as_root=True)
def _log(self, buf):
LOG.debug(buf)
def _is_component_available(self, component):
return component in self.node_group.components
def _is_ganglia_master(self):
return self._is_component_available('GANGLIA_SERVER')
def _is_ganglia_slave(self):
return self._is_component_available('GANGLIA_MONITOR')
class DefaultPromptMatcher():
    prompt_pattern = re.compile(r'(.*\()(.)(\)\?\s*$)', re.DOTALL)
def __init__(self, terminal_token):
self.eof_token = terminal_token
def get_response(self, s):
match = self.prompt_pattern.match(s)
if match:
response = match.group(2)
return response
else:
return None
def is_eof(self, s):
eof = self.eof_token in s
return eof
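# A minimal usage sketch (prompt strings are hypothetical):
#
#   matcher = DefaultPromptMatcher('licenses installed')
#   matcher.get_response('Do you want to continue (y)?')   # -> 'y'
#   matcher.is_eof('All licenses installed.')               # -> True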
| apache-2.0 |
iitis/mutrics | portname/step2_train.py | 1 | 1699 | #!/usr/bin/env python3
# Author: Pawel Foremski <[email protected]>
# Copyright (C) 2012-2014 IITiS PAN <http://www.iitis.pl/>
# Licensed under GNU GPL v3
import sys
import argparse
import random
from HTClass import *
def main(param, src, dst, numtrain, numtest):
samples = []
# read data
for line in src:
line = line.strip()
if len(line) == 0 or not line[0].isdigit(): continue
(fid, proto, port, dns_name, gt) = line.split()
k = ",".join([proto, port, dns_name])
samples.append((k, gt))
# take random samples
if numtrain > 0:
samples = random.sample(samples, numtrain + numtest)
train = samples[:numtrain]
test = samples[numtrain:]
else:
train = samples
test = []
# compute the minc
minc = int(0.00001 * len(train))
if minc < 2: minc = 2
print("minc=%d" % minc)
# train
knc = HTClass(minc=minc)
knc.fit([x[0] for x in train], [x[1] for x in train])
# test
if len(test) > 0:
(acc, ratio, err) = knc.score([x[0] for x in test], [x[1] for x in test])
print("%.4f\t%.4f\t%d errors" % (acc * 100.0, ratio * 100.0, err))
# store model
if dst:
knc.store(dst)
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Simple DNS name traffic classifier')
p.add_argument('model', nargs='?', help='output file')
p.add_argument("-t", type=int, default=0, help="number of training patterns [0=all]")
p.add_argument("-T", type=int, default=0, help="number of testing patterns [0=none]")
p.add_argument("--exe", help="exec given Python file first (e.g. for params)")
args = p.parse_args()
if args.model: marg = open(args.model, "wb")
else: marg = None
if args.exe: exec(open(args.exe).read())
main(args, sys.stdin, marg, args.t, args.T)
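# A hypothetical invocation sketch (file names are placeholders):
#
#   cat flows.txt | ./step2_train.py -t 10000 -T 2000 model.bin
#
# reads whitespace-separated "fid proto port dns_name gt" lines from stdin,
# trains on 10000 randomly chosen samples, scores on 2000 held-out ones and
# stores the fitted HTClass model in model.bin.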
| gpl-3.0 |
ucloud/uai-sdk | uai/operation/modify_node_range/modify_node_range.py | 1 | 3803 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from uai.operation.base_operation import BaseUaiServiceOp
from uai.api.modify_uai_srv_version_node_range import ModifyUAISrvVersionNodeRangeApiOp
class UaiServiceModifySrvVersionNodeRangeOp(BaseUaiServiceOp):
"""
The Base Modify Service Version Node Range Class with UAI
"""
def __init__(self, parser):
super(UaiServiceModifySrvVersionNodeRangeOp, self).__init__(parser)
def _add_noderange_args(self, noderange_parser):
noderange_parse = noderange_parser.add_argument_group(
'Node-range-Params', 'Node-range Params, help to modify service version node range'
)
noderange_parse.add_argument(
'--service_id',
type=str,
required=True,
help='The service id of UAI Inference',
)
noderange_parse.add_argument(
'--srv_version',
type=str,
required=True,
help='The service version of UAI Inference'
)
noderange_parse.add_argument(
'--node_range_min',
type=int,
required=True,
help='The minimum node count'
)
noderange_parse.add_argument(
'--node_range_max',
type=int,
required=True,
help='The maximum node count'
)
def _add_args(self):
super(UaiServiceModifySrvVersionNodeRangeOp, self)._add_args()
self._add_noderange_args(self.parser)
def _parse_noderange_args(self, args):
self.service_id = args['service_id']
self.srv_version = args['srv_version']
self.node_range_min = args['node_range_min']
self.node_range_max = args['node_range_max']
if int(self.node_range_min) < 2:
            raise ValueError('Node range min should not be smaller than 2, requested value: {0}'.format(self.node_range_min))
if int(self.node_range_max) < 2:
            raise ValueError('Node range max should not be smaller than 2, requested value: {0}'.format(self.node_range_max))
if int(self.node_range_min) > int(self.node_range_max):
raise ValueError('Node range min (current: {0}) should be smaller than node range max (current: {1})'.format(self.node_range_min, self.node_range_max))
def _parse_args(self, args):
super(UaiServiceModifySrvVersionNodeRangeOp, self)._parse_args(args)
self._parse_noderange_args(args)
def cmd_run(self, args):
self._parse_args(args)
modifyOp = ModifyUAISrvVersionNodeRangeApiOp(
public_key=self.public_key,
private_key=self.private_key,
project_id=self.project_id,
region=self.region,
zone=self.zone,
service_id=self.service_id,
srv_version=self.srv_version,
node_range_min=self.node_range_min,
node_range_max=self.node_range_max,
)
succ, rsp = modifyOp.call_api()
if not succ:
raise RuntimeError('Call ModifyUAISrvVersionNodeRange error, Error message: {0}'.format(rsp['Message']))
return succ, rsp
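# A hypothetical CLI sketch (identifiers are placeholders), assuming the uai
# command-line front end exposes this operation as "modify_node_range":
#
#   uai modify_node_range --service_id uai-xxxxxx --srv_version 1 \
#       --node_range_min 2 --node_range_max 4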
| apache-2.0 |
varunagrawal/azure-services | varunagrawal/site-packages/django/db/backends/util.py | 88 | 4767 | import datetime
import decimal
import hashlib
from time import time
from django.conf import settings
from django.utils.log import getLogger
from django.utils.timezone import utc
logger = getLogger('django.db.backends')
class CursorWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
def set_dirty(self):
if self.db.is_managed():
self.db.set_dirty()
def __getattr__(self, attr):
self.set_dirty()
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class CursorDebugWrapper(CursorWrapper):
def execute(self, sql, params=()):
self.set_dirty()
start = time()
try:
return self.cursor.execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
self.set_dirty()
start = time()
try:
return self.cursor.executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
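# Each statement run through CursorDebugWrapper therefore leaves an entry such as
# {'sql': 'SELECT ...', 'time': '0.002'} in connection.queries (values illustrative).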
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return s and datetime.date(*map(int, s.split('-'))) or None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s: return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.'+microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s: return None
    if ' ' not in s: return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo)
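# Illustrative conversions (input strings are examples, not live database values):
#
#   typecast_time('09:56:00')                    # -> datetime.time(9, 56)
#   typecast_timestamp('2005-07-29 09:56:00-05')
#   # -> datetime.datetime(2005, 7, 29, 9, 56) (tzinfo=utc when USE_TZ is enabled;
#   #    the '-05' offset itself is currently discarded, as noted above)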
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(name).hexdigest()[:hash_len]
return '%s%s' % (name[:length-hash_len], hsh)
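# Example (illustrative): truncate_name('some_very_long_table_name', 10) yields a
# 10-character name whose last 4 characters come from an md5 hash, so the result
# is stable across calls for the same input.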
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
return u'%s' % str(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return u"%.*f" % (decimal_places, value)
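# Example (illustrative): format_number(decimal.Decimal('1.23456'), 5, 2) -> u'1.23'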
| gpl-2.0 |
arthru/OpenUpgrade | addons/project_issue_sheet/__init__.py | 442 | 1105 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_issue_sheet
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ninuxorg/nodeshot | nodeshot/networking/links/serializers.py | 5 | 2196 | from rest_framework import serializers
from rest_framework_gis import serializers as gis_serializers
from nodeshot.core.base.serializers import DynamicRelationshipsMixin
from .models import Link
__all__ = [
'LinkListSerializer',
'LinkDetailSerializer',
'LinkListGeoJSONSerializer',
'LinkDetailGeoJSONSerializer',
]
class LinkListSerializer(serializers.ModelSerializer):
layer = serializers.SlugRelatedField(slug_field='slug', read_only=True)
status = serializers.ReadOnlyField(source='get_status_display')
type = serializers.ReadOnlyField(source='get_type_display')
class Meta:
model = Link
fields = [
'id',
'layer',
'node_a_name', 'node_b_name',
'status', 'type', 'line',
'metric_type', 'metric_value',
]
class LinkListGeoJSONSerializer(LinkListSerializer, gis_serializers.GeoFeatureModelSerializer):
class Meta:
model = Link
geo_field = 'line'
fields = LinkListSerializer.Meta.fields[:]
class LinkDetailSerializer(DynamicRelationshipsMixin, LinkListSerializer):
relationships = serializers.SerializerMethodField()
# this is needed to avoid adding stuff to DynamicRelationshipsMixin
_relationships = {}
class Meta:
model = Link
fields = [
'id',
'layer',
'node_a_name', 'node_b_name',
'interface_a_mac', 'interface_b_mac',
'status', 'type', 'line',
'quality', 'metric_type', 'metric_value',
'max_rate', 'min_rate', 'dbm', 'noise',
'first_seen', 'last_seen',
'added', 'updated', 'relationships'
]
LinkDetailSerializer.add_relationship(
'node_a',
view_name='api_node_details',
lookup_field='node_a_slug'
)
LinkDetailSerializer.add_relationship(
'node_b',
view_name='api_node_details',
lookup_field='node_b_slug'
)
class LinkDetailGeoJSONSerializer(LinkDetailSerializer,
gis_serializers.GeoFeatureModelSerializer):
class Meta:
model = Link
geo_field = 'line'
fields = LinkDetailSerializer.Meta.fields[:]
| gpl-3.0 |
haripradhan/MissionPlanner | Lib/site-packages/scipy/ndimage/__init__.py | 55 | 1880 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
from filters import *
from fourier import *
from interpolation import *
from measurements import *
from morphology import *
from io import *
# doccer is moved to scipy.misc in scipy 0.8
from scipy.misc import doccer
doccer = numpy.deprecate(doccer, old_name='doccer',
new_name='scipy.misc.doccer')
from info import __doc__
__version__ = '2.0'
from numpy.testing import Tester
test = Tester().test
| gpl-3.0 |
Sorsly/subtle | google-cloud-sdk/lib/surface/genomics/datasets/set_iam_policy.py | 6 | 2074 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud genomics datasets set-iam-policy.
"""
from googlecloudsdk.api_lib.genomics import genomics_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.core import resources
class SetIamPolicy(base.Command):
"""Set IAM policy for a dataset.
This command sets the IAM policy for a dataset, given a dataset ID and a
file that contains the JSON encoded IAM policy.
"""
detailed_help = iam_util.GetDetailedHelpForSetIamPolicy(
'dataset', '1000',
"""See https://cloud.google.com/genomics/v1/access-control for details on
managing access control on Genomics datasets.""")
@staticmethod
def Args(parser):
parser.add_argument('id', type=str,
help='The ID of the dataset.')
parser.add_argument('policy_file', help='JSON file with the IAM policy')
def Run(self, args):
apitools_client = genomics_util.GetGenomicsClient()
messages = genomics_util.GetGenomicsMessages()
dataset_resource = resources.REGISTRY.Parse(
args.id, collection='genomics.datasets')
policy = iam_util.ParsePolicyFile(args.policy_file, messages.Policy)
policy_request = messages.GenomicsDatasetsSetIamPolicyRequest(
resource='datasets/{0}'.format(dataset_resource.Name()),
setIamPolicyRequest=messages.SetIamPolicyRequest(policy=policy),
)
return apitools_client.datasets.SetIamPolicy(policy_request)
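# A minimal sketch of what the JSON policy file might contain (role and member
# values below are placeholders, not a recommended policy):
#
#   {
#     "bindings": [
#       {"role": "roles/viewer", "members": ["user:alice@example.com"]}
#     ]
#   }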
| mit |
vmax-feihu/hue | apps/useradmin/src/useradmin/ldap_access.py | 1 | 14369 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides access to LDAP servers, along with some basic functionality required for Hue and
User Admin to work seamlessly with LDAP.
"""
import ldap
import ldap.filter
import logging
import re
from django.contrib.auth.models import User
import desktop.conf
from desktop.lib.python_util import CaseInsensitiveDict
LOG = logging.getLogger(__name__)
CACHED_LDAP_CONN = None
class LdapBindException(Exception):
pass
class LdapSearchException(Exception):
pass
def get_connection_from_server(server=None):
ldap_servers = desktop.conf.LDAP.LDAP_SERVERS.get()
if server and ldap_servers:
ldap_config = ldap_servers[server]
else:
ldap_config = desktop.conf.LDAP
return get_connection(ldap_config)
def get_connection(ldap_config):
global CACHED_LDAP_CONN
if CACHED_LDAP_CONN is not None:
return CACHED_LDAP_CONN
ldap_url = ldap_config.LDAP_URL.get()
username = ldap_config.BIND_DN.get()
password = desktop.conf.get_ldap_bind_password(ldap_config)
ldap_cert = ldap_config.LDAP_CERT.get()
search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
if search_bind_authentication:
return LdapConnection(ldap_config, ldap_url, username, password, ldap_cert)
else:
return LdapConnection(ldap_config, ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
def get_ldap_username(username, nt_domain):
if nt_domain:
return '%s@%s' % (username, nt_domain)
else:
return username
def get_ldap_user_kwargs(username):
if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get():
return {
'username__iexact': username
}
else:
return {
'username': username
}
def get_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
return User.objects.get(**username_kwargs)
def get_or_create_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
users = User.objects.filter(**username_kwargs)
if users.exists():
return User.objects.get(**username_kwargs), False
else:
username = desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username
return User.objects.create(username=username), True
class LdapConnection(object):
"""
Constructor creates LDAP connection. Contains methods
to easily query an LDAP server.
"""
def __init__(self, ldap_config, ldap_url, bind_user=None, bind_password=None, cert_file=None):
"""
Constructor initializes the LDAP connection
"""
self.ldap_config = ldap_config
if cert_file is not None:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)
if self.ldap_config.FOLLOW_REFERRALS.get():
ldap.set_option(ldap.OPT_REFERRALS, 1)
else:
ldap.set_option(ldap.OPT_REFERRALS, 0)
if ldap_config.DEBUG.get():
ldap.set_option(ldap.OPT_DEBUG_LEVEL, ldap_config.DEBUG_LEVEL.get())
self.ldap_handle = ldap.initialize(uri=ldap_url, trace_level=ldap_config.TRACE_LEVEL.get())
if bind_user is not None:
try:
self.ldap_handle.simple_bind_s(bind_user, bind_password)
except:
msg = "Failed to bind to LDAP server as user %s" % bind_user
LOG.exception(msg)
raise LdapBindException(msg)
else:
try:
# Do anonymous bind
self.ldap_handle.simple_bind_s('','')
except:
msg = "Failed to bind to LDAP server anonymously"
LOG.exception(msg)
raise LdapBindException(msg)
def _get_search_params(self, name, attr, find_by_dn=False):
"""
if we are to find this ldap object by full distinguished name,
then search by setting search_dn to the 'name'
rather than by filtering by 'attr'.
"""
base_dn = self._get_root_dn()
if find_by_dn:
search_dn = re.sub(r'(\w+=)', lambda match: match.group(0).upper(), name)
if not search_dn.upper().endswith(base_dn.upper()):
raise LdapSearchException("Distinguished Name provided does not contain configured Base DN. Base DN: %(base_dn)s, DN: %(dn)s" % {
'base_dn': base_dn,
'dn': search_dn
})
return (search_dn, '')
else:
return (base_dn, '(' + attr + '=' + name + ')')
def _transform_find_user_results(self, result_data, user_name_attr):
"""
    :param result_data: List of (dn, attrs) tuples, where attrs maps ldap attribute names to lists of values. Generally the result list from an ldapsearch request.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:returns list of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
user_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if user_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % user_name_attr)
continue
ldap_info = {
'dn': dn,
'username': data[user_name_attr][0]
}
if 'givenName' in data:
ldap_info['first'] = data['givenName'][0]
if 'sn' in data:
ldap_info['last'] = data['sn'][0]
if 'mail' in data:
ldap_info['email'] = data['mail'][0]
# memberOf and isMemberOf should be the same if they both exist
if 'memberOf' in data:
ldap_info['groups'] = data['memberOf']
if 'isMemberOf' in data:
ldap_info['groups'] = data['isMemberOf']
user_info.append(ldap_info)
return user_info
def _transform_find_group_results(self, result_data, group_name_attr, group_member_attr):
group_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if group_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % group_name_attr)
continue
group_name = data[group_name_attr][0]
if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
group_name = group_name.lower()
ldap_info = {
'dn': dn,
'name': group_name
}
if group_member_attr in data and 'posixGroup' not in data['objectClass']:
ldap_info['members'] = data[group_member_attr]
else:
ldap_info['members'] = []
if 'posixGroup' in data['objectClass'] and 'memberUid' in data:
ldap_info['posix_members'] = data['memberUid']
else:
ldap_info['posix_members'] = []
group_info.append(ldap_info)
return group_info
def find_users(self, username_pattern, search_attr=None, user_name_attr=None, user_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
"""
LDAP search helper method finding users. This supports searching for users
by distinguished name, or the configured username attribute.
:param username_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
:param search_attr: The ldap attribute to search for ``username_pattern``. Defaults to LDAP -> USERS -> USER_NAME_ATTR config value.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:param find_by_dn: Search by distinguished name.
:param scope: ldapsearch scope.
:returns: List of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
``
"""
if not search_attr:
search_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
if not user_name_attr:
user_name_attr = search_attr
if not user_filter:
user_filter = self.ldap_config.USERS.USER_FILTER.get()
if not user_filter.startswith('('):
user_filter = '(' + user_filter + ')'
# Allow wild cards on non distinguished names
sanitized_name = ldap.filter.escape_filter_chars(username_pattern).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')
search_dn, user_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
ldap_filter = '(&' + user_filter + user_name_filter + ')'
attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', user_name_attr]
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return self._transform_find_user_results(result_data, user_name_attr)
else:
return []
def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, group_member_attr=None, group_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
"""
LDAP search helper method for finding groups
:param groupname_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
:param search_attr: The ldap attribute to search for ``groupname_pattern``. Defaults to LDAP -> GROUPS -> GROUP_NAME_ATTR config value.
:param group_name_attr: The ldap attribute that is returned by the server to map to ``name`` in the return dictionary.
:param find_by_dn: Search by distinguished name.
:param scope: ldapsearch scope.
:returns: List of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'name': <ldap attribute associated with group_name_attr>
      'members': <list of member DNs of this group; empty for posixGroup entries>
      'posix_members': <list of memberUid values, for posixGroup entries>
    }
"""
if not search_attr:
search_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()
if not group_name_attr:
group_name_attr = search_attr
if not group_member_attr:
group_member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()
if not group_filter:
group_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()
if not group_filter.startswith('('):
group_filter = '(' + group_filter + ')'
# Allow wild cards on non distinguished names
sanitized_name = ldap.filter.escape_filter_chars(groupname_pattern).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')
search_dn, group_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
ldap_filter = '(&' + group_filter + group_name_filter + ')'
attrlist = ['objectClass', 'dn', 'memberUid', group_member_attr, group_name_attr]
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return self._transform_find_group_results(result_data, group_name_attr, group_member_attr)
else:
return []
def find_members_of_group(self, dn, search_attr, ldap_filter, scope=ldap.SCOPE_SUBTREE):
if ldap_filter and not ldap_filter.startswith('('):
ldap_filter = '(' + ldap_filter + ')'
# Allow wild cards on non distinguished names
dn = ldap.filter.escape_filter_chars(dn).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
dn = dn.replace(r'\5c,', r'\2c')
search_dn, _ = self._get_search_params(dn, search_attr)
ldap_filter = '(&%(ldap_filter)s(|(isMemberOf=%(group_dn)s)(memberOf=%(group_dn)s)))' % {'group_dn': dn, 'ldap_filter': ldap_filter}
attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', search_attr]
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return result_data
else:
return []
def find_users_of_group(self, dn):
ldap_filter = self.ldap_config.USERS.USER_FILTER.get()
name_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
return self._transform_find_user_results(result_data, name_attr)
def find_groups_of_group(self, dn):
ldap_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()
name_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()
member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()
result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
return self._transform_find_group_results(result_data, name_attr, member_attr)
def _get_root_dn(self):
return self.ldap_config.BASE_DN.get()
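# A minimal usage sketch (patterns and DNs are placeholders), assuming the LDAP
# settings in desktop.conf are already configured:
#
#   conn = get_connection_from_server()
#   users = conn.find_users('jdoe*')       # wildcard search on the username attribute
#   groups = conn.find_groups('analysts')
#   members = conn.find_users_of_group('cn=analysts,ou=groups,dc=example,dc=com')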
| apache-2.0 |
shsingh/ansible | lib/ansible/modules/storage/netapp/na_ontap_disks.py | 23 | 7220 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_disks
short_description: NetApp ONTAP Assign disks to nodes
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Assign all or part of disks to nodes.
options:
node:
required: true
description:
- It specifies the node to assign all visible unowned disks.
disk_count:
required: false
type: int
description:
- Total number of disks a node should own
version_added: '2.9'
'''
EXAMPLES = """
- name: Assign unowned disks
na_ontap_disks:
node: cluster-01
hostname: "{{ hostname }}"
username: "{{ admin username }}"
password: "{{ admin password }}"
- name: Assign specified total disks
na_ontap_disks:
node: cluster-01
disk_count: 56
hostname: "{{ hostname }}"
username: "{{ admin username }}"
password: "{{ admin password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapDisks(object):
''' object initialize and class methods '''
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
node=dict(required=True, type='str'),
disk_count=dict(required=False, type='int')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_unassigned_disk_count(self):
"""
Check for free disks
"""
disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
disk_raid_info = netapp_utils.zapi.NaElement('disk-raid-info')
disk_raid_info.add_new_child('container-type', 'unassigned')
disk_storage_info.add_child_elem(disk_raid_info)
disk_query = netapp_utils.zapi.NaElement('query')
disk_query.add_child_elem(disk_storage_info)
disk_iter.add_child_elem(disk_query)
try:
result = self.server.invoke_successfully(disk_iter, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error getting disk information: %s'
% (to_native(error)),
exception=traceback.format_exc())
return int(result.get_child_content('num-records'))
def get_owned_disk_count(self):
"""
Check for owned disks
"""
disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
disk_ownership_info.add_new_child('home-node-name', self.parameters['node'])
disk_storage_info.add_child_elem(disk_ownership_info)
disk_query = netapp_utils.zapi.NaElement('query')
disk_query.add_child_elem(disk_storage_info)
disk_iter.add_child_elem(disk_query)
try:
result = self.server.invoke_successfully(disk_iter, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error getting disk information: %s'
% (to_native(error)),
exception=traceback.format_exc())
return int(result.get_child_content('num-records'))
def disk_assign(self, needed_disks):
"""
Set node as disk owner.
"""
if needed_disks > 0:
assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
'disk-sanown-assign', **{'owner': self.parameters['node'],
'disk-count': str(needed_disks)})
else:
assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
'disk-sanown-assign', **{'node-name': self.parameters['node'],
'all': 'true'})
try:
self.server.invoke_successfully(assign_disk,
enable_tunneling=True)
return True
except netapp_utils.zapi.NaApiError as error:
if to_native(error.code) == "13001":
                # This error code is tolerated here: report 'no change' instead of failing
return False
else:
self.module.fail_json(msg='Error assigning disks %s' %
(to_native(error)),
exception=traceback.format_exc())
def apply(self):
'''Apply action to disks'''
changed = False
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(
module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_disks", cserver)
# check if anything needs to be changed (add/delete/update)
unowned_disks = self.get_unassigned_disk_count()
owned_disks = self.get_owned_disk_count()
if 'disk_count' in self.parameters:
if self.parameters['disk_count'] < owned_disks:
                self.module.fail_json(msg="A disk count lower than the number of currently owned disks was requested. "
                                          "This module does not remove disks; "
                                          "any disk removal must be done manually.")
if self.parameters['disk_count'] > owned_disks + unowned_disks:
self.module.fail_json(msg="Not enough unowned disks remain to fulfill request")
if unowned_disks >= 1:
if 'disk_count' in self.parameters:
if self.parameters['disk_count'] > owned_disks:
needed_disks = self.parameters['disk_count'] - owned_disks
self.disk_assign(needed_disks)
changed = True
else:
self.disk_assign(0)
changed = True
self.module.exit_json(changed=changed)
def main():
''' Create object and call apply '''
obj_aggr = NetAppOntapDisks()
obj_aggr.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
frenchfrywpepper/ansible-modules-extras | files/patch.py | 26 | 6549 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Luis Alberto Perez Lazaro <[email protected]>
# (c) 2015, Jakub Jirutka <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: patch
author:
- "Jakub Jirutka (@jirutka)"
- "Luis Alberto Perez Lazaro (@luisperlaz)"
version_added: 1.9
description:
- Apply patch files using the GNU patch tool.
short_description: Apply patch files using the GNU patch tool.
options:
basedir:
description:
- Path of a base directory in which the patch file will be applied.
        May be omitted when the C(dest) option is specified, otherwise required.
required: false
dest:
description:
- Path of the file on the remote machine to be patched.
- The names of the files to be patched are usually taken from the patch
        file, but if there's just one file to be patched it can be specified with
this option.
required: false
aliases: [ "originalfile" ]
src:
description:
- Path of the patch file as accepted by the GNU patch tool. If
C(remote_src) is 'no', the patch source file is looked up from the
module's "files" directory.
required: true
aliases: [ "patchfile" ]
remote_src:
description:
- If C(no), it will search for src at originating/master machine, if C(yes) it will
go to the remote/target machine for the src. Default is C(no).
choices: [ "yes", "no" ]
required: false
default: "no"
strip:
description:
- Number that indicates the smallest prefix containing leading slashes
that will be stripped from each file name found in the patch file.
For more information see the strip parameter of the GNU patch tool.
required: false
type: "int"
default: "0"
backup:
version_added: "2.0"
description:
- passes --backup --version-control=numbered to patch,
producing numbered backup copies
choices: [ 'yes', 'no' ]
default: 'no'
binary:
version_added: "2.0"
description:
- Setting to C(yes) will disable patch's heuristic for transforming CRLF
line endings into LF. Line endings of src and dest must match. If set to
C(no), patch will replace CRLF in src files on POSIX.
required: false
type: "bool"
default: "no"
notes:
- This module requires GNU I(patch) utility to be installed on the remote host.
'''
EXAMPLES = '''
- name: apply patch to one file
patch: >
src=/tmp/index.html.patch
dest=/var/www/index.html
- name: apply patch to multiple files under basedir
patch: >
src=/tmp/customize.patch
basedir=/var/www
strip=1
'''
import os
from os import path, R_OK, W_OK
class PatchError(Exception):
pass
def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0):
opts = ['--quiet', '--reverse', '--forward', '--dry-run',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
if binary:
opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
(rc, _, _) = patch_func(opts)
return rc == 0
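# In other words, a patch counts as "already applied" when a --reverse --dry-run
# of it succeeds (exit code 0) against the target tree.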
def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False):
opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
if dry_run:
opts.append('--dry-run')
if binary:
opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
if backup:
opts.append('--backup --version-control=numbered')
(rc, out, err) = patch_func(opts)
if rc != 0:
msg = err or out
raise PatchError(msg)
def main():
module = AnsibleModule(
argument_spec={
'src': {'required': True, 'aliases': ['patchfile']},
'dest': {'aliases': ['originalfile']},
'basedir': {},
'strip': {'default': 0, 'type': 'int'},
'remote_src': {'default': False, 'type': 'bool'},
# NB: for 'backup' parameter, semantics is slightly different from standard
# since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
'backup': {'default': False, 'type': 'bool'},
'binary': {'default': False, 'type': 'bool'},
},
required_one_of=[['dest', 'basedir']],
supports_check_mode=True
)
# Create type object as namespace for module params
p = type('Params', (), module.params)
p.src = os.path.expanduser(p.src)
if not os.access(p.src, R_OK):
module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
if p.dest and not os.access(p.dest, W_OK):
module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
if p.basedir and not path.exists(p.basedir):
module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
if not p.basedir:
p.basedir = path.dirname(p.dest)
patch_bin = module.get_bin_path('patch')
if patch_bin is None:
module.fail_json(msg="patch command not found")
patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts)))
# patch need an absolute file name
p.src = os.path.abspath(p.src)
changed = False
if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip):
try:
apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
dry_run=module.check_mode, backup=p.backup )
changed = True
except PatchError, e:
module.fail_json(msg=str(e))
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/pbr/tests/base.py | 20 | 5944 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common utilities used in testing"""
import os
import shutil
import subprocess
import sys
import fixtures
import testresources
import testtools
from pbr import options
class DiveDir(fixtures.Fixture):
"""Dive into given directory and return back on cleanup.
:ivar path: The target directory.
"""
def __init__(self, path):
self.path = path
def setUp(self):
super(DiveDir, self).setUp()
self.addCleanup(os.chdir, os.getcwd())
os.chdir(self.path)
class BaseTestCase(testtools.TestCase, testresources.ResourcedTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 30)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid, fail hard.
print("OS_TEST_TIMEOUT set to invalid value"
" defaulting to no timeout")
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if os.environ.get('OS_STDOUT_CAPTURE') in options.TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in options.TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(
fixtures.FakeLogger('pbr'))
# Older git does not have config --local, so create a temporary home
# directory to permit using git config --global without stepping on
# developer configuration.
self.useFixture(fixtures.TempHomeDir())
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.FakeLogger())
self.useFixture(fixtures.EnvironmentVariable('PBR_VERSION', '0.0'))
self.temp_dir = self.useFixture(fixtures.TempDir()).path
self.package_dir = os.path.join(self.temp_dir, 'testpackage')
shutil.copytree(os.path.join(os.path.dirname(__file__), 'testpackage'),
self.package_dir)
self.addCleanup(os.chdir, os.getcwd())
os.chdir(self.package_dir)
self.addCleanup(self._discard_testpackage)
def _discard_testpackage(self):
# Remove pbr.testpackage from sys.modules so that it can be freshly
# re-imported by the next test
for k in list(sys.modules):
if (k == 'pbr_testpackage' or
k.startswith('pbr_testpackage.')):
del sys.modules[k]
def run_setup(self, *args, **kwargs):
return self._run_cmd(sys.executable, ('setup.py',) + args, **kwargs)
def _run_cmd(self, cmd, args=[], allow_fail=True, cwd=None):
"""Run a command in the root of the test working copy.
Runs a command, with the given argument list, in the root of the test
working copy--returns the stdout and stderr streams and the exit code
from the subprocess.
:param cwd: If falsy run within the test package dir, otherwise run
within the named path.
"""
cwd = cwd or self.package_dir
result = _run_cmd([cmd] + list(args), cwd=cwd)
if result[2] and not allow_fail:
raise Exception("Command failed retcode=%s" % result[2])
return result
def _run_cmd(args, cwd):
"""Run the command args in cwd.
:param args: The command to run e.g. ['git', 'status']
    :param cwd: The directory to run the command in.
:return: ((stdout, stderr), returncode)
"""
p = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
streams = tuple(s.decode('latin1').strip() for s in p.communicate())
for content in streams:
print(content)
return (streams) + (p.returncode,)
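# Example sketch (command and path are illustrative):
#
#   (out, err, returncode) = _run_cmd(['git', 'status'], cwd='/tmp/somerepo')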
| agpl-3.0 |
repotvsupertuga/tvsupertuga.repository | plugin.video.sport365.live/resources/lib/pyaes_new/blockfeeder.py | 59 | 8119 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .aes import AESBlockModeOfOperation, AESSegmentModeOfOperation, AESStreamModeOfOperation
from .util import append_PKCS7_padding, strip_PKCS7_padding, to_bufferable
# First we inject three functions to each of the modes of operations
#
# _can_consume(size)
# - Given a size, determine how many bytes could be consumed in
# a single call to either the decrypt or encrypt method
#
# _final_encrypt(data, padding = PADDING_DEFAULT)
# - call and return encrypt on this (last) chunk of data,
# padding as necessary; this will always be at least 16
# bytes unless the total incoming input was less than 16
# bytes
#
# _final_decrypt(data, padding = PADDING_DEFAULT)
# - same as _final_encrypt except for decrypt, for
# stripping off padding
#
PADDING_NONE = 'none'
PADDING_DEFAULT = 'default'
# @TODO: Ciphertext stealing and explicit PKCS#7
# PADDING_CIPHERTEXT_STEALING
# PADDING_PKCS7
# ECB and CBC are block-only ciphers
def _block_can_consume(self, size):
if size >= 16: return 16
return 0
# After padding, we may have more than one block
def _block_final_encrypt(self, data, padding = PADDING_DEFAULT):
if padding == PADDING_DEFAULT:
data = append_PKCS7_padding(data)
elif padding == PADDING_NONE:
if len(data) != 16:
raise Exception('invalid data length for final block')
else:
raise Exception('invalid padding option')
if len(data) == 32:
return self.encrypt(data[:16]) + self.encrypt(data[16:])
return self.encrypt(data)
def _block_final_decrypt(self, data, padding = PADDING_DEFAULT):
if padding == PADDING_DEFAULT:
return strip_PKCS7_padding(self.decrypt(data))
if padding == PADDING_NONE:
if len(data) != 16:
raise Exception('invalid data length for final block')
return self.decrypt(data)
raise Exception('invalid padding option')
AESBlockModeOfOperation._can_consume = _block_can_consume
AESBlockModeOfOperation._final_encrypt = _block_final_encrypt
AESBlockModeOfOperation._final_decrypt = _block_final_decrypt
# CFB is a segment cipher
def _segment_can_consume(self, size):
return self.segment_bytes * int(size // self.segment_bytes)
# CFB can handle a non-segment-sized block at the end using the remaining cipherblock
def _segment_final_encrypt(self, data, padding = PADDING_DEFAULT):
if padding != PADDING_DEFAULT:
raise Exception('invalid padding option')
faux_padding = (chr(0) * (self.segment_bytes - (len(data) % self.segment_bytes)))
padded = data + to_bufferable(faux_padding)
return self.encrypt(padded)[:len(data)]
# CFB can handle a non-segment-sized block at the end using the remaining cipherblock
def _segment_final_decrypt(self, data, padding = PADDING_DEFAULT):
if padding != PADDING_DEFAULT:
raise Exception('invalid padding option')
faux_padding = (chr(0) * (self.segment_bytes - (len(data) % self.segment_bytes)))
padded = data + to_bufferable(faux_padding)
return self.decrypt(padded)[:len(data)]
AESSegmentModeOfOperation._can_consume = _segment_can_consume
AESSegmentModeOfOperation._final_encrypt = _segment_final_encrypt
AESSegmentModeOfOperation._final_decrypt = _segment_final_decrypt
# OFB and CTR are stream ciphers
def _stream_can_consume(self, size):
return size
def _stream_final_encrypt(self, data, padding = PADDING_DEFAULT):
if padding not in [PADDING_NONE, PADDING_DEFAULT]:
raise Exception('invalid padding option')
return self.encrypt(data)
def _stream_final_decrypt(self, data, padding = PADDING_DEFAULT):
if padding not in [PADDING_NONE, PADDING_DEFAULT]:
raise Exception('invalid padding option')
return self.decrypt(data)
AESStreamModeOfOperation._can_consume = _stream_can_consume
AESStreamModeOfOperation._final_encrypt = _stream_final_encrypt
AESStreamModeOfOperation._final_decrypt = _stream_final_decrypt
class BlockFeeder(object):
'''The super-class for objects to handle chunking a stream of bytes
into the appropriate block size for the underlying mode of operation
and applying (or stripping) padding, as necessary.'''
def __init__(self, mode, feed, final, padding = PADDING_DEFAULT):
self._mode = mode
self._feed = feed
self._final = final
self._buffer = to_bufferable("")
self._padding = padding
def feed(self, data = None):
'''Provide bytes to encrypt (or decrypt), returning any bytes
possible from this or any previous calls to feed.
Call with None or an empty string to flush the mode of
operation and return any final bytes; no further calls to
feed may be made.'''
if self._buffer is None:
raise ValueError('already finished feeder')
# Finalize; process the spare bytes we were keeping
if not data:
result = self._final(self._buffer, self._padding)
self._buffer = None
return result
self._buffer += to_bufferable(data)
# We keep 16 bytes around so we can determine padding
result = to_bufferable('')
while len(self._buffer) > 16:
can_consume = self._mode._can_consume(len(self._buffer) - 16)
if can_consume == 0: break
result += self._feed(self._buffer[:can_consume])
self._buffer = self._buffer[can_consume:]
return result
class Encrypter(BlockFeeder):
'Accepts bytes of plaintext and returns encrypted ciphertext.'
def __init__(self, mode, padding = PADDING_DEFAULT):
BlockFeeder.__init__(self, mode, mode.encrypt, mode._final_encrypt, padding)
class Decrypter(BlockFeeder):
'Accepts bytes of ciphertext and returns decrypted plaintext.'
def __init__(self, mode, padding = PADDING_DEFAULT):
BlockFeeder.__init__(self, mode, mode.decrypt, mode._final_decrypt, padding)
# 8kb blocks
BLOCK_SIZE = (1 << 13)
def _feed_stream(feeder, in_stream, out_stream, block_size = BLOCK_SIZE):
'Uses feeder to read and convert from in_stream and write to out_stream.'
while True:
chunk = in_stream.read(block_size)
if not chunk:
break
converted = feeder.feed(chunk)
out_stream.write(converted)
converted = feeder.feed()
out_stream.write(converted)
def encrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
'Encrypts a stream of bytes from in_stream to out_stream using mode.'
encrypter = Encrypter(mode, padding = padding)
_feed_stream(encrypter, in_stream, out_stream, block_size)
def decrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
'Decrypts a stream of bytes from in_stream to out_stream using mode.'
decrypter = Decrypter(mode, padding = padding)
_feed_stream(decrypter, in_stream, out_stream, block_size)
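# A short usage sketch (the key/iv below are demo values only), using the CBC mode
# implementation from this same package:
#
#   import pyaes
#   key = b'This_key_for_demo_purposes_only!'                      # 32-byte key
#   encrypter = Encrypter(pyaes.AESModeOfOperationCBC(key, iv=b'\x00' * 16))
#   ciphertext = encrypter.feed(b'attack at dawn') + encrypter.feed()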
| gpl-2.0 |
atopuzov/nitro-python | nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationnegotiatepolicy.py | 3 | 9833 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationnegotiatepolicy(base_resource) :
""" Configuration for Negotiate Policy resource. """
def __init__(self) :
self._name = ""
self._rule = ""
self._reqaction = ""
self.___count = 0
@property
def name(self) :
ur"""Name for the negotiate authentication policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after AD KCD (negotiate) policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the negotiate authentication policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after AD KCD (negotiate) policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
ur"""Name of the NetScaler named rule, or a default syntax expression, that the policy uses to determine whether to attempt to authenticate the user with the AD KCD server.<br/>Minimum length = 1.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
ur"""Name of the NetScaler named rule, or a default syntax expression, that the policy uses to determine whether to attempt to authenticate the user with the AD KCD server.<br/>Minimum length = 1
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def reqaction(self) :
ur"""Name of the negotiate action to perform if the policy matches.<br/>Minimum length = 1.
"""
try :
return self._reqaction
except Exception as e:
raise e
@reqaction.setter
def reqaction(self, reqaction) :
ur"""Name of the negotiate action to perform if the policy matches.<br/>Minimum length = 1
"""
try :
self._reqaction = reqaction
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationnegotiatepolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationnegotiatepolicy
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add authenticationnegotiatepolicy.
"""
try :
if type(resource) is not list :
addresource = authenticationnegotiatepolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.reqaction = resource.reqaction
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ authenticationnegotiatepolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].reqaction = resource[i].reqaction
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete authenticationnegotiatepolicy.
"""
try :
if type(resource) is not list :
deleteresource = authenticationnegotiatepolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationnegotiatepolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationnegotiatepolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update authenticationnegotiatepolicy.
"""
try :
if type(resource) is not list :
updateresource = authenticationnegotiatepolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.reqaction = resource.reqaction
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ authenticationnegotiatepolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].reqaction = resource[i].reqaction
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the authenticationnegotiatepolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = authenticationnegotiatepolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = authenticationnegotiatepolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [authenticationnegotiatepolicy() for _ in range(len(name))]
obj = [authenticationnegotiatepolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = authenticationnegotiatepolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of authenticationnegotiatepolicy resources.
	filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationnegotiatepolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the authenticationnegotiatepolicy resources configured on NetScaler.
"""
try :
obj = authenticationnegotiatepolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of authenticationnegotiatepolicy resources.
	Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationnegotiatepolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class authenticationnegotiatepolicy_response(base_response) :
def __init__(self, length=1) :
self.authenticationnegotiatepolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationnegotiatepolicy = [authenticationnegotiatepolicy() for _ in range(length)]
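# Hypothetical usage sketch (not part of the original file): adding and counting
# negotiate policies through a NITRO client session. The nitro_service import
# path, management address and credentials below are assumptions for
# illustration only.
#
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#     client = nitro_service("10.0.0.10", "http")
#     client.login("nsroot", "nsroot")
#     policy = authenticationnegotiatepolicy()
#     policy.name = "negotiate_pol_1"
#     policy.rule = "ns_true"
#     policy.reqaction = "negotiate_act_1"
#     authenticationnegotiatepolicy.add(client, policy)
#     print(authenticationnegotiatepolicy.count(client))
#     client.logout()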
| apache-2.0 |
yosshy/nova | nova/db/sqlalchemy/migrate_repo/versions/279_fix_unique_constraint_for_compute_node.py | 81 | 1274 | # Copyright (c) Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import UniqueConstraint
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
compute_nodes = Table('compute_nodes', meta, autoload=True)
# Drop the old UniqueConstraint
ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
name="uniq_compute_nodes0host0hypervisor_hostname")
ukey.drop()
# Add new UniqueConstraint
ukey = UniqueConstraint(
'host', 'hypervisor_hostname', 'deleted',
table=compute_nodes,
name="uniq_compute_nodes0host0hypervisor_hostname0deleted")
ukey.create()
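# Rough SQL equivalent of the constraint swap above (MySQL-style syntax assumed
# for illustration; sqlalchemy-migrate emits the backend-specific statements):
#
#   ALTER TABLE compute_nodes
#       DROP INDEX uniq_compute_nodes0host0hypervisor_hostname;
#   ALTER TABLE compute_nodes
#       ADD CONSTRAINT uniq_compute_nodes0host0hypervisor_hostname0deleted
#       UNIQUE (host, hypervisor_hostname, deleted);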
| apache-2.0 |
akbargumbira/inasafe | safe/messaging/item/success_paragraph.py | 16 | 2322 | """
InaSAFE Disaster risk assessment tool developed by AusAid - **Paragraph.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '27/05/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from paragraph import Paragraph, Text
# FIXME (MB) remove when all to_* methods are implemented
# pylint: disable=W0223
class SuccessParagraph(Paragraph):
"""A Success Paragraph class for text blocks much like the p in html."""
def __init__(self, *args, **kwargs):
"""Creates an important paragraph object.
Args:
String text, a string to add to the message
Returns:
None
Raises:
Errors are propagated
We pass the kwargs on to the base class so an exception is raised
if invalid keywords were passed. See:
http://stackoverflow.com/questions/13124961/
how-to-pass-arguments-efficiently-kwargs-in-python
"""
if 'style_class' in kwargs:
my_class = '%s alert alert-success' % kwargs['style_class']
kwargs['style_class'] = my_class
super(SuccessParagraph, self).__init__(**kwargs)
self.text = Text(*args)
def to_html(self):
"""Render a Paragraph MessageElement as html
Args:
None
Returns:
Str the html representation of the Paragraph MessageElement
Raises:
Errors are propagated
"""
if self.text is None:
return
else:
return '<p>%s</p>' % self.text.to_html()
def to_text(self):
"""Render a Paragraph MessageElement as plain text
Args:
None
Returns:
Str the plain text representation of the Paragraph MessageElement
Raises:
Errors are propagated
"""
if self.text is None:
return
else:
return " SUCCESS: %s\n" % self.text
| gpl-3.0 |
ktdreyer/firewalld | src/firewall/server/config.py | 2 | 41234 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2012 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# force use of pygobject3 in python-slip
from gi.repository import GObject
import sys
sys.modules['gobject'] = GObject
import dbus
import dbus.service
import slip.dbus
import slip.dbus.service
from firewall.config import *
from firewall.config.dbus import *
from firewall.core.watcher import Watcher
from firewall.core.logger import log
from firewall.server.decorators import *
from firewall.server.config_icmptype import FirewallDConfigIcmpType
from firewall.server.config_service import FirewallDConfigService
from firewall.server.config_zone import FirewallDConfigZone
from firewall.core.io.zone import Zone
from firewall.core.io.service import Service
from firewall.core.io.icmptype import IcmpType
from firewall.core.io.lockdown_whitelist import LockdownWhitelist
from firewall.core.io.direct import Direct
from firewall.dbus_utils import dbus_to_python, \
command_of_sender, context_of_sender, uid_of_sender, user_of_uid
from firewall.errors import *
############################################################################
#
# class FirewallDConfig
#
############################################################################
class FirewallDConfig(slip.dbus.service.Object):
"""FirewallD main class"""
persistent = True
""" Make FirewallD persistent. """
default_polkit_auth_required = PK_ACTION_CONFIG
""" Use PK_ACTION_INFO as a default """
@handle_exceptions
def __init__(self, config, *args, **kwargs):
super(FirewallDConfig, self).__init__(*args, **kwargs)
self.config = config
self.path = args[0]
self._init_vars()
self.watcher = Watcher(self.watch_updater, 5)
self.watcher.add_watch_dir(FIREWALLD_ICMPTYPES)
self.watcher.add_watch_dir(ETC_FIREWALLD_ICMPTYPES)
self.watcher.add_watch_dir(FIREWALLD_SERVICES)
self.watcher.add_watch_dir(ETC_FIREWALLD_SERVICES)
self.watcher.add_watch_dir(FIREWALLD_ZONES)
self.watcher.add_watch_dir(ETC_FIREWALLD_ZONES)
self.watcher.add_watch_file(LOCKDOWN_WHITELIST)
self.watcher.add_watch_file(FIREWALLD_DIRECT)
self.watcher.add_watch_file(FIREWALLD_CONF)
@handle_exceptions
def _init_vars(self):
self.icmptypes = [ ]
self.icmptype_idx = 0
self.services = [ ]
self.service_idx = 0
self.zones = [ ]
self.zone_idx = 0
for icmptype in self.config.get_icmptypes():
self._addIcmpType(self.config.get_icmptype(icmptype))
for service in self.config.get_services():
self._addService(self.config.get_service(service))
for zone in self.config.get_zones():
self._addZone(self.config.get_zone(zone))
@handle_exceptions
def __del__(self):
pass
@handle_exceptions
def reload(self):
while len(self.icmptypes) > 0:
x = self.icmptypes.pop()
x.unregister()
del x
while len(self.services) > 0:
x = self.services.pop()
x.unregister()
del x
while len(self.zones) > 0:
x = self.zones.pop()
x.unregister()
del x
self._init_vars()
@handle_exceptions
def watch_updater(self, name):
if name == FIREWALLD_CONF:
old_props = self.GetAll(DBUS_INTERFACE_CONFIG)
log.debug1("config: Reloading firewalld config file '%s'",
FIREWALLD_CONF)
self.config.update_firewalld_conf()
props = self.GetAll(DBUS_INTERFACE_CONFIG)
for key in props.keys():
if key in old_props and \
old_props[key] == props[key]:
del props[key]
if len(props) > 0:
self.PropertiesChanged(DBUS_INTERFACE_CONFIG, props, [])
return
if (name.startswith(FIREWALLD_ICMPTYPES) or \
name.startswith(ETC_FIREWALLD_ICMPTYPES)) and \
name.endswith(".xml"):
(what, obj) = self.config.update_icmptype_from_path(name)
if what == "new":
self._addIcmpType(obj)
elif what == "remove":
self.removeIcmpType(obj)
elif what == "update":
self._updateIcmpType(obj)
elif (name.startswith(FIREWALLD_SERVICES) or \
name.startswith(ETC_FIREWALLD_SERVICES)) and \
name.endswith(".xml"):
(what, obj) = self.config.update_service_from_path(name)
if what == "new":
self._addService(obj)
elif what == "remove":
self.removeService(obj)
elif what == "update":
self._updateService(obj)
elif (name.startswith(FIREWALLD_ZONES) or \
name.startswith(ETC_FIREWALLD_ZONES)) and \
name.endswith(".xml"):
(what, obj) = self.config.update_zone_from_path(name)
if what == "new":
self._addZone(obj)
elif what == "remove":
self.removeZone(obj)
elif what == "update":
self._updateZone(obj)
elif name == LOCKDOWN_WHITELIST:
self.config.update_lockdown_whitelist()
self.LockdownWhitelistUpdated()
elif name == FIREWALLD_DIRECT:
self.config.update_direct()
self.Updated()
@handle_exceptions
def _addIcmpType(self, obj):
# TODO: check for idx overflow
config_icmptype = FirewallDConfigIcmpType(self, \
self.config, obj, self.icmptype_idx, self.path,
"%s/%d" % (DBUS_PATH_CONFIG_ICMPTYPE, self.icmptype_idx))
self.icmptypes.append(config_icmptype)
self.icmptype_idx += 1
self.IcmpTypeAdded(obj.name)
return config_icmptype
@handle_exceptions
def _updateIcmpType(self, obj):
for icmptype in self.icmptypes:
if icmptype.obj.name == obj.name and \
icmptype.obj.path == obj.path and \
icmptype.obj.filename == obj.filename:
icmptype.obj = obj
icmptype.Updated(obj.name)
@handle_exceptions
def removeIcmpType(self, obj):
index = 7 # see IMPORT_EXPORT_STRUCTURE in class Zone(IO_Object)
for zone in self.zones:
settings = zone.getSettings()
# if this IcmpType is used in a zone remove it from that zone first
if obj.name in settings[index]:
settings[index].remove(obj.name)
zone.obj = self.config.set_zone_config(zone.obj, settings)
zone.Updated(zone.obj.name)
for icmptype in self.icmptypes:
if icmptype.obj == obj:
icmptype.Removed(obj.name)
icmptype.unregister()
self.icmptypes.remove(icmptype)
del icmptype
@handle_exceptions
def _addService(self, obj):
# TODO: check for idx overflow
config_service = FirewallDConfigService(self, \
self.config, obj, self.service_idx, self.path,
"%s/%d" % (DBUS_PATH_CONFIG_SERVICE, self.service_idx))
self.services.append(config_service)
self.service_idx += 1
self.ServiceAdded(obj.name)
return config_service
@handle_exceptions
def _updateService(self, obj):
for service in self.services:
if service.obj.name == obj.name and \
service.obj.path == obj.path and \
service.obj.filename == obj.filename:
service.obj = obj
service.Updated(obj.name)
@handle_exceptions
def removeService(self, obj):
index = 5 # see IMPORT_EXPORT_STRUCTURE in class Zone(IO_Object)
for zone in self.zones:
settings = zone.getSettings()
# if this Service is used in a zone remove it from that zone first
if obj.name in settings[index]:
settings[index].remove(obj.name)
zone.obj = self.config.set_zone_config(zone.obj, settings)
zone.Updated(zone.obj.name)
for service in self.services:
if service.obj == obj:
service.Removed(obj.name)
service.unregister()
self.services.remove(service)
del service
@handle_exceptions
def _addZone(self, obj):
# TODO: check for idx overflow
config_zone = FirewallDConfigZone(self, \
self.config, obj, self.zone_idx, self.path,
"%s/%d" % (DBUS_PATH_CONFIG_ZONE, self.zone_idx))
self.zones.append(config_zone)
self.zone_idx += 1
self.ZoneAdded(obj.name)
return config_zone
@handle_exceptions
def _updateZone(self, obj):
for zone in self.zones:
if zone.obj.name == obj.name and zone.obj.path == obj.path and \
zone.obj.filename == obj.filename:
zone.obj = obj
zone.Updated(obj.name)
@handle_exceptions
def removeZone(self, obj):
for zone in self.zones:
if zone.obj == obj:
zone.Removed(obj.name)
zone.unregister()
self.zones.remove(zone)
del zone
# access check
@dbus_handle_exceptions
def accessCheck(self, sender):
if self.config.lockdown_enabled():
if sender is None:
log.error("Lockdown not possible, sender not set.")
return
bus = dbus.SystemBus()
context = context_of_sender(bus, sender)
if self.config.access_check("context", context):
return
uid = uid_of_sender(bus, sender)
if self.config.access_check("uid", uid):
return
user = user_of_uid(uid)
if self.config.access_check("user", user):
return
command = command_of_sender(bus, sender)
if self.config.access_check("command", command):
return
raise FirewallError(ACCESS_DENIED, "lockdown is enabled")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# P R O P E R T I E S
@dbus_handle_exceptions
def _get_property(self, prop):
if prop in [ "DefaultZone", "MinimalMark", "CleanupOnExit",
"Lockdown", "IPv6_rpfilter" ]:
value = self.config.get_firewalld_conf().get(prop)
if value is not None:
if prop == "MinimalMark":
value = int(value)
return value
else:
if prop == "DefaultZone":
return FALLBACK_ZONE
elif prop == "MinimalMark":
return FALLBACK_MINIMAL_MARK
elif prop == "CleanupOnExit":
return "yes" if FALLBACK_CLEANUP_ON_EXIT else "no"
elif prop == "Lockdown":
return "yes" if FALLBACK_LOCKDOWN else "no"
elif prop == "IPv6_rpfilter":
return "yes" if FALLBACK_IPV6_RPFILTER else "no"
else:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.AccessDenied: "
"Property '%s' isn't exported (or may not exist)" % prop)
@dbus_service_method(dbus.PROPERTIES_IFACE, in_signature='ss',
out_signature='v')
@dbus_handle_exceptions
def Get(self, interface_name, property_name, sender=None):
# get a property
interface_name = dbus_to_python(interface_name, str)
property_name = dbus_to_python(property_name, str)
log.debug1("config.Get('%s', '%s')", interface_name, property_name)
if interface_name != DBUS_INTERFACE_CONFIG:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.UnknownInterface: "
"FirewallD does not implement %s" % interface_name)
return self._get_property(property_name)
@dbus_service_method(dbus.PROPERTIES_IFACE, in_signature='s',
out_signature='a{sv}')
@dbus_handle_exceptions
def GetAll(self, interface_name, sender=None):
interface_name = dbus_to_python(interface_name, str)
log.debug1("config.GetAll('%s')", interface_name)
if interface_name != DBUS_INTERFACE_CONFIG:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.UnknownInterface: "
"FirewallD does not implement %s" % interface_name)
return {
'DefaultZone': self._get_property("DefaultZone"),
'MinimalMark': self._get_property("MinimalMark"),
'CleanupOnExit': self._get_property("CleanupOnExit"),
'Lockdown': self._get_property("Lockdown"),
'IPv6_rpfilter': self._get_property("IPv6_rpfilter"),
}
@slip.dbus.polkit.require_auth(PK_ACTION_CONFIG)
@dbus_service_method(dbus.PROPERTIES_IFACE, in_signature='ssv')
@dbus_handle_exceptions
def Set(self, interface_name, property_name, new_value, sender=None):
interface_name = dbus_to_python(interface_name, str)
property_name = dbus_to_python(property_name, str)
new_value = dbus_to_python(new_value)
log.debug1("config.Set('%s', '%s', '%s')", interface_name,
property_name, new_value)
self.accessCheck(sender)
if interface_name != DBUS_INTERFACE_CONFIG:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.UnknownInterface: "
"FirewallD does not implement %s" % interface_name)
if property_name in [ "MinimalMark", "CleanupOnExit", "Lockdown",
"IPv6_rpfilter" ]:
if property_name == "MinimalMark":
try:
int(new_value)
except ValueError:
raise FirewallError(INVALID_MARK, new_value)
try:
new_value = str(new_value)
except:
raise FirewallError(INVALID_VALUE, "'%s' for %s" % \
(new_value, property_name))
if property_name in [ "CleanupOnExit", "Lockdown",
"IPv6_rpfilter" ]:
if new_value.lower() not in [ "yes", "no", "true", "false" ]:
raise FirewallError(INVALID_VALUE, "'%s' for %s" % \
(new_value, property_name))
self.config.get_firewalld_conf().set(property_name, new_value)
self.config.get_firewalld_conf().write()
self.PropertiesChanged(interface_name,
{ property_name: new_value }, [ ])
elif property_name in [ "DefaultZone" ]:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.PropertyReadOnly: "
"Property '%s' is read-only" % property_name)
else:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.AccessDenied: "
"Property '%s' does not exist" % property_name)
@dbus.service.signal(dbus.PROPERTIES_IFACE, signature='sa{sv}as')
def PropertiesChanged(self, interface_name, changed_properties,
invalidated_properties):
log.debug1("config.PropertiesChanged('%s', '%s', '%s')", interface_name,
changed_properties, invalidated_properties)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# policies
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES,
out_signature=LockdownWhitelist.DBUS_SIGNATURE)
@dbus_handle_exceptions
def getLockdownWhitelist(self, sender=None):
log.debug1("config.policies.getLockdownWhitelist()")
return self.config.get_policies().lockdown_whitelist.export_config()
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES,
in_signature=LockdownWhitelist.DBUS_SIGNATURE)
@dbus_handle_exceptions
def setLockdownWhitelist(self, settings, sender=None):
log.debug1("config.policies.setLockdownWhitelist(...)")
settings = dbus_to_python(settings)
self.config.get_policies().lockdown_whitelist.import_config(settings)
self.config.get_policies().lockdown_whitelist.write()
self.LockdownWhitelistUpdated()
@dbus.service.signal(DBUS_INTERFACE_CONFIG_POLICIES)
@dbus_handle_exceptions
def LockdownWhitelistUpdated(self):
log.debug1("config.policies.LockdownWhitelistUpdated()")
# command
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def addLockdownWhitelistCommand(self, command, sender=None):
command = dbus_to_python(command)
log.debug1("config.policies.addLockdownWhitelistCommand('%s')", command)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if command in settings[0]:
raise FirewallError(ALREADY_ENABLED, command)
settings[0].append(command)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def removeLockdownWhitelistCommand(self, command, sender=None):
command = dbus_to_python(command)
log.debug1("config.policies.removeLockdownWhitelistCommand('%s')", command)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if command not in settings[0]:
raise FirewallError(NOT_ENABLED, command)
settings[0].remove(command)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistCommand(self, command, sender=None):
command = dbus_to_python(command)
log.debug1("config.policies.queryLockdownWhitelistCommand('%s')", command)
return command in self.getLockdownWhitelist()[0]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='as')
@dbus_handle_exceptions
def getLockdownWhitelistCommands(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistCommands()")
return self.getLockdownWhitelist()[0]
# context
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def addLockdownWhitelistContext(self, context, sender=None):
context = dbus_to_python(context)
log.debug1("config.policies.addLockdownWhitelistContext('%s')", context)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if context in settings[1]:
raise FirewallError(ALREADY_ENABLED, context)
settings[1].append(context)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def removeLockdownWhitelistContext(self, context, sender=None):
context = dbus_to_python(context)
log.debug1("config.policies.removeLockdownWhitelistContext('%s')", context)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if context not in settings[1]:
raise FirewallError(NOT_ENABLED, context)
settings[1].remove(context)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistContext(self, context, sender=None):
context = dbus_to_python(context)
log.debug1("config.policies.queryLockdownWhitelistContext('%s')", context)
return context in self.getLockdownWhitelist()[1]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='as')
@dbus_handle_exceptions
def getLockdownWhitelistContexts(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistContexts()")
return self.getLockdownWhitelist()[1]
# user
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def addLockdownWhitelistUser(self, user, sender=None):
user = dbus_to_python(user)
log.debug1("config.policies.addLockdownWhitelistUser('%s')", user)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if user in settings[2]:
raise FirewallError(ALREADY_ENABLED, user)
settings[2].append(user)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def removeLockdownWhitelistUser(self, user, sender=None):
user = dbus_to_python(user)
log.debug1("config.policies.removeLockdownWhitelistUser('%s')", user)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if user not in settings[2]:
raise FirewallError(NOT_ENABLED, user)
settings[2].remove(user)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistUser(self, user, sender=None):
user = dbus_to_python(user)
log.debug1("config.policies.queryLockdownWhitelistUser('%s')", user)
return user in self.getLockdownWhitelist()[2]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='as')
@dbus_handle_exceptions
def getLockdownWhitelistUsers(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistUsers()")
return self.getLockdownWhitelist()[2]
# uid
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='i')
@dbus_handle_exceptions
def addLockdownWhitelistUid(self, uid, sender=None):
uid = dbus_to_python(uid)
log.debug1("config.policies.addLockdownWhitelistUid(%d)", uid)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if uid in settings[3]:
raise FirewallError(ALREADY_ENABLED, uid)
settings[3].append(uid)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='i')
@dbus_handle_exceptions
def removeLockdownWhitelistUid(self, uid, sender=None):
uid = dbus_to_python(uid)
log.debug1("config.policies.removeLockdownWhitelistUid(%d)", uid)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if uid not in settings[3]:
raise FirewallError(NOT_ENABLED, uid)
settings[3].remove(uid)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='i',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistUid(self, uid, sender=None):
uid = dbus_to_python(uid)
log.debug1("config.policies.queryLockdownWhitelistUid(%d)", uid)
return uid in self.getLockdownWhitelist()[3]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='ai')
@dbus_handle_exceptions
def getLockdownWhitelistUids(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistUids()")
return self.getLockdownWhitelist()[3]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# I C M P T Y P E S
@dbus_service_method(DBUS_INTERFACE_CONFIG, out_signature='ao')
@dbus_handle_exceptions
def listIcmpTypes(self, sender=None):
"""list icmptypes objects paths
"""
log.debug1("config.listIcmpTypes()")
return self.icmptypes
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='o')
@dbus_handle_exceptions
def getIcmpTypeByName(self, icmptype, sender=None):
"""object path of icmptype with given name
"""
icmptype = dbus_to_python(icmptype, str)
log.debug1("config.getIcmpTypeByName('%s')", icmptype)
for obj in self.icmptypes:
if obj.obj.name == icmptype:
return obj
raise FirewallError(INVALID_ICMPTYPE, icmptype)
@dbus_service_method(DBUS_INTERFACE_CONFIG,
in_signature='s'+IcmpType.DBUS_SIGNATURE,
out_signature='o')
@dbus_handle_exceptions
def addIcmpType(self, icmptype, settings, sender=None):
"""add icmptype with given name and settings
"""
icmptype = dbus_to_python(icmptype, str)
settings = dbus_to_python(settings)
log.debug1("config.addIcmpType('%s')", icmptype)
self.accessCheck(sender)
obj = self.config.new_icmptype(icmptype, settings)
config_icmptype = self._addIcmpType(obj)
return config_icmptype
@dbus.service.signal(DBUS_INTERFACE_CONFIG, signature='s')
@dbus_handle_exceptions
def IcmpTypeAdded(self, icmptype):
log.debug1("config.IcmpTypeAdded('%s')" % (icmptype))
# S E R V I C E S
@dbus_service_method(DBUS_INTERFACE_CONFIG, out_signature='ao')
@dbus_handle_exceptions
def listServices(self, sender=None):
"""list services objects paths
"""
log.debug1("config.listServices()")
return self.services
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='o')
@dbus_handle_exceptions
def getServiceByName(self, service, sender=None):
"""object path of service with given name
"""
service = dbus_to_python(service, str)
log.debug1("config.getServiceByName('%s')", service)
for obj in self.services:
if obj.obj.name == service:
return obj
raise FirewallError(INVALID_SERVICE, service)
@dbus_service_method(DBUS_INTERFACE_CONFIG,
in_signature='s'+Service.DBUS_SIGNATURE,
out_signature='o')
@dbus_handle_exceptions
def addService(self, service, settings, sender=None):
"""add service with given name and settings
"""
service = dbus_to_python(service, str)
settings = dbus_to_python(settings)
log.debug1("config.addService('%s')", service)
self.accessCheck(sender)
obj = self.config.new_service(service, settings)
config_service = self._addService(obj)
return config_service
@dbus.service.signal(DBUS_INTERFACE_CONFIG, signature='s')
@dbus_handle_exceptions
def ServiceAdded(self, service):
log.debug1("config.ServiceAdded('%s')" % (service))
# Z O N E S
@dbus_service_method(DBUS_INTERFACE_CONFIG, out_signature='ao')
@dbus_handle_exceptions
def listZones(self, sender=None):
"""list zones objects paths
"""
log.debug1("config.listZones()")
return self.zones
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='o')
@dbus_handle_exceptions
def getZoneByName(self, zone, sender=None):
"""object path of zone with given name
"""
zone = dbus_to_python(zone, str)
log.debug1("config.getZoneByName('%s')", zone)
for obj in self.zones:
if obj.obj.name == zone:
return obj
raise FirewallError(INVALID_ZONE, zone)
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='s')
@dbus_handle_exceptions
def getZoneOfInterface(self, iface, sender=None):
"""name of zone the given interface belongs to
"""
iface = dbus_to_python(iface, str)
log.debug1("config.getZoneOfInterface('%s')", iface)
ret = []
for obj in self.zones:
if iface in obj.obj.interfaces:
ret.append(obj.obj.name)
if len(ret) > 1:
			# Even though it shouldn't happen, it is possible that
			# the same interface appears in several zone XML files
return " ".join(ret) + " (ERROR: interface '%s' is in %s zone XML files, can be only in one)" % (iface, len(ret))
return ret[0] if ret else ""
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='s')
@dbus_handle_exceptions
def getZoneOfSource(self, source, sender=None):
"""name of zone the given source belongs to
"""
source = dbus_to_python(source, str)
log.debug1("config.getZoneOfSource('%s')", source)
ret = []
for obj in self.zones:
if source in obj.obj.sources:
ret.append(obj.obj.name)
if len(ret) > 1:
			# Even though it shouldn't happen, it is possible that
			# the same source appears in several zone XML files
			return " ".join(ret) + " (ERROR: source '%s' is in %s zone XML files, can be only in one)" % (source, len(ret))
return ret[0] if ret else ""
@dbus_service_method(DBUS_INTERFACE_CONFIG,
in_signature='s'+Zone.DBUS_SIGNATURE,
out_signature='o')
@dbus_handle_exceptions
def addZone(self, zone, settings, sender=None):
"""add zone with given name and settings
"""
zone = dbus_to_python(zone, str)
settings = dbus_to_python(settings)
log.debug1("config.addZone('%s')", zone)
self.accessCheck(sender)
obj = self.config.new_zone(zone, settings)
config_zone = self._addZone(obj)
return config_zone
@dbus.service.signal(DBUS_INTERFACE_CONFIG, signature='s')
@dbus_handle_exceptions
def ZoneAdded(self, zone):
log.debug1("config.ZoneAdded('%s')" % (zone))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DIRECT
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT,
out_signature=Direct.DBUS_SIGNATURE)
@dbus_handle_exceptions
def getSettings(self, sender=None):
# returns list ipv, table, list of chains
log.debug1("config.direct.getSettings()")
return self.config.get_direct().export_config()
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT,
in_signature=Direct.DBUS_SIGNATURE)
@dbus_handle_exceptions
def update(self, settings, sender=None):
# returns list ipv, table, list of chains
log.debug1("config.direct.update()")
settings = dbus_to_python(settings)
self.config.get_direct().import_config(settings)
self.config.get_direct().write()
self.Updated()
@dbus.service.signal(DBUS_INTERFACE_CONFIG_DIRECT)
@dbus_handle_exceptions
def Updated(self):
log.debug1("config.direct.Updated()")
# chain
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss')
@dbus_handle_exceptions
def addChain(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.addChain('%s', '%s', '%s')" % \
(ipv, table, chain))
self.accessCheck(sender)
idx = tuple((ipv, table, chain))
settings = list(self.getSettings())
if idx in settings[0]:
raise FirewallError(ALREADY_ENABLED,
"chain '%s' already is in '%s:%s'" % (chain, ipv, table))
settings[0].append(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss')
@dbus_handle_exceptions
def removeChain(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.removeChain('%s', '%s', '%s')" % \
(ipv, table, chain))
self.accessCheck(sender)
idx = tuple((ipv, table, chain))
settings = list(self.getSettings())
if idx not in settings[0]:
raise FirewallError(NOT_ENABLED,
"chain '%s' is not in '%s:%s'" % (chain, ipv, table))
settings[0].remove(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss',
out_signature='b')
@dbus_handle_exceptions
def queryChain(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.queryChain('%s', '%s', '%s')" % \
(ipv, table, chain))
idx = tuple((ipv, table, chain))
return idx in self.getSettings()[0]
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='ss',
out_signature='as')
@dbus_handle_exceptions
def getChains(self, ipv, table, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
log.debug1("config.direct.getChains('%s', '%s')" % (ipv, table))
ret = [ ]
for idx in self.getSettings()[0]:
if idx[0] == ipv and idx[1] == table:
ret.append(idx[2])
return ret
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='',
out_signature='a(sss)')
@dbus_handle_exceptions
def getAllChains(self, sender=None):
log.debug1("config.direct.getAllChains()")
return self.getSettings()[0]
# rule
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sssias')
@dbus_handle_exceptions
def addRule(self, ipv, table, chain, priority, args, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
priority = dbus_to_python(priority)
args = dbus_to_python(args)
log.debug1("config.direct.addRule('%s', '%s', '%s', %d, '%s')" % \
(ipv, table, chain, priority, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, table, chain, priority, args)
settings = list(self.getSettings())
if idx in settings[1]:
raise FirewallError(ALREADY_ENABLED,
"rule '%s' already is in '%s:%s:%s'" % \
(args, ipv, table, chain))
settings[1].append(idx)
self.update(tuple(settings))
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sssias')
@dbus_handle_exceptions
def removeRule(self, ipv, table, chain, priority, args, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
priority = dbus_to_python(priority)
args = dbus_to_python(args)
log.debug1("config.direct.removeRule('%s', '%s', '%s', %d, '%s')" % \
(ipv, table, chain, priority, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, table, chain, priority, args)
settings = list(self.getSettings())
if idx not in settings[1]:
raise FirewallError(NOT_ENABLED,
"rule '%s' is not in '%s:%s:%s'" % \
(args, ipv, table, chain))
settings[1].remove(idx)
self.update(tuple(settings))
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sssias',
out_signature='b')
@dbus_handle_exceptions
def queryRule(self, ipv, table, chain, priority, args, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
priority = dbus_to_python(priority)
args = dbus_to_python(args)
log.debug1("config.direct.queryRule('%s', '%s', '%s', %d, '%s')" % \
(ipv, table, chain, priority, "','".join(args)))
idx = (ipv, table, chain, priority, args)
return idx in self.getSettings()[1]
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss')
@dbus_handle_exceptions
def removeRules(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.removeRules('%s', '%s', '%s')" %
(ipv, table, chain, ))
self.accessCheck(sender)
settings = list(self.getSettings())
settings[1] = []
self.update(tuple(settings))
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss',
out_signature='a(ias)')
@dbus_handle_exceptions
def getRules(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.getRules('%s', '%s', '%s')" % \
(ipv, table, chain))
ret = [ ]
for idx in self.getSettings()[1]:
if idx[0] == ipv and idx[1] == table and idx[2] == chain:
ret.append((idx[3], idx[4]))
return ret
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='',
out_signature='a(sssias)')
@dbus_handle_exceptions
def getAllRules(self, sender=None):
log.debug1("config.direct.getAllRules()")
return self.getSettings()[1]
# passthrough
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sas')
@dbus_handle_exceptions
def addPassthrough(self, ipv, args, sender=None):
ipv = dbus_to_python(ipv)
args = dbus_to_python(args)
log.debug1("config.direct.addPassthrough('%s', '%s')" % \
(ipv, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, args)
settings = list(self.getSettings())
if idx in settings[2]:
raise FirewallError(ALREADY_ENABLED,
"passthrough '%s', '%s'" % (ipv, args))
settings[2].append(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sas')
@dbus_handle_exceptions
def removePassthrough(self, ipv, args, sender=None):
ipv = dbus_to_python(ipv)
args = dbus_to_python(args)
log.debug1("config.direct.removePassthrough('%s', '%s')" % \
(ipv, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, args)
settings = list(self.getSettings())
if idx not in settings[2]:
raise FirewallError(NOT_ENABLED,
"passthrough '%s', '%s'" % (ipv, args))
settings[2].remove(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sas',
out_signature='b')
@dbus_handle_exceptions
def queryPassthrough(self, ipv, args, sender=None):
ipv = dbus_to_python(ipv)
args = dbus_to_python(args)
log.debug1("config.direct.queryPassthrough('%s', '%s')" % \
(ipv, "','".join(args)))
idx = (ipv, args)
return idx in self.getSettings()[2]
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='s',
out_signature='aas')
@dbus_handle_exceptions
def getPassthroughs(self, ipv, sender=None):
ipv = dbus_to_python(ipv)
log.debug1("config.direct.getPassthroughs('%s')" % (ipv))
ret = [ ]
for idx in self.getSettings()[2]:
if idx[0] == ipv:
ret.append(idx[1])
return ret
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, out_signature='a(sas)')
@dbus_handle_exceptions
def getAllPassthroughs(self, sender=None):
log.debug1("config.direct.getAllPassthroughs()")
return self.getSettings()[2]
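# Hypothetical client-side sketch (not part of this module): reading the
# configuration properties that the Get()/GetAll() handlers above expose over
# D-Bus. The bus name, object path and interface strings are assumptions for
# illustration; the real values come from firewall.config.dbus.
#
#     import dbus
#     bus = dbus.SystemBus()
#     obj = bus.get_object('org.fedoraproject.FirewallD1',
#                          '/org/fedoraproject/FirewallD1/config')
#     props = dbus.Interface(obj, 'org.freedesktop.DBus.Properties')
#     print(props.GetAll('org.fedoraproject.FirewallD1.config'))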
| gpl-2.0 |
kazzz24/deep-learning | transfer-learning/tensorflow_vgg/test_vgg19_trainable.py | 152 | 1435 | """
Simple tester for the vgg19_trainable
"""
import tensorflow as tf
from tensorflow_vgg import vgg19_trainable as vgg19
from tensorflow_vgg import utils
img1 = utils.load_image("./test_data/tiger.jpeg")
img1_true_result = [1 if i == 292 else 0 for i in range(1000)] # 1-hot result for tiger
batch1 = img1.reshape((1, 224, 224, 3))
with tf.device('/cpu:0'):
sess = tf.Session()
images = tf.placeholder(tf.float32, [1, 224, 224, 3])
true_out = tf.placeholder(tf.float32, [1, 1000])
train_mode = tf.placeholder(tf.bool)
vgg = vgg19.Vgg19('./vgg19.npy')
vgg.build(images, train_mode)
# print number of variables used: 143667240 variables, i.e. ideal size = 548MB
print(vgg.get_var_count())
sess.run(tf.global_variables_initializer())
# test classification
prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
utils.print_prob(prob[0], './synset.txt')
# simple 1-step training
cost = tf.reduce_sum((vgg.prob - true_out) ** 2)
train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
sess.run(train, feed_dict={images: batch1, true_out: [img1_true_result], train_mode: True})
    # test classification again; the tiger class should now have a higher probability
prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
utils.print_prob(prob[0], './synset.txt')
# test save
vgg.save_npy(sess, './test-save.npy')
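    # Hypothetical follow-up (not part of the original script): the weights saved
    # above can be reloaded later by constructing a new trainable network from the
    # file, e.g. vgg19.Vgg19('./test-save.npy'), assuming the same class interface.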
| mit |
devGregA/code | build/lib.linux-x86_64-2.7/scrapy/utils/defer.py | 33 | 3241 | """
Helper functions for dealing with Twisted deferreds
"""
from twisted.internet import defer, reactor, task
from twisted.python import failure
from scrapy.exceptions import IgnoreRequest
def defer_fail(_failure):
"""Same as twisted.internet.defer.fail, but delay calling errback until
next reactor loop
"""
d = defer.Deferred()
reactor.callLater(0, d.errback, _failure)
return d
def defer_succeed(result):
"""Same as twsited.internet.defer.succed, but delay calling callback until
next reactor loop
"""
d = defer.Deferred()
reactor.callLater(0, d.callback, result)
return d
def defer_result(result):
if isinstance(result, defer.Deferred):
return result
elif isinstance(result, failure.Failure):
return defer_fail(result)
else:
return defer_succeed(result)
def mustbe_deferred(f, *args, **kw):
"""Same as twisted.internet.defer.maybeDeferred, but delay calling
callback/errback to next reactor loop
"""
try:
result = f(*args, **kw)
# FIXME: Hack to avoid introspecting tracebacks. This to speed up
# processing of IgnoreRequest errors which are, by far, the most common
# exception in Scrapy - see #125
except IgnoreRequest as e:
return defer_fail(failure.Failure(e))
except:
return defer_fail(failure.Failure())
else:
return defer_result(result)
def parallel(iterable, count, callable, *args, **named):
"""Execute a callable over the objects in the given iterable, in parallel,
using no more than ``count`` concurrent calls.
Taken from: http://jcalderone.livejournal.com/24285.html
"""
coop = task.Cooperator()
work = (callable(elem, *args, **named) for elem in iterable)
return defer.DeferredList([coop.coiterate(work) for i in xrange(count)])
def process_chain(callbacks, input, *a, **kw):
"""Return a Deferred built by chaining the given callbacks"""
d = defer.Deferred()
for x in callbacks:
d.addCallback(x, *a, **kw)
d.callback(input)
return d
def process_chain_both(callbacks, errbacks, input, *a, **kw):
"""Return a Deferred built by chaining the given callbacks and errbacks"""
d = defer.Deferred()
for cb, eb in zip(callbacks, errbacks):
d.addCallbacks(cb, eb, callbackArgs=a, callbackKeywords=kw,
errbackArgs=a, errbackKeywords=kw)
if isinstance(input, failure.Failure):
d.errback(input)
else:
d.callback(input)
return d
def process_parallel(callbacks, input, *a, **kw):
"""Return a Deferred with the output of all successful calls to the given
callbacks
"""
dfds = [defer.succeed(input).addCallback(x, *a, **kw) for x in callbacks]
d = defer.DeferredList(dfds, fireOnOneErrback=1, consumeErrors=1)
d.addCallbacks(lambda r: [x[1] for x in r], lambda f: f.value.subFailure)
return d
def iter_errback(iterable, errback, *a, **kw):
"""Wraps an iterable calling an errback if an error is caught while
iterating it.
"""
it = iter(iterable)
while 1:
try:
yield next(it)
except StopIteration:
break
except:
errback(failure.Failure(), *a, **kw)
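# Hypothetical usage sketch (not part of the original module): chain two plain
# callables over a value with process_chain. The helper callables are examples
# only; the deferred fires synchronously here, so no running reactor is needed.
if __name__ == '__main__':
    def _double(value):
        return value * 2

    def _describe(value):
        return "result=%s" % value

    results = []
    process_chain([_double, _describe], 21).addCallback(results.append)
    assert results == ["result=42"]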
| bsd-3-clause |
mm1ke/portage | pym/portage/util/_desktop_entry.py | 6 | 2619 | # Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
import re
import subprocess
import sys
import portage
from portage import _encodings, _unicode_encode, _unicode_decode
from portage.util import writemsg
from portage.util.configparser import (ConfigParserError, RawConfigParser,
read_configs)
def parse_desktop_entry(path):
"""
Parse the given file with RawConfigParser and return the
result. This may raise an IOError from io.open(), or a
ParsingError from RawConfigParser.
"""
parser = RawConfigParser()
read_configs(parser, [path])
return parser
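# Hypothetical usage sketch (not part of the original module): pull a couple of
# keys out of a parsed .desktop file. The path below is an example only.
#
#     entry = parse_desktop_entry("/usr/share/applications/firefox.desktop")
#     if entry.has_section("Desktop Entry"):
#         name = entry.get("Desktop Entry", "Name")
#         command = entry.get("Desktop Entry", "Exec")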
_trivial_warnings = re.compile(r' looks redundant with value ')
_ignored_errors = (
# Ignore error for emacs.desktop:
# https://bugs.freedesktop.org/show_bug.cgi?id=35844#c6
'error: (will be fatal in the future): value "TextEditor" in key "Categories" in group "Desktop Entry" requires another category to be present among the following categories: Utility',
'warning: key "Encoding" in group "Desktop Entry" is deprecated'
)
_ShowIn_exemptions = (
# See bug #480586.
'contains an unregistered value "Pantheon"',
)
def validate_desktop_entry(path):
args = ["desktop-file-validate", path]
if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
# Python 3.1 _execvp throws TypeError for non-absolute executable
# path passed as bytes (see https://bugs.python.org/issue8513).
fullname = portage.process.find_binary(args[0])
if fullname is None:
raise portage.exception.CommandNotFound(args[0])
args[0] = fullname
args = [_unicode_encode(x, errors='strict') for x in args]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
proc.wait()
if output_lines:
filtered_output = []
for line in output_lines:
msg = line[len(path)+2:]
# "hint:" output is new in desktop-file-utils-0.21
if msg.startswith('hint: ') or msg in _ignored_errors:
continue
if 'for key "NotShowIn" in group "Desktop Entry"' in msg or \
'for key "OnlyShowIn" in group "Desktop Entry"' in msg:
exempt = False
for s in _ShowIn_exemptions:
if s in msg:
exempt = True
break
if exempt:
continue
filtered_output.append(line)
output_lines = filtered_output
if output_lines:
output_lines = [line for line in output_lines
if _trivial_warnings.search(line) is None]
return output_lines
if __name__ == "__main__":
for arg in sys.argv[1:]:
for line in validate_desktop_entry(arg):
writemsg(line + "\n", noiselevel=-1)
| gpl-2.0 |
jbenden/ansible | lib/ansible/modules/network/cloudengine/ce_startup.py | 47 | 15451 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_startup
version_added: "2.4"
short_description: Manages system startup information on HUAWEI CloudEngine switches.
description:
    - Manages system startup information on HUAWEI CloudEngine switches.
author:
- Li Yanfeng (@CloudEngine-Ansible)
options:
cfg_file:
description:
- Name of the configuration file that is applied for the next startup.
The value is a string of 5 to 255 characters.
required: false
default: present
software_file:
description:
- File name of the system software that is applied for the next startup.
The value is a string of 5 to 255 characters.
required: false
default: null
patch_file:
description:
- Name of the patch file that is applied for the next startup.
required: false
default: null
slot:
description:
        - Position of the device. The value is a string of 1 to 32 characters.
The possible value of slot is all, slave-board, or the specific slotID.
required: false
default: null
action:
description:
- Display the startup information.
required: false
choices: ['display']
default: null
'''
EXAMPLES = '''
- name: startup module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Display startup information
ce_startup:
action: display
provider: "{{ cli }}"
- name: Set startup patch file
ce_startup:
patch_file: 2.PAT
slot: all
provider: "{{ cli }}"
- name: Set startup software file
ce_startup:
software_file: aa.cc
slot: 1
provider: "{{ cli }}"
- name: Set startup cfg file
ce_startup:
cfg_file: 2.cfg
slot: 1
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"patch_file": "2.PAT",
"slot": "all"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {
"configSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc",
"curentPatchFile": "NULL",
"curentStartupFile": "NULL",
"curentSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc",
"nextPatchFile": "flash:/1.PAT",
"nextStartupFile": "flash:/1.cfg",
"nextSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc",
"position": "5"
}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"StartupInfos": null}
updates:
description: command sent to the device
returned: always
type: list
sample: {"startup patch 2.PAT all"}
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, ce_argument_spec, run_commands
CE_NC_GET_STARTUP_INFO = """
<filter type="subtree">
<cfg xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<startupInfos>
<startupInfo>
<position></position>
<configedSysSoft></configedSysSoft>
<curSysSoft></curSysSoft>
<nextSysSoft></nextSysSoft>
<curStartupFile></curStartupFile>
<nextStartupFile></nextStartupFile>
<curPatchFile></curPatchFile>
<nextPatchFile></nextPatchFile>
</startupInfo>
</startupInfos>
</cfg>
</filter>
"""
class StartUp(object):
"""
Manages system startup information.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.cfg_file = self.module.params['cfg_file']
self.software_file = self.module.params['software_file']
self.patch_file = self.module.params['patch_file']
self.slot = self.module.params['slot']
self.action = self.module.params['action']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.existing = dict()
self.proposed = dict()
self.end_state = dict()
# system startup info
self.startup_info = None
def init_module(self):
""" init module """
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_startup_dict(self):
""" get rollback attributes dict."""
startup_info = dict()
conf_str = CE_NC_GET_STARTUP_INFO
xml_str = get_nc_config(self.module, conf_str)
startup_info["StartupInfos"] = list()
if "<data/>" in xml_str:
return startup_info
else:
re_find = re.findall(r'.*<position>(.*)</position>.*\s*'
r'<nextStartupFile>(.*)</nextStartupFile>.*\s*'
r'<configedSysSoft>(.*)</configedSysSoft>.*\s*'
r'<curSysSoft>(.*)</curSysSoft>.*\s*'
r'<nextSysSoft>(.*)</nextSysSoft>.*\s*'
r'<curStartupFile>(.*)</curStartupFile>.*\s*'
r'<curPatchFile>(.*)</curPatchFile>.*\s*'
r'<nextPatchFile>(.*)</nextPatchFile>.*', xml_str)
for mem in re_find:
startup_info["StartupInfos"].append(
dict(position=mem[0], nextStartupFile=mem[1], configSysSoft=mem[2], curentSysSoft=mem[3],
nextSysSoft=mem[4], curentStartupFile=mem[5], curentPatchFile=mem[6], nextPatchFile=mem[7]))
return startup_info
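        # Example of the structure returned above (values illustrative only, taken
        # from the RETURN documentation of this module):
        #
        #   {"StartupInfos": [{"position": "5",
        #                      "nextStartupFile": "flash:/1.cfg",
        #                      "configSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc",
        #                      "curentSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc",
        #                      "nextSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc",
        #                      "curentStartupFile": "NULL",
        #                      "curentPatchFile": "NULL",
        #                      "nextPatchFile": "flash:/1.PAT"}]}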
def get_cfg_filename_type(self, filename):
"""Gets the type of cfg filename, such as cfg, zip, dat..."""
if filename is None:
return None
if ' ' in filename:
self.module.fail_json(
msg='Error: Configuration file name include spaces.')
iftype = None
if filename.endswith('.cfg'):
iftype = 'cfg'
elif filename.endswith('.zip'):
iftype = 'zip'
elif filename.endswith('.dat'):
iftype = 'dat'
else:
return None
return iftype.lower()
def get_pat_filename_type(self, filename):
"""Gets the type of patch filename, such as cfg, zip, dat..."""
if filename is None:
return None
if ' ' in filename:
self.module.fail_json(
msg='Error: Patch file name include spaces.')
iftype = None
if filename.endswith('.PAT'):
iftype = 'PAT'
else:
return None
return iftype.upper()
def get_software_filename_type(self, filename):
"""Gets the type of software filename, such as cfg, zip, dat..."""
if filename is None:
return None
if ' ' in filename:
self.module.fail_json(
msg='Error: Software file name include spaces.')
iftype = None
if filename.endswith('.cc'):
iftype = 'cc'
else:
return None
return iftype.lower()
def startup_next_cfg_file(self):
"""set next cfg file"""
commands = list()
cmd = {'output': None, 'command': ''}
if self.slot:
cmd['command'] = "startup saved-configuration %s slot %s" % (
self.cfg_file, self.slot)
commands.append(cmd)
self.updates_cmd.append(
"startup saved-configuration %s slot %s" % (self.cfg_file, self.slot))
run_commands(self.module, commands)
self.changed = True
else:
cmd['command'] = "startup saved-configuration %s" % self.cfg_file
commands.append(cmd)
self.updates_cmd.append(
"startup saved-configuration %s" % self.cfg_file)
run_commands(self.module, commands)
self.changed = True
def startup_next_software_file(self):
"""set next software file"""
commands = list()
cmd = {'output': None, 'command': ''}
if self.slot:
if self.slot == "all" or self.slot == "slave-board":
cmd['command'] = "startup system-software %s %s" % (
self.software_file, self.slot)
commands.append(cmd)
self.updates_cmd.append(
"startup system-software %s %s" % (self.software_file, self.slot))
run_commands(self.module, commands)
self.changed = True
else:
cmd['command'] = "startup system-software %s slot %s" % (
self.software_file, self.slot)
commands.append(cmd)
self.updates_cmd.append(
"startup system-software %s slot %s" % (self.software_file, self.slot))
run_commands(self.module, commands)
self.changed = True
if not self.slot:
cmd['command'] = "startup system-software %s" % self.software_file
commands.append(cmd)
self.updates_cmd.append(
"startup system-software %s" % self.software_file)
run_commands(self.module, commands)
self.changed = True
def startup_next_pat_file(self):
"""set next patch file"""
commands = list()
cmd = {'output': None, 'command': ''}
if self.slot:
if self.slot == "all":
cmd['command'] = "startup patch %s %s" % (
self.patch_file, self.slot)
commands.append(cmd)
self.updates_cmd.append(
"startup patch %s %s" % (self.patch_file, self.slot))
run_commands(self.module, commands)
self.changed = True
else:
cmd['command'] = "startup patch %s slot %s" % (
self.patch_file, self.slot)
commands.append(cmd)
self.updates_cmd.append(
"startup patch %s slot %s" % (self.patch_file, self.slot))
run_commands(self.module, commands)
self.changed = True
if not self.slot:
cmd['command'] = "startup patch %s" % self.patch_file
commands.append(cmd)
self.updates_cmd.append(
"startup patch %s" % self.patch_file)
run_commands(self.module, commands)
self.changed = True
def check_params(self):
"""Check all input params"""
# cfg_file check
if self.cfg_file:
if not self.get_cfg_filename_type(self.cfg_file):
self.module.fail_json(
msg='Error: Invalid cfg file name or cfg file name extension ( *.cfg, *.zip, *.dat ).')
# software_file check
if self.software_file:
if not self.get_software_filename_type(self.software_file):
self.module.fail_json(
msg='Error: Invalid software file name or software file name extension ( *.cc).')
# patch_file check
if self.patch_file:
if not self.get_pat_filename_type(self.patch_file):
self.module.fail_json(
msg='Error: Invalid patch file name or patch file name extension ( *.PAT ).')
# slot check
if self.slot:
if self.slot.isdigit():
if int(self.slot) <= 0 or int(self.slot) > 16:
self.module.fail_json(
msg='Error: The slot number is not in the range from 1 to 16.')
else:
if len(self.slot) <= 0 or len(self.slot) > 32:
self.module.fail_json(
msg='Error: The length of the slot string is not in the range from 1 to 32.')
def get_proposed(self):
"""get proposed info"""
if self.cfg_file:
self.proposed["cfg_file"] = self.cfg_file
if self.software_file:
self.proposed["system_file"] = self.software_file
if self.patch_file:
self.proposed["patch_file"] = self.patch_file
if self.slot:
self.proposed["slot"] = self.slot
def get_existing(self):
"""get existing info"""
if not self.startup_info:
return
self.existing["StartupInfos"] = self.startup_info["StartupInfos"]
def get_end_state(self):
"""get end state info"""
self.end_state["StartupInfos"] = None
def work(self):
"""worker"""
self.check_params()
self.get_proposed()
self.startup_info = self.get_startup_dict()
self.get_existing()
if self.cfg_file:
self.startup_next_cfg_file()
if self.software_file:
self.startup_next_software_file()
if self.patch_file:
self.startup_next_pat_file()
if self.action == "display":
self.startup_info = self.get_startup_dict()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
cfg_file=dict(type='str'),
software_file=dict(type='str'),
patch_file=dict(type='str'),
slot=dict(type='str'),
action=dict(type='str', choices=['display'])
)
argument_spec.update(ce_argument_spec)
module = StartUp(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
unreal666/youtube-dl | youtube_dl/extractor/parliamentliveuk.py | 30 | 1636 | from __future__ import unicode_literals
from .common import InfoExtractor
class ParliamentLiveUKIE(InfoExtractor):
IE_NAME = 'parliamentlive.tv'
IE_DESC = 'UK parliament videos'
_VALID_URL = r'(?i)https?://(?:www\.)?parliamentlive\.tv/Event/Index/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_TESTS = [{
'url': 'http://parliamentlive.tv/Event/Index/c1e9d44d-fd6c-4263-b50f-97ed26cc998b',
'info_dict': {
'id': '1_af9nv9ym',
'ext': 'mp4',
'title': 'Home Affairs Committee',
'uploader_id': 'FFMPEG-01',
'timestamp': 1422696664,
'upload_date': '20150131',
},
}, {
'url': 'http://parliamentlive.tv/event/index/3f24936f-130f-40bf-9a5d-b3d6479da6a4',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://vodplayer.parliamentlive.tv/?mid=' + video_id, video_id)
widget_config = self._parse_json(self._search_regex(
r'(?s)kWidgetConfig\s*=\s*({.+});',
webpage, 'kaltura widget config'), video_id)
kaltura_url = 'kaltura:%s:%s' % (
widget_config['wid'][1:], widget_config['entry_id'])
event_title = self._download_json(
'http://parliamentlive.tv/Event/GetShareVideo/' + video_id, video_id)['event']['title']
return {
'_type': 'url_transparent',
'title': event_title,
'description': '',
'url': kaltura_url,
'ie_key': 'Kaltura',
}
| unlicense |
venkatarajasekhar/kernel_raybst | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
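# Illustrative example (added, not in the original script): analyse("14", ["M", "eq", 4], "0")
# extracts digit 0 of the status value (14 -> 4), compares it against the expected value 4 and returns 1.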
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
carljm/django | tests/forms_tests/field_tests/test_splitdatetimefield.py | 12 | 3228 | from __future__ import unicode_literals
import datetime
from django.forms import SplitDateTimeField, ValidationError
from django.forms.widgets import SplitDateTimeWidget
from django.test import SimpleTestCase
from django.utils import six
class SplitDateTimeFieldTest(SimpleTestCase):
def test_splitdatetimefield_1(self):
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(
datetime.datetime(2006, 1, 10, 7, 30),
f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)])
)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean('hello')
with six.assertRaisesRegex(self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'"):
f.clean(['hello', 'there'])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(['2006-01-10', 'there'])
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(
datetime.datetime(2006, 1, 10, 7, 30),
f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)])
)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(['']))
self.assertIsNone(f.clean(['', '']))
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean('hello')
with six.assertRaisesRegex(self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'"):
f.clean(['hello', 'there'])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(['2006-01-10', 'there'])
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(['hello', '07:30'])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(['2006-01-10', ''])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(['2006-01-10'])
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f.has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
| bsd-3-clause |
alexmandujano/django | tests/i18n/commands/extraction.py | 44 | 22998 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import warnings
from django.core import management
from django.test import SimpleTestCase
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
from django.utils.unittest import SkipTest
LOCALE='de'
class ExtractorTests(SimpleTestCase):
PO_FILE='locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(upath(__file__)))
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def assertMsgId(self, msgid, s, use_quotes=True):
q = '"'
if use_quotes:
msgid = '"%s"' % msgid
q = "'"
needle = 'msgid %s' % msgid
msgid = re.escape(msgid)
return self.assertTrue(re.search('^msgid %s' % msgid, s, re.MULTILINE), 'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n':needle, 'q':q})
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertTrue('#. Translators: This comment should be extracted' in po_contents)
self.assertTrue('This comment should not be extracted' not in po_contents)
# Comments in templates
self.assertTrue('#. Translators: Django template comment for translators' in po_contents)
self.assertTrue("#. Translators: Django comment block for translators\n#. string's meaning unveiled" in po_contents)
self.assertTrue('#. Translators: One-line translator comment #1' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #1\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #2' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #2\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #3' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #3\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #4' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #4\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #5 -- with non ASCII characters: áéíóúö' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #5 -- with non ASCII characters: áéíóúö\n#. continued here.' in po_contents)
def test_templatize_trans_tag(self):
# ticket #11240
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Literal with a percent symbol at the end %%', po_contents)
self.assertMsgId('Literal with a percent %% symbol in the middle', po_contents)
self.assertMsgId('Completed 50%% of all the tasks', po_contents)
self.assertMsgId('Completed 99%% of all the tasks', po_contents)
self.assertMsgId("Shouldn't double escape this sequence: %% (two percent signs)", po_contents)
self.assertMsgId("Shouldn't double escape this sequence %% either", po_contents)
self.assertMsgId("Looks like a str fmt spec %%s but shouldn't be interpreted as such", po_contents)
self.assertMsgId("Looks like a str fmt spec %% o but shouldn't be interpreted as such", po_contents)
def test_templatize_blocktrans_tag(self):
# ticket #11966
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('I think that 100%% is more that 50%% of anything.', po_contents)
self.assertMsgId('I think that 100%% is more that 50%% of %(obj)s.', po_contents)
self.assertMsgId("Blocktrans extraction shouldn't double escape this: %%, a=%(a)s", po_contents)
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
from django.core.management.commands.makemessages import Command
self.assertTrue(Command.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
with self.assertRaises(SyntaxError) as context_manager:
management.call_command('makemessages', locale=LOCALE, extensions=['tpl'], verbosity=0)
six.assertRegex(self, str(context_manager.exception),
r'Translation blocks must not include other block tags: blocktrans \(file templates[/\\]template_with_error\.tpl, line 3\)'
)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
stdout = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=stdout)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(stdout.getvalue()))
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
stdout = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=stdout)
self.assertIn("code_sample.py:4", force_text(stdout.getvalue()))
def test_template_message_context_extractor(self):
"""
Ensure that message contexts are correctly extracted for the
{% trans %} and {% blocktrans %} template tags.
Refs #14806.
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Special trans context #1"' in po_contents)
self.assertMsgId("Translatable literal #7a", po_contents)
self.assertTrue('msgctxt "Special trans context #2"' in po_contents)
self.assertMsgId("Translatable literal #7b", po_contents)
self.assertTrue('msgctxt "Special trans context #3"' in po_contents)
self.assertMsgId("Translatable literal #7c", po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context #1"' in po_contents)
self.assertMsgId("Translatable literal #8a", po_contents)
self.assertTrue('msgctxt "Special blocktrans context #2"' in po_contents)
self.assertMsgId("Translatable literal #8b-singular", po_contents)
self.assertTrue("Translatable literal #8b-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #3"' in po_contents)
self.assertMsgId("Translatable literal #8c-singular", po_contents)
self.assertTrue("Translatable literal #8c-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #4"' in po_contents)
self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Context wrapped in single quotes"' in po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Special blocktrans context wrapped in single quotes"' in po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #1' \(file templates[/\\]comments.thtml, line 4\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #3' \(file templates[/\\]comments.thtml, line 6\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #4' \(file templates[/\\]comments.thtml, line 8\) was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertFalse('ignored comment #1' in po_contents)
self.assertFalse('Translators: ignored i18n comment #1' in po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertFalse('ignored i18n comment #2' in po_contents)
self.assertFalse('ignored comment #2' in po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertFalse('ignored comment #3' in po_contents)
self.assertFalse('ignored i18n comment #3' in po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertFalse('ignored comment #4' in po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertFalse('ignored comment #5' in po_contents)
self.assertFalse('ignored i18n comment #4' in po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertTrue('#. Translators: valid i18n comment #5' in po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertTrue('#. Translators: valid i18n comment #6' in po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertTrue('#. Translators: valid i18n comment #7' in po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
class JavascriptExtractorTests(ExtractorTests):
PO_FILE='locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
management.call_command('makemessages', domain='djangojs', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('This one as well.', po_contents)
self.assertMsgId(r'He said, \"hello\".', po_contents)
self.assertMsgId("okkkk", po_contents)
self.assertMsgId("TEXT", po_contents)
self.assertMsgId("It's at http://example.com", po_contents)
self.assertMsgId("String", po_contents)
self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
self.assertMsgId("foo", po_contents)
self.assertMsgId("bar", po_contents)
self.assertMsgId("baz", po_contents)
self.assertMsgId("quz", po_contents)
self.assertMsgId("foobar", po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_option(self):
os.chdir(self.test_dir)
ignore_patterns = [
os.path.join('ignore_dir', '*'),
'xxx_*',
]
stdout = StringIO()
management.call_command('makemessages', locale=[LOCALE], verbosity=2,
ignore_patterns=ignore_patterns, stdout=stdout)
data = stdout.getvalue()
self.assertTrue("ignoring directory ignore_dir" in data)
self.assertTrue("ignoring file xxx_ignored.html" in data)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
self.assertNotMsgId('This should be ignored too.', po_contents)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(upath(__file__)))
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
# On Python < 3.2 os.symlink() exists only on Unix
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assertTrue(os.path.islink(self.symlinked_dir))
else:
# On Python >= 3.2, os.symlink() always exists but can
# fail at runtime when the user doesn't have the needed permissions on
# Windows versions that support symbolic links (>= 6/Vista).
# See Python issue 9333 (http://bugs.python.org/issue9333).
# Skip the test in that case
try:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
except (OSError, NotImplementedError):
raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should be included.', po_contents)
self.assertTrue('templates_symlinked/test.html' in po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
def tearDown(self):
super(CopyPluralFormsExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
self._rmrf('locale/es')
except OSError:
pass
os.chdir(self._cwd)
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertTrue('Plural-Forms: nplurals=2; plural=(n != 1)' in po_contents)
def test_override_plural_forms(self):
"""Ticket #20311."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_ES))
with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
self.assertEqual(1, len(found))
class NoWrapExtractorTests(ExtractorTests):
def test_no_wrap_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)
def test_no_wrap_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
class LocationCommentsTests(ExtractorTests):
def test_no_location_enabled(self):
"""Behavior is correct if --no-location switch is specified. See #16903."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
needle = os.sep.join(['#: templates', 'test.html:55'])
self.assertFalse(needle in po_contents, '"%s" shouldn\'t be in final .po file.' % needle)
def test_no_location_disabled(self):
"""Behavior is correct if --no-location switch isn't specified."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
# Standard comment with source file relative path should be present -- #16903
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
needle = os.sep.join(['#: %stemplates' % cwd_prefix, 'test.html:55'])
self.assertTrue(needle in po_contents, '"%s" not found in final .po file.' % needle)
# #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
bad_suffix = '.py'
bad_string = 'templates%stest.html%s' % (os.sep, bad_suffix)
self.assertFalse(bad_string in po_contents, '"%s" shouldn\'t be in final .po file.' % bad_string)
class KeepPotFileExtractorTests(ExtractorTests):
POT_FILE='locale/django.pot'
def setUp(self):
super(KeepPotFileExtractorTests, self).setUp()
def tearDown(self):
super(KeepPotFileExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.unlink(self.POT_FILE)
except OSError:
pass
os.chdir(self._cwd)
def test_keep_pot_disabled_by_default(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_explicitly_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=False)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=True)
self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
LOCALES = ['pt', 'de', 'ch']
def tearDown(self):
os.chdir(self.test_dir)
for locale in self.LOCALES:
try:
self._rmrf('locale/%s' % locale)
except OSError:
pass
os.chdir(self._cwd)
def test_multiple_locales(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['pt','de'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
| bsd-3-clause |
holmeszyx/pyandroid_tools | aapt.py | 1 | 1392 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
try:
import sys
from os import popen3 as pipe
except ImportError, e:
print("require mode no found")
exit(-1)
class AAPT():
__aapt_path = None
__output = None
__error = None
def __init__(self, aapt_path = None):
self.__aapt_path = aapt_path
def __clean__(self):
self.__output = None
self.__error = None
def __read_output__(self,fd):
ret = ''
while True:
line = fd.readline()
if not line:
break
ret += line
if len(ret) == 0:
ret = None
return ret
def __run_commond__(self, cmd):
self.__clean__()
try:
w, r, e = pipe(self.__build_commond__(cmd), mode = "r")
self.__output = self.__read_output__(r)
self.__error = self.__read_output__(e)
w.close()
r.close()
e.close()
except Exception, exce:
print(exce)
print("run commd error")
def __build_commond__(self, cmd):
return self.__aapt_path + " " + cmd
def set_aapt_path(self, path):
self.__aapt_path = path
def __build_dump_cmd__(self, cmd):
return "d " + cmd
def dump_badging(self, apk):
self.__run_commond__("d badging " + apk)
return self.__output
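# Minimal usage sketch (illustrative only; the aapt path and apk name below are placeholders):
# aapt = AAPT("/path/to/android-sdk/build-tools/aapt")
# print(aapt.dump_badging("example.apk"))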
| apache-2.0 |
fuhongliang/odoo | addons/hw_escpos/escpos/escpos.py | 174 | 31717 | # -*- coding: utf-8 -*-
import time
import copy
import io
import base64
import math
import md5
import re
import traceback
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from PIL import Image
try:
import jcconv
except ImportError:
jcconv = None
try:
import qrcode
except ImportError:
qrcode = None
from constants import *
from exceptions import *
def utfstr(stuff):
""" converts stuff to string and does without failing if stuff is a utf8 string """
if isinstance(stuff,basestring):
return stuff
else:
return str(stuff)
class StyleStack:
"""
The stylestack is used by the xml receipt serializer to compute the active styles along the xml
document. Styles are just xml attributes, there is no css mechanism. But the style applied by
the attributes are inherited by deeper nodes.
"""
def __init__(self):
self.stack = []
self.defaults = { # default style values
'align': 'left',
'underline': 'off',
'bold': 'off',
'size': 'normal',
'font' : 'a',
'width': 48,
'indent': 0,
'tabwidth': 2,
'bullet': ' - ',
'line-ratio':0.5,
'color': 'black',
'value-decimals': 2,
'value-symbol': '',
'value-symbol-position': 'after',
'value-autoint': 'off',
'value-decimals-separator': '.',
'value-thousands-separator': ',',
'value-width': 0,
}
self.types = { # attribute types, default is string and can be ommitted
'width': 'int',
'indent': 'int',
'tabwidth': 'int',
'line-ratio': 'float',
'value-decimals': 'int',
'value-width': 'int',
}
self.cmds = {
# translation from styles to escpos commands
# some style do not correspond to escpos command are used by
# the serializer instead
'align': {
'left': TXT_ALIGN_LT,
'right': TXT_ALIGN_RT,
'center': TXT_ALIGN_CT,
'_order': 1,
},
'underline': {
'off': TXT_UNDERL_OFF,
'on': TXT_UNDERL_ON,
'double': TXT_UNDERL2_ON,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'bold': {
'off': TXT_BOLD_OFF,
'on': TXT_BOLD_ON,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'font': {
'a': TXT_FONT_A,
'b': TXT_FONT_B,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'size': {
'normal': TXT_NORMAL,
'double-height': TXT_2HEIGHT,
'double-width': TXT_2WIDTH,
'double': TXT_DOUBLE,
'_order': 1,
},
'color': {
'black': TXT_COLOR_BLACK,
'red': TXT_COLOR_RED,
'_order': 1,
},
}
self.push(self.defaults)
def get(self,style):
""" what's the value of a style at the current stack level"""
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None
def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val)
def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style)
def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr])
def pop(self):
""" pop a style stack level """
if len(self.stack) > 1 :
self.stack = self.stack[:-1]
def to_escpos(self):
""" converts the current style to an escpos command string """
cmd = ''
ordered_cmds = self.cmds.keys()
ordered_cmds.sort(lambda x,y: cmp(self.cmds[x]['_order'], self.cmds[y]['_order']))
for style in ordered_cmds:
cmd += self.cmds[style][self.get(style)]
return cmd
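# Usage sketch (illustrative, not part of the original driver): the receipt serializer pushes one
# stack level per xml node and pops it when the node is closed, e.g.:
# stack = StyleStack()
# stack.push({'bold': 'on', 'size': 'double'})
# prefix = stack.to_escpos() # escpos bytes activating the current style
# stack.pop() # back to the previous style level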
class XmlSerializer:
"""
Converts the xml inline / block tree structure to a string,
keeping track of newlines and spacings.
The string is outputted asap to the provided escpos driver.
"""
def __init__(self,escpos):
self.escpos = escpos
self.stack = ['block']
self.dirty = False
def start_inline(self,stylestack=None):
""" starts an inline entity with an optional style definition """
self.stack.append('inline')
if self.dirty:
self.escpos._raw(' ')
if stylestack:
self.style(stylestack)
def start_block(self,stylestack=None):
""" starts a block entity with an optional style definition """
if self.dirty:
self.escpos._raw('\n')
self.dirty = False
self.stack.append('block')
if stylestack:
self.style(stylestack)
def end_entity(self):
""" ends the entity definition. (but does not cancel the active style!) """
if self.stack[-1] == 'block' and self.dirty:
self.escpos._raw('\n')
self.dirty = False
if len(self.stack) > 1:
self.stack = self.stack[:-1]
def pre(self,text):
""" puts a string of text in the entity keeping the whitespace intact """
if text:
self.escpos.text(text)
self.dirty = True
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self.dirty = True
self.escpos.text(text)
def linebreak(self):
""" inserts a linebreak in the entity """
self.dirty = False
self.escpos._raw('\n')
def style(self,stylestack):
""" apply a style to the entity (only applies to content added after the definition) """
self.raw(stylestack.to_escpos())
def raw(self,raw):
""" puts raw text or escpos command in the entity without affecting the state of the serializer """
self.escpos._raw(raw)
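# Illustrative flow (assumed from the methods above): for a block node containing the text "hello"
# followed by an inline child containing "world", the receipt printer calls start_block(),
# text('hello'), start_inline(), text('world'), end_entity(), end_entity(), which emits
# "hello world" followed by a newline on the escpos device.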
class XmlLineSerializer:
"""
This is used to convert a xml tree into a single line, with a left and a right part.
The content is not output to escpos directly, and is intended to be fedback to the
XmlSerializer as the content of a block entity.
"""
def __init__(self, indent=0, tabwidth=2, width=48, ratio=0.5):
self.tabwidth = tabwidth
self.indent = indent
self.width = max(0, width - int(tabwidth*indent))
self.lwidth = int(self.width*ratio)
self.rwidth = max(0, self.width - self.lwidth)
self.clwidth = 0
self.crwidth = 0
self.lbuffer = ''
self.rbuffer = ''
self.left = True
def _txt(self,txt):
if self.left:
if self.clwidth < self.lwidth:
txt = txt[:max(0, self.lwidth - self.clwidth)]
self.lbuffer += txt
self.clwidth += len(txt)
else:
if self.crwidth < self.rwidth:
txt = txt[:max(0, self.rwidth - self.crwidth)]
self.rbuffer += txt
self.crwidth += len(txt)
def start_inline(self,stylestack=None):
if (self.left and self.clwidth) or (not self.left and self.crwidth):
self._txt(' ')
def start_block(self,stylestack=None):
self.start_inline(stylestack)
def end_entity(self):
pass
def pre(self,text):
if text:
self._txt(text)
def text(self,text):
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self._txt(text)
def linebreak(self):
pass
def style(self,stylestack):
pass
def raw(self,raw):
pass
def start_right(self):
self.left = False
def get_line(self):
return ' ' * self.indent * self.tabwidth + self.lbuffer + ' ' * (self.width - self.clwidth - self.crwidth) + self.rbuffer
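# Illustrative example (assumed): XmlLineSerializer(width=20, ratio=0.5) collects text into a left
# and a right buffer; after start_right() subsequent text goes to the right buffer, and get_line()
# pads the middle with spaces so both parts are column-aligned on a single 20-character line.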
class Escpos:
""" ESC/POS Printer object """
device = None
encoding = None
img_cache = {}
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (image_border / 2, image_border / 2)
else:
return (image_border / 2, (image_border / 2) + 1)
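# Example (illustrative): a 100 pixel wide image needs 32 - (100 % 32) = 28 columns of padding,
# so _check_image_size(100) returns (14, 14), i.e. 14 blank columns added on each side.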
def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0
def _raw_print_image(self, line, size, output=None ):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
raw = ""
def __raw(string):
if output:
output(string)
else:
self._raw(string)
raw += S_RASTER_N
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
raw += buffer.decode('hex')
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
raw += buffer.decode("hex")
buffer = ""
cont = 0
return raw
def _convert_image(self, im):
""" Parse image and prepare it to a printable format """
pixels = []
pix_line = ""
im_left = ""
im_right = ""
switch = 0
img_size = [ 0, 0 ]
if im.size[0] > 512:
print "WARNING: Image is wider than 512 and could be truncated at print time "
if im.size[1] > 255:
raise ImageSizeError()
im_border = self._check_image_size(im.size[0])
for i in range(im_border[0]):
im_left += "0"
for i in range(im_border[1]):
im_right += "0"
for y in range(im.size[1]):
img_size[1] += 1
pix_line += im_left
img_size[0] += im_border[0]
for x in range(im.size[0]):
img_size[0] += 1
RGB = im.getpixel((x, y))
im_color = (RGB[0] + RGB[1] + RGB[2])
im_pattern = "1X0"
pattern_len = len(im_pattern)
switch = (switch - 1 ) * (-1)
for x in range(pattern_len):
if im_color <= (255 * 3 / pattern_len * (x+1)):
if im_pattern[x] == "X":
pix_line += "%d" % switch
else:
pix_line += im_pattern[x]
break
elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
pix_line += im_pattern[-1]
break
pix_line += im_right
img_size[0] += im_border[1]
return (pix_line, img_size)
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def print_base64_image(self,img):
print 'print_b64_img'
id = md5.new(img).digest()
if id not in self.img_cache:
print 'not in cache'
img = img[img.find(',')+1:]
f = io.BytesIO()
f.write(base64.decodestring(img))
f.seek(0)
img_rgba = Image.open(f)
img = Image.new('RGB', img_rgba.size, (255,255,255))
channels = img_rgba.split()
if len(channels) > 1:
# use alpha channel as mask
img.paste(img_rgba, mask=channels[3])
else:
img.paste(img_rgba)
print 'convert image'
pix_line, img_size = self._convert_image(img)
print 'print image'
buffer = self._raw_print_image(pix_line, img_size)
self.img_cache[id] = buffer
print 'raw image'
self._raw(self.img_cache[id])
def qr(self,text):
""" Print QR Code for the provided string """
qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
qr_code.add_data(text)
qr_code.make(fit=True)
qr_img = qr_code.make_image()
im = qr_img._img.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
""" Print Barcode """
# Align Bar Code()
self._raw(TXT_ALIGN_CT)
# Height
if 2 <= height <= 6:
self._raw(BARCODE_HEIGHT)
else:
raise BarcodeSizeError()
# Width
if 1 <= width <= 255:
self._raw(BARCODE_WIDTH)
else:
raise BarcodeSizeError()
# Font
if font.upper() == "B":
self._raw(BARCODE_FONT_B)
else: # DEFAULT FONT: A
self._raw(BARCODE_FONT_A)
# Position
if pos.upper() == "OFF":
self._raw(BARCODE_TXT_OFF)
elif pos.upper() == "BOTH":
self._raw(BARCODE_TXT_BTH)
elif pos.upper() == "ABOVE":
self._raw(BARCODE_TXT_ABV)
else: # DEFAULT POSITION: BELOW
self._raw(BARCODE_TXT_BLW)
# Type
if bc.upper() == "UPC-A":
self._raw(BARCODE_UPC_A)
elif bc.upper() == "UPC-E":
self._raw(BARCODE_UPC_E)
elif bc.upper() == "EAN13":
self._raw(BARCODE_EAN13)
elif bc.upper() == "EAN8":
self._raw(BARCODE_EAN8)
elif bc.upper() == "CODE39":
self._raw(BARCODE_CODE39)
elif bc.upper() == "ITF":
self._raw(BARCODE_ITF)
elif bc.upper() == "NW7":
self._raw(BARCODE_NW7)
else:
raise BarcodeTypeError()
# Print Code
if code:
self._raw(code)
else:
raise BarcodeCodeError()
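# Usage sketch (illustrative; the code value below is hypothetical): self.barcode('123456789012', 'EAN13')
# centers the barcode, selects the EAN13 symbology and prints the human readable text below it.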
def receipt(self,xml):
"""
Prints an xml based receipt definition
"""
def strclean(string):
if not string:
string = ''
string = string.strip()
string = re.sub('\s+',' ',string)
return string
def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
decimals = max(0,int(decimals))
width = max(0,int(width))
value = float(value)
if autoint and math.floor(value) == value:
decimals = 0
if width == 0:
width = ''
if thousands_separator:
formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
else:
formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
ret = formatstr.format(value)
ret = ret.replace(',','COMMA')
ret = ret.replace('.','DOT')
ret = ret.replace('COMMA',thousands_separator)
ret = ret.replace('DOT',decimals_separator)
if symbol:
if position == 'after':
ret = ret + symbol
else:
ret = symbol + ret
return ret
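# Example (illustrative): format_value(1234.5, decimals=2, decimals_separator='.',
# thousands_separator=',', symbol='$', position='before') returns '$1,234.50'.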
def print_elem(stylestack, serializer, elem, indent=0):
elem_styles = {
'h1': {'bold': 'on', 'size':'double'},
'h2': {'size':'double'},
'h3': {'bold': 'on', 'size':'double-height'},
'h4': {'size': 'double-height'},
'h5': {'bold': 'on'},
'em': {'font': 'b'},
'b': {'bold': 'on'},
}
stylestack.push()
if elem.tag in elem_styles:
stylestack.set(elem_styles[elem.tag])
stylestack.set(elem.attrib)
if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
serializer.start_block(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag in ('span','em','b','left','right'):
serializer.start_inline(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag == 'value':
serializer.start_inline(stylestack)
serializer.pre(format_value(
elem.text,
decimals=stylestack.get('value-decimals'),
width=stylestack.get('value-width'),
decimals_separator=stylestack.get('value-decimals-separator'),
thousands_separator=stylestack.get('value-thousands-separator'),
autoint=(stylestack.get('value-autoint') == 'on'),
symbol=stylestack.get('value-symbol'),
position=stylestack.get('value-symbol-position')
))
serializer.end_entity()
elif elem.tag == 'line':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'left':
print_elem(stylestack,lineserializer,child,indent=indent)
elif child.tag == 'right':
lineserializer.start_right()
print_elem(stylestack,lineserializer,child,indent=indent)
serializer.pre(lineserializer.get_line())
serializer.end_entity()
elif elem.tag == 'ul':
serializer.start_block(stylestack)
bullet = stylestack.get('bullet')
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'ol':
cwidth = len(str(len(elem))) + 2
i = 1
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
i = i + 1
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'pre':
serializer.start_block(stylestack)
serializer.pre(elem.text)
serializer.end_entity()
elif elem.tag == 'hr':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
serializer.start_block(stylestack)
serializer.text('-'*width)
serializer.end_entity()
elif elem.tag == 'br':
serializer.linebreak()
elif elem.tag == 'img':
if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
self.print_base64_image(elem.attrib['src'])
elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
serializer.start_block(stylestack)
self.barcode(strclean(elem.text),elem.attrib['encoding'])
serializer.end_entity()
elif elem.tag == 'cut':
self.cut()
elif elem.tag == 'partialcut':
self.cut(mode='part')
elif elem.tag == 'cashdraw':
self.cashdraw(2)
self.cashdraw(5)
stylestack.pop()
try:
stylestack = StyleStack()
serializer = XmlSerializer(self)
root = ET.fromstring(xml.encode('utf-8'))
self._raw(stylestack.to_escpos())
print_elem(stylestack,serializer,root)
if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
self.cashdraw(2)
self.cashdraw(5)
if not 'cut' in root.attrib or root.attrib['cut'] == 'true' :
self.cut()
except Exception as e:
errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
self.text(errmsg)
self.cut()
raise e
def text(self,txt):
""" Print Utf8 encoded alpha-numeric text """
if not txt:
return
try:
txt = txt.decode('utf-8')
except:
try:
txt = txt.decode('utf-16')
except:
pass
self.extra_chars = 0
def encode_char(char):
"""
Encodes a single utf-8 character into a sequence of
esc-pos code page change instructions and character declarations
"""
char_utf8 = char.encode('utf-8')
encoded = ''
encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
encodings = {
# TODO use ordering to prevent useless switches
# TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis )
'cp437': TXT_ENC_PC437,
'cp850': TXT_ENC_PC850,
'cp852': TXT_ENC_PC852,
'cp857': TXT_ENC_PC857,
'cp858': TXT_ENC_PC858,
'cp860': TXT_ENC_PC860,
'cp863': TXT_ENC_PC863,
'cp865': TXT_ENC_PC865,
'cp866': TXT_ENC_PC866,
'cp862': TXT_ENC_PC862,
'cp720': TXT_ENC_PC720,
'cp936': TXT_ENC_PC936,
'iso8859_2': TXT_ENC_8859_2,
'iso8859_7': TXT_ENC_8859_7,
'iso8859_9': TXT_ENC_8859_9,
'cp1254' : TXT_ENC_WPC1254,
'cp1255' : TXT_ENC_WPC1255,
'cp1256' : TXT_ENC_WPC1256,
'cp1257' : TXT_ENC_WPC1257,
'cp1258' : TXT_ENC_WPC1258,
'katakana' : TXT_ENC_KATAKANA,
}
remaining = copy.copy(encodings)
if not encoding :
encoding = 'cp437'
while True: # Trying all encoding until one succeeds
try:
if encoding == 'katakana': # Japanese characters
if jcconv:
# try to convert japanese text to a half-katakanas
kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
if kata != char_utf8:
self.extra_chars += len(kata.decode('utf-8')) - 1
# the conversion may result in multiple characters
return encode_str(kata.decode('utf-8'))
else:
kata = char_utf8
if kata in TXT_ENC_KATAKANA_MAP:
encoded = TXT_ENC_KATAKANA_MAP[kata]
break
else:
raise ValueError()
else:
encoded = char.encode(encoding)
break
except ValueError: #the encoding failed, select another one and retry
if encoding in remaining:
del remaining[encoding]
if len(remaining) >= 1:
encoding = remaining.items()[0][0]
else:
encoding = 'cp437'
encoded = '\xb1' # could not encode, output error character
break
if encoding != self.encoding:
# if the encoding changed, remember it and prefix the character with
# the esc-pos encoding change sequence
self.encoding = encoding
encoded = encodings[encoding] + encoded
return encoded
def encode_str(txt):
buffer = ''
for c in txt:
buffer += encode_char(c)
return buffer
txt = encode_str(txt)
# if the utf-8 -> codepage conversion inserted extra characters,
# remove double spaces to try to restore the original string length
# and prevent printing alignment issues
while self.extra_chars > 0:
dspace = txt.find(' ')
if dspace > 0:
txt = txt[:dspace] + txt[dspace+1:]
self.extra_chars -= 1
else:
break
self._raw(txt)
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL)
def cut(self, mode=''):
""" Cut paper """
# Fix the size between last line and cut
# TODO: handle this with a line feed
self._raw("\n\n\n\n\n\n")
if mode.upper() == "PART":
self._raw(PAPER_PART_CUT)
else: # DEFAULT MODE: FULL CUT
self._raw(PAPER_FULL_CUT)
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer """
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError()
def hw(self, hw):
""" Hardware operations """
if hw.upper() == "INIT":
self._raw(HW_INIT)
elif hw.upper() == "SELECT":
self._raw(HW_SELECT)
elif hw.upper() == "RESET":
self._raw(HW_RESET)
else: # DEFAULT: DOES NOTHING
pass
def control(self, ctl):
""" Feed control sequences """
if ctl.upper() == "LF":
self._raw(CTL_LF)
elif ctl.upper() == "FF":
self._raw(CTL_FF)
elif ctl.upper() == "CR":
self._raw(CTL_CR)
elif ctl.upper() == "HT":
self._raw(CTL_HT)
elif ctl.upper() == "VT":
self._raw(CTL_VT)
| agpl-3.0 |
windyuuy/opera | chromium/src/third_party/python_26/Lib/site-packages/win32comext/axdebug/debugger.py | 17 | 6941 | import sys, traceback, string
from win32com.axscript import axscript
from win32com.axdebug import codecontainer, axdebug, gateways, documents, contexts, adb, expressions
from win32com.axdebug.util import trace, _wrap, _wrap_remove
import pythoncom
import win32api, winerror
import os
currentDebugger = None
class ModuleTreeNode:
"""Helper class for building a module tree
"""
def __init__(self, module):
modName = module.__name__
self.moduleName = modName
self.module = module
self.realNode = None
self.cont = codecontainer.SourceModuleContainer(module)
def __repr__(self):
return "<ModuleTreeNode wrapping %s>" % (self.module)
def Attach(self, parentRealNode):
self.realNode.Attach(parentRealNode)
def Close(self):
self.module = None
self.cont = None
self.realNode = None
def BuildModule(module, built_nodes, rootNode, create_node_fn, create_node_args ):
if module:
keep = module.__name__
keep = keep and (built_nodes.get(module) is None)
if keep and hasattr(module, '__file__'):
keep = string.lower(os.path.splitext(module.__file__)[1]) not in [".pyd", ".dll"]
# keep = keep and module.__name__=='__main__'
if module and keep:
# print "keeping", module.__name__
node = ModuleTreeNode(module)
built_nodes[module] = node
realNode = apply(create_node_fn, (node,)+create_node_args)
node.realNode = realNode
# Split into parent nodes.
parts = string.split(module.__name__, '.')
if parts[-1][:8]=='__init__': parts = parts[:-1]
parent = string.join(parts[:-1], '.')
parentNode = rootNode
if parent:
parentModule = sys.modules[parent]
BuildModule(parentModule, built_nodes, rootNode, create_node_fn, create_node_args)
if parentModule in built_nodes:
parentNode = built_nodes[parentModule].realNode
node.Attach(parentNode)
def RefreshAllModules(builtItems, rootNode, create_node, create_node_args):
for module in sys.modules.values():
BuildModule(module, builtItems, rootNode, create_node, create_node_args)
# realNode = pdm.CreateDebugDocumentHelper(None) # DebugDocumentHelper node?
# app.CreateApplicationNode() # doc provider node.
class CodeContainerProvider(documents.CodeContainerProvider):
def __init__(self, axdebugger):
self.axdebugger = axdebugger
documents.CodeContainerProvider.__init__(self)
self.currentNumModules = len(sys.modules)
self.nodes = {}
self.axdebugger.RefreshAllModules(self.nodes, self)
def FromFileName(self, fname):
### It appears we can't add modules during a debug session!
# if self.currentNumModules != len(sys.modules):
# self.axdebugger.RefreshAllModules(self.nodes, self)
# self.currentNumModules = len(sys.modules)
# for key in self.ccsAndNodes.keys():
# print "File:", key
return documents.CodeContainerProvider.FromFileName(self, fname)
def Close(self):
documents.CodeContainerProvider.Close(self)
self.axdebugger = None
print "Closing %d nodes" % (len(self.nodes))
for node in self.nodes.values():
node.Close()
self.nodes = {}
class OriginalInterfaceMaker:
def MakeInterfaces(self, pdm):
app = pdm.CreateApplication()
self.cookie = pdm.AddApplication(app)
root = app.GetRootNode()
return app, root
def CloseInterfaces(self, pdm):
pdm.RemoveApplication(self.cookie)
class SimpleHostStyleInterfaceMaker:
def MakeInterfaces(self, pdm):
app = pdm.GetDefaultApplication()
root = app.GetRootNode()
return app, root
def CloseInterfaces(self, pdm):
pass
class AXDebugger:
def __init__(self, interfaceMaker = None, processName = None):
if processName is None: processName = "Python Process"
if interfaceMaker is None: interfaceMaker = SimpleHostStyleInterfaceMaker()
self.pydebugger = adb.Debugger()
self.pdm=pythoncom.CoCreateInstance(axdebug.CLSID_ProcessDebugManager,None,pythoncom.CLSCTX_ALL, axdebug.IID_IProcessDebugManager)
self.app, self.root = interfaceMaker.MakeInterfaces(self.pdm)
self.app.SetName(processName)
self.interfaceMaker = interfaceMaker
expressionProvider = _wrap(expressions.ProvideExpressionContexts(), axdebug.IID_IProvideExpressionContexts)
self.expressionCookie = self.app.AddGlobalExpressionContextProvider(expressionProvider)
contProvider = CodeContainerProvider(self)
self.pydebugger.AttachApp(self.app, contProvider)
def Break(self):
# Get the frame we start debugging from - this is the frame 1 level up
try:
1 + ''
except:
frame = sys.exc_info()[2].tb_frame.f_back
# Get/create the debugger, and tell it to break.
self.app.StartDebugSession()
# self.app.CauseBreak()
self.pydebugger.SetupAXDebugging(None, frame)
self.pydebugger.set_trace()
def Close(self):
self.pydebugger.ResetAXDebugging()
self.interfaceMaker.CloseInterfaces(self.pdm)
self.pydebugger.CloseApp()
self.app.RemoveGlobalExpressionContextProvider(self.expressionCookie)
self.expressionCookie = None
self.pdm = None
self.app = None
self.pydebugger = None
self.root = None
def RefreshAllModules(self, nodes, containerProvider):
RefreshAllModules(nodes, self.root, self.CreateApplicationNode, (containerProvider,))
def CreateApplicationNode(self, node, containerProvider):
realNode = self.app.CreateApplicationNode()
document = documents.DebugDocumentText(node.cont)
document = _wrap(document, axdebug.IID_IDebugDocument)
node.cont.debugDocument = document
provider = documents.DebugDocumentProvider(document)
provider = _wrap(provider, axdebug.IID_IDebugDocumentProvider)
realNode.SetDocumentProvider(provider)
containerProvider.AddCodeContainer(node.cont, realNode)
return realNode
def _GetCurrentDebugger():
global currentDebugger
if currentDebugger is None:
currentDebugger = AXDebugger()
return currentDebugger
def Break():
_GetCurrentDebugger().Break()
brk = Break
set_trace = Break
def dosomethingelse():
a=2
b = "Hi there"
def dosomething():
a=1
b=2
dosomethingelse()
def test():
Break()
raw_input("Waiting...")
dosomething()
print "Done"
if __name__=='__main__':
print "About to test the debugging interfaces!"
test()
print " %d/%d com objects still alive" % (pythoncom._GetInterfaceCount(), pythoncom._GetGatewayCount())
| bsd-3-clause |
basuke/kanshin-export | bin/queue/keyword_parse.py | 1 | 2648 | #!/usr/bin/env python
from queue import *
from kanshin.com.keyword import KeywordPage
from kanshin.data import save_keyword, save_connection
from kanshin.com.export import is_imported
from kanshin.com.browser import KanshinBrowser
keyword_parse = queues.keyword_parse
image_download = queues.image_download
user_download = queues.user_download
keyword_download = queues.keyword_download
def job(keyword_id):
def with_html(html):
page = KeywordPage(keyword_id, html)
record = page.record
if record['images']:
record['images'] = convert_kanshin_images(record['images'], image_download)
if page.more_comments:
logger.info('download whole keyword comments for {}'.format(keyword_id))
browser = KanshinBrowser()
browser.open('/keyword/{kid}/comment'.format(kid=keyword_id))
browser.save_page()
record['comments'] = KeywordPage(keyword_id, browser.response.text).comments
if page.more_connections:
logger.info('download whole keyword connections for {}'.format(keyword_id))
browser = KanshinBrowser()
p = 1
result = []
while True:
browser.open('/keyword/{kid}/connect?p={p}'.format(kid=keyword_id, p=p))
browser.save_page()
connections = KeywordPage(keyword_id, browser.response.text).connections
if not connections:
break
result += connections
p += 1
record['connections'] = result
logger.info('save keyword {}'.format(keyword_id))
connections = record['connections']
del(record['connections'])
save_keyword(record)
ids = set()
logger.info('save connections {}'.format(keyword_id))
for connection in connections:
# {'id': 11526,
# 'out': u'iBook\u306b\u3064\u306a\u3052\u305f\u3044',
# 'title': u'iBook\u306eUS\u30ad\u30fc\u30dc\u30fc\u30c9',
# 'user': u'\u3046\u305a\u3089'}
save_connection(record['id'], connection['id'], connection.get('out'), connection.get('in'))
ids.add(connection['id'])
for kid in ids:
if not is_imported('keyword', kid):
keyword_download.send(kid)
ids = set([record['user_id']] + [comment['user_id'] for comment in record['comments']])
for uid in ids:
if not is_imported('user', uid):
user_download.send(uid)
page = fetch_page('keyword', keyword_id, with_html)
cli(keyword_parse, job)
| mit |
hassanabidpk/django | django/db/models/deletion.py | 144 | 14337 | from collections import Counter, OrderedDict
from itertools import chain
from operator import attrgetter
from django.db import IntegrityError, connections, transaction
from django.db.models import signals, sql
from django.utils import six
class ProtectedError(IntegrityError):
def __init__(self, msg, protected_objects):
self.protected_objects = protected_objects
super(ProtectedError, self).__init__(msg, protected_objects)
def CASCADE(collector, field, sub_objs, using):
collector.collect(sub_objs, source=field.remote_field.model,
source_attr=field.name, nullable=field.null)
if field.null and not connections[using].features.can_defer_constraint_checks:
collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
raise ProtectedError("Cannot delete some instances of model '%s' because "
"they are referenced through a protected foreign key: '%s.%s'" % (
field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name
),
sub_objs
)
def SET(value):
if callable(value):
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value(), sub_objs)
else:
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value, sub_objs)
set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {})
return set_on_delete
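# Illustrative sketch (not part of this module): how the on_delete handlers above are
# attached to a model field. The model and callable names are hypothetical.
#
#     from django.contrib.auth.models import User
#     from django.db import models
#
#     def get_sentinel_user():
#         return User.objects.get_or_create(username='deleted')[0]
#
#     class Comment(models.Model):
#         # SET(callable) re-points cascaded rows at deletion time; CASCADE, PROTECT,
#         # SET_NULL, SET_DEFAULT and DO_NOTHING plug in the same way.
#         author = models.ForeignKey(User, on_delete=models.SET(get_sentinel_user))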
def SET_NULL(collector, field, sub_objs, using):
collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
pass
def get_candidate_relations_to_delete(opts):
# Collect models that contain candidate relations to delete. This may include
# relations coming from proxy models.
candidate_models = {opts}
candidate_models = candidate_models.union(opts.concrete_model._meta.proxied_children)
# For each model, get all candidate fields.
candidate_model_fields = chain.from_iterable(
opts.get_fields(include_hidden=True) for opts in candidate_models
)
# The candidate relations are the ones that come from N-1 and 1-1 relations.
# N-N (i.e., many-to-many) relations aren't candidates for deletion.
return (
f for f in candidate_model_fields
if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many)
)
class Collector(object):
def __init__(self, using):
self.using = using
# Initially, {model: {instances}}, later values become lists.
self.data = {}
self.field_updates = {} # {model: {(field, value): {instances}}}
# fast_deletes is a list of queryset-likes that can be deleted without
# fetching the objects into memory.
self.fast_deletes = []
# Tracks deletion-order dependency for databases without transactions
# or ability to defer constraint checks. Only concrete model classes
# should be included, as the dependencies exist only between actual
# database tables; proxy models are represented here by their concrete
# parent.
self.dependencies = {} # {model: {models}}
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Adds 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it,
and 'nullable' should be set to True if the relation can be null.
Returns a list of all objects that were not already collected.
"""
if not objs:
return []
new_objs = []
model = objs[0].__class__
instances = self.data.setdefault(model, set())
for obj in objs:
if obj not in instances:
new_objs.append(obj)
instances.update(new_objs)
# Nullable relationships can be ignored -- they are nulled out before
# deleting, and therefore do not affect the order in which objects have
# to be deleted.
if source is not None and not nullable:
if reverse_dependency:
source, model = model, source
self.dependencies.setdefault(
source._meta.concrete_model, set()).add(model._meta.concrete_model)
return new_objs
def add_field_update(self, field, value, objs):
"""
Schedules a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
if not objs:
return
model = objs[0].__class__
self.field_updates.setdefault(
model, {}).setdefault(
(field, value), set()).update(objs)
def can_fast_delete(self, objs, from_field=None):
"""
Determines if the objects in the given queryset-like can be
fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
The 'from_field' tells where we are coming from - we need this to
determine if the objects are in fact to be deleted. Allows also
skipping parent -> child -> parent chain preventing fast delete of
the child.
"""
if from_field and from_field.remote_field.on_delete is not CASCADE:
return False
if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
return False
model = objs.model
if (signals.pre_delete.has_listeners(model)
or signals.post_delete.has_listeners(model)
or signals.m2m_changed.has_listeners(model)):
return False
# The use of from_field comes from the need to avoid cascade back to
# parent when parent delete is cascading to child.
opts = model._meta
if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
return False
# Foreign keys pointing to this model, both from m2m and other
# models.
for related in get_candidate_relations_to_delete(opts):
if related.field.remote_field.on_delete is not DO_NOTHING:
return False
for field in model._meta.virtual_fields:
if hasattr(field, 'bulk_related_objects'):
# It's something like generic foreign key.
return False
return True
def get_del_batches(self, objs, field):
"""
Returns the objs in suitably sized batches for the used connection.
"""
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size([field.name], objs), 1)
if len(objs) > conn_batch_size:
return [objs[i:i + conn_batch_size]
for i in range(0, len(objs), conn_batch_size)]
else:
return [objs]
def collect(self, objs, source=None, nullable=False, collect_related=True,
source_attr=None, reverse_dependency=False, keep_parents=False):
"""
Adds 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
If the call is the result of a cascade, 'source' should be the model
that caused it and 'nullable' should be set to True, if the relation
can be null.
If 'reverse_dependency' is True, 'source' will be deleted before the
current model, rather than after. (Needed for cascading to parent
models, the one case in which the cascade follows the forwards
direction of an FK rather than the reverse direction.)
If 'keep_parents' is True, data of parent models will not be deleted.
"""
if self.can_fast_delete(objs):
self.fast_deletes.append(objs)
return
new_objs = self.add(objs, source, nullable,
reverse_dependency=reverse_dependency)
if not new_objs:
return
model = new_objs[0].__class__
if not keep_parents:
# Recursively collect concrete model's parent models, but not their
# related objects. These will be found by meta.get_fields()
concrete_model = model._meta.concrete_model
for ptr in six.itervalues(concrete_model._meta.parents):
if ptr:
# FIXME: This seems to be buggy and execute a query for each
# parent object fetch. We have the parent data in the obj,
# but we don't have a nice way to turn that data into parent
# object instance.
parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
self.collect(parent_objs, source=model,
source_attr=ptr.remote_field.related_name,
collect_related=False,
reverse_dependency=True)
if collect_related:
for related in get_candidate_relations_to_delete(model._meta):
field = related.field
if field.remote_field.on_delete == DO_NOTHING:
continue
batches = self.get_del_batches(new_objs, field)
for batch in batches:
sub_objs = self.related_objects(related, batch)
if self.can_fast_delete(sub_objs, from_field=field):
self.fast_deletes.append(sub_objs)
elif sub_objs:
field.remote_field.on_delete(self, field, sub_objs, self.using)
for field in model._meta.virtual_fields:
if hasattr(field, 'bulk_related_objects'):
# It's something like generic foreign key.
sub_objs = field.bulk_related_objects(new_objs, self.using)
self.collect(sub_objs,
source=model,
source_attr=field.remote_field.related_name,
nullable=True)
def related_objects(self, related, objs):
"""
Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
"""
return related.related_model._base_manager.using(self.using).filter(
**{"%s__in" % related.field.name: objs}
)
def instances_with_model(self):
for model, instances in six.iteritems(self.data):
for obj in instances:
yield model, obj
def sort(self):
sorted_models = []
concrete_models = set()
models = list(self.data)
while len(sorted_models) < len(models):
found = False
for model in models:
if model in sorted_models:
continue
dependencies = self.dependencies.get(model._meta.concrete_model)
if not (dependencies and dependencies.difference(concrete_models)):
sorted_models.append(model)
concrete_models.add(model._meta.concrete_model)
found = True
if not found:
return
self.data = OrderedDict((model, self.data[model])
for model in sorted_models)
def delete(self):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until the
# end of a transaction.
self.sort()
# number of objects deleted for each model label
deleted_counter = Counter()
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.pre_delete.send(
sender=model, instance=obj, using=self.using
)
# fast deletes
for qs in self.fast_deletes:
count = qs._raw_delete(using=self.using)
deleted_counter[qs.model._meta.label] += count
# update fields
for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
query = sql.UpdateQuery(model)
for (field, value), instances in six.iteritems(instances_for_fieldvalues):
query.update_batch([obj.pk for obj in instances],
{field.name: value}, self.using)
# reverse instance collections
for instances in six.itervalues(self.data):
instances.reverse()
# delete instances
for model, instances in six.iteritems(self.data):
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
count = query.delete_batch(pk_list, self.using)
deleted_counter[model._meta.label] += count
if not model._meta.auto_created:
for obj in instances:
signals.post_delete.send(
sender=model, instance=obj, using=self.using
)
# update collected instances
for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
for (field, value), instances in six.iteritems(instances_for_fieldvalues):
for obj in instances:
setattr(obj, field.attname, value)
for model, instances in six.iteritems(self.data):
for instance in instances:
setattr(instance, model._meta.pk.attname, None)
return sum(deleted_counter.values()), dict(deleted_counter)
| bsd-3-clause |
karaage0703/python-image-processing | extract_color.py | 1 | 1695 | # -*- coding: utf-8 -*-
import cv2
import sys
import numpy as np
# extract color function
def extract_color( src, h_th_low, h_th_up, s_th, v_th ):
hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
if h_th_low > h_th_up:
ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY)
ret, h_dst_2 = cv2.threshold(h, h_th_up, 255, cv2.THRESH_BINARY_INV)
dst = cv2.bitwise_or(h_dst_1, h_dst_2)
else:
ret, dst = cv2.threshold(h, h_th_low, 255, cv2.THRESH_TOZERO)
ret, dst = cv2.threshold(dst, h_th_up, 255, cv2.THRESH_TOZERO_INV)
ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)
ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
dst = cv2.bitwise_and(dst, s_dst)
dst = cv2.bitwise_and(dst, v_dst)
return dst
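# Illustrative call sketch (not part of the original script): red hues wrap around 0 on
# OpenCV's 0-179 hue scale, so h_th_low > h_th_up and the two binary masks are OR-ed
# together in the first branch above. The file name and thresholds are examples only.
#
#     img = cv2.imread('sample.jpg')
#     red_mask = extract_color(img, 170, 10, 80, 80)
#     red_only = cv2.bitwise_and(img, img, mask=red_mask)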
if __name__ == '__main__':
param = sys.argv
if (len(param) != 6):
print ("Usage: $ python " + param[0] + " sample.jpg h_min, h_max, s_th, v_th")
quit()
# open image file
try:
input_img = cv2.imread(param[1])
except:
print ('failed to load %s' % param[1])
quit()
if input_img is None:
print ('failed to load %s' % param[1])
quit()
# parameter setting
h_min = int(param[2])
h_max = int(param[3])
s_th = int(param[4])
v_th = int(param[5])
# making mask using by extract color function
msk_img = extract_color(input_img, h_min, h_max, s_th, v_th)
# mask processing
output_img = cv2.bitwise_and(input_img, input_img, mask = msk_img)
cv2.imwrite("extract_" + param[1], output_img)
| mit |
M4rtinK/tsubame | core/bundle/requests/packages/urllib3/util/connection.py | 221 | 4237 | from __future__ import absolute_import
import socket
from .wait import wait_for_read
from .selectors import HAS_SELECT, SelectorError
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not HAS_SELECT:
return False
try:
return bool(wait_for_read(sock, timeout=0.0))
except SelectorError:
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
if host.startswith('['):
host = host.strip('[]')
err = None
# Using the value from allowed_gai_family() in the context of getaddrinfo lets
# us select whether to work with IPv4 DNS records, IPv6 records, or both.
# The original create_connection function always returns all records.
family = allowed_gai_family()
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
raise socket.error("getaddrinfo returns an empty list")
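# Illustrative usage sketch (not part of urllib3): open a TCP connection with a timeout
# and per-socket options; the host and values are examples only.
#
#     sock = create_connection(
#         ('example.com', 80),
#         timeout=3.0,
#         socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
#     )
#     sock.close()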
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family():
"""This function is designed to work in the context of
getaddrinfo, where family=socket.AF_UNSPEC is the default and
will perform a DNS search for both IPv6 and IPv4 records."""
family = socket.AF_INET
if HAS_IPV6:
family = socket.AF_UNSPEC
return family
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6('::1')
| gpl-3.0 |
ntymtsiv/tempest | tempest/api/image/v1/test_image_members.py | 2 | 2476 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.test import attr
class ImageMembersTest(base.BaseV1ImageMembersTest):
@attr(type='gate')
def test_add_image_member(self):
image = self._create_image()
resp = self.client.add_member(self.alt_tenant_id, image)
self.assertEqual(204, resp.status)
resp, body = self.client.get_image_membership(image)
self.assertEqual(200, resp.status)
members = body['members']
members = map(lambda x: x['member_id'], members)
self.assertIn(self.alt_tenant_id, members)
# get image as alt user
resp, body = self.alt_img_cli.get_image(image)
self.assertEqual(200, resp.status)
@attr(type='gate')
def test_get_shared_images(self):
image = self._create_image()
resp = self.client.add_member(self.alt_tenant_id, image)
self.assertEqual(204, resp.status)
share_image = self._create_image()
resp = self.client.add_member(self.alt_tenant_id, share_image)
self.assertEqual(204, resp.status)
resp, body = self.client.get_shared_images(self.alt_tenant_id)
self.assertEqual(200, resp.status)
images = body['shared_images']
images = map(lambda x: x['image_id'], images)
self.assertIn(share_image, images)
self.assertIn(image, images)
@attr(type='gate')
def test_remove_member(self):
image_id = self._create_image()
resp = self.client.add_member(self.alt_tenant_id, image_id)
self.assertEqual(204, resp.status)
resp = self.client.delete_member(self.alt_tenant_id, image_id)
self.assertEqual(204, resp.status)
resp, body = self.client.get_image_membership(image_id)
self.assertEqual(200, resp.status)
members = body['members']
self.assertEqual(0, len(members), str(members))
| apache-2.0 |
ddzialak/boto | tests/unit/dynamodb/test_batch.py | 136 | 4073 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import unittest
from boto.dynamodb.batch import Batch
from boto.dynamodb.table import Table
from boto.dynamodb.layer2 import Layer2
from boto.dynamodb.batch import BatchList
DESCRIBE_TABLE_1 = {
'Table': {
'CreationDateTime': 1349910554.478,
'ItemCount': 1,
'KeySchema': {'HashKeyElement': {'AttributeName': u'foo',
'AttributeType': u'S'}},
'ProvisionedThroughput': {'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10},
'TableName': 'testtable',
'TableSizeBytes': 54,
'TableStatus': 'ACTIVE'}
}
DESCRIBE_TABLE_2 = {
'Table': {
'CreationDateTime': 1349910554.478,
'ItemCount': 1,
'KeySchema': {'HashKeyElement': {'AttributeName': u'baz',
'AttributeType': u'S'},
'RangeKeyElement': {'AttributeName': 'myrange',
'AttributeType': 'N'}},
'ProvisionedThroughput': {'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10},
'TableName': 'testtable2',
'TableSizeBytes': 54,
'TableStatus': 'ACTIVE'}
}
class TestBatchObjects(unittest.TestCase):
maxDiff = None
def setUp(self):
self.layer2 = Layer2('access_key', 'secret_key')
self.table = Table(self.layer2, DESCRIBE_TABLE_1)
self.table2 = Table(self.layer2, DESCRIBE_TABLE_2)
def test_batch_to_dict(self):
b = Batch(self.table, ['k1', 'k2'], attributes_to_get=['foo'],
consistent_read=True)
self.assertDictEqual(
b.to_dict(),
{'AttributesToGet': ['foo'],
'Keys': [{'HashKeyElement': {'S': 'k1'}},
{'HashKeyElement': {'S': 'k2'}}],
'ConsistentRead': True}
)
def test_batch_consistent_read_defaults_to_false(self):
b = Batch(self.table, ['k1'])
self.assertDictEqual(
b.to_dict(),
{'Keys': [{'HashKeyElement': {'S': 'k1'}}],
'ConsistentRead': False}
)
def test_batch_list_consistent_read(self):
b = BatchList(self.layer2)
b.add_batch(self.table, ['k1'], ['foo'], consistent_read=True)
b.add_batch(self.table2, [('k2', 54)], ['bar'], consistent_read=False)
self.assertDictEqual(
b.to_dict(),
{'testtable': {'AttributesToGet': ['foo'],
'Keys': [{'HashKeyElement': {'S': 'k1'}}],
'ConsistentRead': True},
'testtable2': {'AttributesToGet': ['bar'],
'Keys': [{'HashKeyElement': {'S': 'k2'},
'RangeKeyElement': {'N': '54'}}],
'ConsistentRead': False}})
if __name__ == '__main__':
unittest.main()
| mit |
incaser/odoo-odoo | addons/anonymization/__openerp__.py | 260 | 2129 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Database Anonymization',
'version': '1.0',
'category': 'Tools',
'description': """
This module allows you to anonymize a database.
===============================================
This module allows you to keep your data confidential for a given database.
This process is useful if you want to use the migration process and protect
your own or your customer’s confidential data. The principle is that you run
an anonymization tool which will hide your confidential data (they are replaced
by ‘XXX’ characters). Then you can send the anonymized database to the migration
team. Once you get back your migrated database, you restore it and reverse the
anonymization process to recover your previous data.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base'],
'demo': ['anonymization_demo.xml'],
'data': [
'ir.model.fields.anonymization.csv',
'security/ir.model.access.csv',
'anonymization_view.xml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ptitjes/quodlibet | gdist/tests.py | 1 | 5708 | # Copyright 2014-2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import os
import sys
import subprocess
import tarfile
import fnmatch
from distutils import dir_util
from .util import Command, get_dist_class
class test_cmd(Command):
description = "run automated tests"
user_options = [
("to-run=", None, "list of tests to run (default all)"),
("suite=", None, "test suite (folder) to run (default 'tests')"),
("strict", None, "make glib warnings / errors fatal"),
("all", None, "run all suites"),
("exitfirst", "x", "stop after first failing test"),
("no-network", "n", "skip tests requiring a network connection"),
("no-quality", "n", "skip tests for code quality"),
]
def initialize_options(self):
self.to_run = []
self.suite = None
self.strict = False
self.all = False
self.exitfirst = False
self.no_network = False
self.no_quality = False
def finalize_options(self):
if self.to_run:
self.to_run = self.to_run.split(",")
self.strict = bool(self.strict)
self.all = bool(self.all)
self.suite = self.suite and str(self.suite)
self.exitfirst = bool(self.exitfirst)
self.no_network = bool(self.no_network)
self.no_quality = bool(self.no_quality)
def run(self):
import tests
suite = self.suite
if self.all:
suite = None
status = tests.unit(run=self.to_run, suite=suite,
strict=self.strict, exitfirst=self.exitfirst,
network=(not self.no_network or self.all),
quality=(not self.no_quality or self.all))
if status != 0:
raise SystemExit(status)
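# Illustrative invocation sketch (not part of this module): the user_options above surface
# as setup.py flags, assuming the commands are registered under these names.
#
#     python setup.py test --to-run=TestFoo,TestBar --exitfirst
#     python setup.py test --all       # also runs the network and quality suites
#     python setup.py quality          # flake8/mypy checks only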
class quality_cmd(Command):
description = "Run flake8/mypy tests"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import tests
status = tests.unit(suite="quality", quality=True)
if status != 0:
raise SystemExit(status)
sdist = get_dist_class("sdist")
class distcheck_cmd(sdist):
description = "run tests on a fresh sdist"
def _check_manifest(self):
assert self.get_archive_files()
# make sure MANIFEST.in includes all tracked files
if subprocess.call(["git", "status"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0:
# contains the packaged files after run() is finished
included_files = self.filelist.files
assert included_files
process = subprocess.Popen(
["git", "ls-tree", "-r", "HEAD", "--name-only"],
stdout=subprocess.PIPE, universal_newlines=True)
out, err = process.communicate()
assert process.returncode == 0
tracked_files = out.splitlines()
ignore_tracked = [
"dev-utils/*",
".github/*",
".azure/*",
".docker/*",
".travis*",
".circleci*",
".codecov.yml",
".git*",
]
tracked_files = [
p for p in tracked_files if not
any(fnmatch.fnmatch(p, i) for i in ignore_tracked)]
diff = set(tracked_files) - set(included_files)
assert not diff, (
"Not all tracked files included in tarball, check MANIFEST.in",
diff)
def _check_dist(self):
assert self.get_archive_files()
distcheck_dir = os.path.join(self.dist_dir, "distcheck")
if os.path.exists(distcheck_dir):
dir_util.remove_tree(distcheck_dir)
self.mkpath(distcheck_dir)
archive = self.get_archive_files()[0]
tfile = tarfile.open(archive, "r:gz")
tfile.extractall(distcheck_dir)
tfile.close()
name = self.distribution.get_fullname()
extract_dir = os.path.join(distcheck_dir, name)
old_pwd = os.getcwd()
os.chdir(extract_dir)
self.spawn([sys.executable, "setup.py", "test"])
self.spawn([sys.executable, "setup.py", "build"])
self.spawn([sys.executable, "setup.py", "build_sphinx"])
self.spawn([sys.executable, "setup.py", "install",
"--root", "../prefix", "--record", "../log.txt"])
os.chdir(old_pwd)
def run(self):
sdist.run(self)
self._check_manifest()
self._check_dist()
| gpl-2.0 |
ademmers/ansible | hacking/shippable/download.py | 32 | 12281 | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""CLI tool for downloading results from Shippable CI runs."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# noinspection PyCompatibility
import argparse
import json
import os
import re
import sys
import requests
try:
import argcomplete
except ImportError:
argcomplete = None
def main():
"""Main program body."""
args = parse_args()
download_run(args)
def parse_args():
"""Parse and return args."""
api_key = get_api_key()
parser = argparse.ArgumentParser(description='Download results from a Shippable run.')
parser.add_argument('run_id',
metavar='RUN',
help='shippable run id, run url or run name formatted as: account/project/run_number')
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true',
help='show what is being downloaded')
parser.add_argument('-t', '--test',
dest='test',
action='store_true',
help='show what would be downloaded without downloading')
parser.add_argument('--key',
dest='api_key',
default=api_key,
required=api_key is None,
help='api key for accessing Shippable')
parser.add_argument('--console-logs',
action='store_true',
help='download console logs')
parser.add_argument('--test-results',
action='store_true',
help='download test results')
parser.add_argument('--coverage-results',
action='store_true',
help='download code coverage results')
parser.add_argument('--job-metadata',
action='store_true',
help='download job metadata')
parser.add_argument('--run-metadata',
action='store_true',
help='download run metadata')
parser.add_argument('--all',
action='store_true',
help='download everything')
parser.add_argument('--job-number',
metavar='N',
action='append',
type=int,
help='limit downloads to the given job number')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
old_runs_prefix = 'https://app.shippable.com/runs/'
if args.run_id.startswith(old_runs_prefix):
args.run_id = args.run_id[len(old_runs_prefix):]
if args.all:
args.console_logs = True
args.test_results = True
args.coverage_results = True
args.job_metadata = True
args.run_metadata = True
selections = (
args.console_logs,
args.test_results,
args.coverage_results,
args.job_metadata,
args.run_metadata,
)
if not any(selections):
parser.error('At least one download option is required.')
return args
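# Illustrative invocation sketch (not part of this script): a run can be given as a raw
# run id, an app.shippable.com URL, or account/project/run_number. The project and run
# numbers below are examples only.
#
#     ./download.py ansible/ansible/12345 --test-results --coverage-results
#     ./download.py https://app.shippable.com/github/ansible/ansible/runs/12345/1 --all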
def download_run(args):
"""Download a Shippable run."""
headers = dict(
Authorization='apiToken %s' % args.api_key,
)
match = re.search(
r'^https://app.shippable.com/github/(?P<account>[^/]+)/(?P<project>[^/]+)/runs/(?P<run_number>[0-9]+)(?:/summary|(/(?P<job_number>[0-9]+)))?$',
args.run_id)
if not match:
match = re.search(r'^(?P<account>[^/]+)/(?P<project>[^/]+)/(?P<run_number>[0-9]+)$', args.run_id)
if match:
account = match.group('account')
project = match.group('project')
run_number = int(match.group('run_number'))
job_number = int(match.group('job_number')) if match.group('job_number') else None
if job_number:
if args.job_number:
sys.exit('ERROR: job number found in url and specified with --job-number')
args.job_number = [job_number]
url = 'https://api.shippable.com/projects'
response = requests.get(url, dict(projectFullNames='%s/%s' % (account, project)), headers=headers)
if response.status_code != 200:
raise Exception(response.content)
project_id = response.json()[0]['id']
url = 'https://api.shippable.com/runs?projectIds=%s&runNumbers=%s' % (project_id, run_number)
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
run = [run for run in response.json() if run['runNumber'] == run_number][0]
args.run_id = run['id']
elif re.search('^[a-f0-9]+$', args.run_id):
url = 'https://api.shippable.com/runs/%s' % args.run_id
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
run = response.json()
account = run['subscriptionOrgName']
project = run['projectName']
run_number = run['runNumber']
else:
sys.exit('ERROR: invalid run: %s' % args.run_id)
output_dir = '%s/%s/%s' % (account, project, run_number)
if not args.test:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.run_metadata:
path = os.path.join(output_dir, 'run.json')
contents = json.dumps(run, sort_keys=True, indent=4)
if args.verbose or args.test:
print(path)
if not args.test:
with open(path, 'w') as metadata_fd:
metadata_fd.write(contents)
download_run_recursive(args, headers, output_dir, run, True)
def download_run_recursive(args, headers, output_dir, run, is_given=False):
# Notes:
# - The /runs response tells us if we need to eventually go up another layer
# or not (if we are a re-run attempt or not).
# - Given a run id, /jobs will tell us all the jobs in that run, and whether
# or not we can pull results from them.
#
# When we initially run (i.e., in download_run), we'll have a /runs output
# which we can use to get a /jobs output. Using the /jobs output, we filter
# on the jobs we need to fetch (usually only successful ones unless we are
# processing the initial/given run and not one of its parent runs) and
# download them accordingly.
#
# Lastly, we check if the run we are currently processing has another
# parent (reRunBatchId). If it does, we pull that /runs result and
# recurse using it to start the process over again.
response = requests.get('https://api.shippable.com/jobs?runIds=%s' % run['id'], headers=headers)
if response.status_code != 200:
raise Exception(response.content)
jobs = sorted(response.json(), key=lambda job: int(job['jobNumber']))
if is_given:
needed_jobs = [j for j in jobs if j['isConsoleArchived']]
else:
needed_jobs = [j for j in jobs if j['isConsoleArchived'] and j['statusCode'] == 30]
if not args.test:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
download_jobs(args, needed_jobs, headers, output_dir)
rerun_batch_id = run.get('reRunBatchId')
if rerun_batch_id:
print('Downloading previous run: %s' % rerun_batch_id)
response = requests.get('https://api.shippable.com/runs/%s' % rerun_batch_id, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
run = response.json()
download_run_recursive(args, headers, output_dir, run)
def download_jobs(args, jobs, headers, output_dir):
"""Download Shippable jobs."""
for j in jobs:
job_id = j['id']
job_number = j['jobNumber']
if args.job_number and job_number not in args.job_number:
continue
if args.job_metadata:
path = os.path.join(output_dir, '%s/job.json' % job_number)
contents = json.dumps(j, sort_keys=True, indent=4).encode('utf-8')
if args.verbose or args.test:
print(path)
if not args.test:
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'wb') as metadata_fd:
metadata_fd.write(contents)
if args.console_logs:
path = os.path.join(output_dir, '%s/console.log' % job_number)
url = 'https://api.shippable.com/jobs/%s/consoles?download=true' % job_id
download(args, headers, path, url, is_json=False)
if args.test_results:
path = os.path.join(output_dir, '%s/test.json' % job_number)
url = 'https://api.shippable.com/jobs/%s/jobTestReports' % job_id
download(args, headers, path, url)
extract_contents(args, path, os.path.join(output_dir, '%s/test' % job_number))
if args.coverage_results:
path = os.path.join(output_dir, '%s/coverage.json' % job_number)
url = 'https://api.shippable.com/jobs/%s/jobCoverageReports' % job_id
download(args, headers, path, url)
extract_contents(args, path, os.path.join(output_dir, '%s/coverage' % job_number))
def extract_contents(args, path, output_dir):
"""
:type args: any
:type path: str
:type output_dir: str
"""
if not args.test:
if not os.path.exists(path):
return
with open(path, 'r') as json_fd:
items = json.load(json_fd)
for item in items:
contents = item['contents'].encode('utf-8')
path = output_dir + '/' + re.sub('^/*', '', item['path'])
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if args.verbose:
print(path)
if path.endswith('.json'):
contents = json.dumps(json.loads(contents), sort_keys=True, indent=4).encode('utf-8')
if not os.path.exists(path):
with open(path, 'wb') as output_fd:
output_fd.write(contents)
def download(args, headers, path, url, is_json=True):
"""
:type args: any
:type headers: dict[str, str]
:type path: str
:type url: str
:type is_json: bool
"""
if args.verbose or args.test:
print(path)
if os.path.exists(path):
return
if not args.test:
response = requests.get(url, headers=headers)
if response.status_code != 200:
path += '.error'
if is_json:
content = json.dumps(response.json(), sort_keys=True, indent=4).encode(response.encoding)
else:
content = response.content
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'wb') as content_fd:
content_fd.write(content)
def get_api_key():
"""
:rtype: str
"""
key = os.environ.get('SHIPPABLE_KEY', None)
if key:
return key
path = os.path.join(os.environ['HOME'], '.shippable.key')
try:
with open(path, 'r') as key_fd:
return key_fd.read().strip()
except IOError:
return None
if __name__ == '__main__':
main()
| gpl-3.0 |
rspavel/spack | lib/spack/external/py/_path/local.py | 24 | 32638 | """
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32, fspath
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normcase, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
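# Illustrative sketch (not part of the py library): with absolute=0 the target is
# rewritten relative to the common base of the two paths. The paths are examples only.
#
#     link = py.path.local('/data/a/b/link')
#     link.mksymlinkto('/data/shared/file.txt', absolute=0)
#     # the symlink is created with target '../../shared/file.txt'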
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
else:
try:
path = fspath(path)
except TypeError:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = fspath(self)
try:
s2 = fspath(other)
except TypeError:
return False
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return fspath(self) < fspath(other)
def __gt__(self, other):
return fspath(self) > fspath(other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = fspath(other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
self.chmod(0o700, rec=1)
py.error.checked_call(py.std.shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
self.chmod(0o700)
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
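# Illustrative sketch (not part of the py library): deriving new paths from the parts
# named in the docstring above. The file name is an example only.
#
#     p = py.path.local('/tmp/report.txt')
#     p.new(ext='csv')              # local('/tmp/report.csv'); a leading dot is added
#     p.new(purebasename='backup')  # local('/tmp/backup.txt')
#     p.new(basename='other.log')   # local('/tmp/other.log')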
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
"""
sep = self.sep
strargs = [fspath(arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + sep + arg
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False, stat=False):
""" copy path to target.
If mode is True, will copy copy permission from path to target.
If stat is True, copy permission, last modification
time, last access time, and flags from path to target.
"""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self!=target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
if stat:
copystat(self, target)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
if stat:
copystat(x, newx)
def rename(self, target):
""" rename this path to target. """
target = fspath(target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
try:
py.error.checked_call(py.std.pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, fspath(p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
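    # Illustrative usage sketch (comment added for clarity, not part of the
    # original py source); '/tmp/note.txt' is a hypothetical path.
    #
    #   p = py.path.local('/tmp/note.txt')
    #   p.write('hello', ensure=True)             # text mode, creates parents
    #   p.write_binary(b'\x00\x01', ensure=True)  # binary helper defined above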
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
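    # Illustrative usage sketch (comment added for clarity, not part of the
    # original py source); '/tmp/project' is a hypothetical base directory.
    #
    #   base = py.path.local('/tmp/project')
    #   cfg = base.ensure('conf', 'settings.ini')   # file, parents created
    #   logs = base.ensure('logs', dir=True)        # directory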
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising == True:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
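    # Illustrative usage sketch (comment added for clarity, not part of the
    # original py source); '/tmp' is a hypothetical working directory.
    #
    #   workdir = py.path.local('/tmp')
    #   with workdir.as_cwd() as old:
    #       pass                    # cwd is /tmp inside the block
    #   # the previous cwd ('old') is restored on exit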
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
        Return None if a pkgpath cannot be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
                return mod # we don't check anything as we might
                # be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
mod = py.std.types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
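    # Illustrative usage sketch (comment added for clarity, not part of the
    # original py source); 'script.py' is a hypothetical existing file.
    #
    #   mod = py.path.local('script.py').pyimport()
    #   mod = py.path.local('script.py').pyimport(ensuresyspath="append")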
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
        where the 'self' path points to the executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = py.std.os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [re.sub('%SystemRoot%', systemroot, path)
for path in paths]
else:
paths = py.std.os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
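    # Illustrative usage sketch (comment added for clarity, not part of the
    # original py source); assumes an 'ls' binary is available on PATH, which
    # is not the case on all platforms.
    #
    #   ls = py.path.local.sysfind('ls')
    #   if ls is not None:
    #       output = ls.sysexec('-la', '/tmp')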
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
#"""
#special class constructors for local filesystem paths
#"""
def get_temproot(cls):
""" return the system's temporary directory
        (where tempfiles are usually created)
"""
return py.path.local(py.std.tempfile.gettempdir())
get_temproot = classmethod(get_temproot)
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
        (which we created ourselves).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
mkdtemp = classmethod(mkdtemp)
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout = 172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
        If keep is true, directories with a number less than (maxnum-keep)
        will be removed.
"""
if rootdir is None:
rootdir = cls.get_temproot()
nprefix = normcase(prefix)
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
nbasename = normcase(path.basename)
if nbasename.startswith(nprefix):
try:
return int(nbasename[len(nprefix):])
except ValueError:
pass
# compute the maximum number currently in use with the
# prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
except py.error.EEXIST:
# race condition: another thread/process created the dir
# in the meantime. Try counting again
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
# put a .lock file in the new directory that will be removed at
# process exit
if lock_timeout:
lockfile = udir.join('.lock')
mypid = os.getpid()
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
lockfile.write(str(mypid))
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# prune old directories
if keep:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
lf = path.join('.lock')
try:
t1 = lf.lstat().mtime
t2 = lockfile.lstat().mtime
if not lock_timeout or abs(t2-t1) < lock_timeout:
continue # skip directories still locked
except py.error.Error:
pass # assume that it means that there is no 'lf'
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
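    # Illustrative usage sketch (comment added for clarity, not part of the
    # original py source): rotating, numbered session directories under the
    # system temp root; the prefix value is arbitrary.
    #
    #   root = py.path.local.get_temproot()
    #   udir = py.path.local.make_numbered_dir(prefix='session-',
    #                                          rootdir=root, keep=3)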
def copymode(src, dest):
""" copy permission from src to dst. """
py.std.shutil.copymode(src, dest)
def copystat(src, dest):
""" copy permission, last modification time, last access time, and flags from src to dst."""
py.std.shutil.copystat(str(src), str(dest))
def copychunked(src, dest):
    chunksize = 524288  # 512 KiB (half a megabyte) per read
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()
| lgpl-2.1 |
mengxn/tensorflow | tensorflow/python/training/sync_replicas_optimizer.py | 23 | 20139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import session_manager
from tensorflow.python.training import session_run_hook
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
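# Illustrative sketch (added for clarity, not part of the original file):
# compensating for gradient averaging by scaling the learning rate with the
# replica count, as suggested above; the concrete numbers are assumptions.
#
#   base_learning_rate = 0.1
#   num_replicas = 8
#   effective_learning_rate = base_learning_rate * num_replicas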
class SyncReplicasOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
In a typical asynchronous training environment, it's common to have some
  stale gradients. For example, with N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
averaging them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following accumulators/queue are created:
* N `gradient accumulators`, one per variable to train. Gradients are pushed
to them and the chief worker will wait until enough gradients are collected
and then average them before applying to variables. The accumulator will
drop all stale gradients (more details in the accumulator op).
* 1 `token` queue where the optimizer pushes the new global_step value after
all variables are updated.
The following local variable is created:
* `sync_rep_local_step`, one per replica. Compared against the global_step in
each accumulator to check for staleness of the gradients.
The optimizer adds nodes to the graph to collect gradients and pause the
trainers until variables are updated.
For the Parameter Server job:
1. An accumulator is created for each variable, and each replica pushes the
gradients into the accumulators instead of directly applying them to the
variables.
2. Each accumulator averages once enough gradients (replicas_to_aggregate)
have been accumulated.
3. Apply the averaged gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch the global step, use it to
update its local_step variable and start the next batch.
For the replicas:
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into gradient
accumulators. Each accumulator will check the staleness and drop the stale.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
training_op = opt.minimize(total_loss, global_step=self.global_step)
# You can create the hook which handles initialization and queues.
sync_replicas_hook = opt.make_session_run_hook(is_chief)
```
In the training program, every worker will run the train_op as if not
synchronized.
```python
with training.MonitoredTrainingSession(
master=workers[worker_id].target, is_chief=is_chief,
hooks=[sync_replicas_hook]) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(training_op)
```
  To use SyncReplicasOptimizer with an `Estimator`, you need to pass the
  sync_replicas_hook when calling fit.
```
my_estimator = DNNClassifier(..., optimizer=opt)
my_estimator.fit(..., hooks=[sync_replicas_hook])
```
"""
def __init__(self,
opt,
replicas_to_aggregate,
total_num_replicas=None,
variable_averages=None,
variables_to_average=None,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info(
"SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# The synchronization op will be executed in a queue runner which should
# only be executed by one of the replicas (usually the chief).
self._chief_queue_runner = None
# Remember which accumulator is on which device to set the initial step in
# the accumulator to be global step. This list contains list of the
# following format: (accumulator, device).
self._accumulator_list = []
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
ValueError: If the grads_and_vars is empty.
ValueError: If global step is not provided, the staleness cannot be
checked.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
var_list = []
self._local_step = variables.Variable(
initial_value=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=global_step.dtype.base_dtype,
name="sync_rep_local_step")
self.local_step_init_op = state_ops.assign(self._local_step, global_step)
chief_init_ops = [self.local_step_init_op]
self.ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
with ops.name_scope(None, self._name):
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
# Dense gradients.
if grad is None:
aggregated_grad.append(None) # pass-through.
continue
elif isinstance(grad, ops.Tensor):
grad_accum = data_flow_ops.ConditionalAccumulator(
grad.dtype,
shape=var.get_shape(),
shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_grad(
self._replicas_to_aggregate))
else:
if not isinstance(grad, ops.IndexedSlices):
raise ValueError("Unknown grad type!")
grad_accum = data_flow_ops.SparseConditionalAccumulator(
grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_indexed_slices_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_indexed_slices_grad(
self._replicas_to_aggregate))
self._accumulator_list.append((grad_accum, var.device))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
name="sync_token_q",
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
# dummy_queue is passed to the queue runner. Don't use the real queues
# because the queue runner doesn't automatically reopen it once it
# closed queues in PS devices.
dummy_queue = (
data_flow_ops.FIFOQueue(1,
types_pb2.DT_INT32,
shapes=(),
name="dummy_queue",
shared_name="dummy_queue"))
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies(train_ops):
token = sync_token_queue.dequeue()
train_op = state_ops.assign(self._local_step, token)
with ops.control_dependencies([update_op]):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
tokens = array_ops.fill([self._tokens_per_step], global_step)
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
[sync_op])
for accum, dev in self._accumulator_list:
with ops.device(dev):
chief_init_ops.append(
accum.set_global_step(
global_step, name="SetGlobalStep"))
self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
self._gradients_applied = True
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
      *args: Arguments for get_slot_names().
      **kwargs: Keyword arguments for get_slot_names().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens is smaller than replicas_to_aggregate -
        total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens], self._global_step)
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
def make_session_run_hook(self, is_chief, num_tokens=-1):
"""Creates a hook to handle SyncReplicasHook ops such as initialization."""
return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)
class _SyncReplicasOptimizerHook(session_run_hook.SessionRunHook):
"""A SessionRunHook handles ops related to SyncReplicasOptimizer."""
def __init__(self, sync_optimizer, is_chief, num_tokens):
"""Creates hook to handle SyncReplicaOptimizer initialization ops.
Args:
sync_optimizer: `SyncReplicasOptimizer` which this hook will initialize.
is_chief: `Bool`, whether is this a chief replica or not.
num_tokens: Number of tokens to add to the queue.
"""
self._sync_optimizer = sync_optimizer
self._is_chief = is_chief
self._num_tokens = num_tokens
def begin(self):
if self._sync_optimizer._gradients_applied is False: # pylint: disable=protected-access
raise ValueError(
"SyncReplicasOptimizer.apply_gradient should be called before using "
"the hook.")
if self._is_chief:
self._local_init_op = self._sync_optimizer.chief_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = self._sync_optimizer.get_chief_queue_runner()
self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(
self._num_tokens)
else:
self._local_init_op = self._sync_optimizer.local_step_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = None
self._init_tokens_op = None
def after_create_session(self, session, coord):
"""Runs SyncReplicasOptimizer initialization ops."""
local_init_success, msg = session_manager._ready( # pylint: disable=protected-access
self._ready_for_local_init_op, session,
"Model is not ready for SyncReplicasOptimizer local init.")
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for SyncReplicasOptimizer "
"local_init. Init op: %s, error: %s" %
(self._local_init_op.name, msg))
session.run(self._local_init_op)
if self._init_tokens_op is not None:
session.run(self._init_tokens_op)
if self._q_runner is not None:
self._q_runner.create_threads(
session, coord=coord, daemon=True, start=True)
| apache-2.0 |
MicroDreamIT/ShapeCss | node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
      platforms: Array of strings, the supported platforms. If None,
        defaults to ['Win32'].
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
    Returns:
      The specification for the configuration node, as a list.
    """
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
| mit |
yongshengwang/builthue | apps/beeswax/src/beeswax/api.py | 1 | 22722 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext as _
from thrift.transport.TTransport import TTransportException
from desktop.context_processors import get_app_name
from desktop.lib.i18n import force_unicode
from desktop.lib.exceptions_renderable import PopupException
from jobsub.parameterization import substitute_variables
import beeswax.models
from beeswax.forms import QueryForm
from beeswax.data_export import upload
from beeswax.design import HQLdesign
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
from beeswax.views import authorized_get_design, authorized_get_query_history, make_parameterization_form,\
safe_get_design, save_design, massage_columns_for_json, _get_query_handle_and_state,\
_parse_out_hadoop_jobs
LOG = logging.getLogger(__name__)
def error_handler(view_fn):
def decorator(request, *args, **kwargs):
try:
return view_fn(request, *args, **kwargs)
except Http404, e:
raise e
except Exception, e:
if not hasattr(e, 'message') or not e.message:
message = str(e)
else:
message = force_unicode(e.message, strings_only=True, errors='replace')
if 'Invalid OperationHandle' in message and 'id' in kwargs:
# Expired state.
query_history = authorized_get_query_history(request, kwargs['id'], must_exist=False)
if query_history:
query_history.set_to_expired()
query_history.save()
response = {
'status': -1,
'message': message,
}
if re.search('database is locked|Invalid query handle|not JSON serializable', message, re.IGNORECASE):
response['status'] = 2 # Frontend will not display this type of error
LOG.warn('error_handler silencing the exception: %s' % e)
return HttpResponse(json.dumps(response), mimetype="application/json", status=200)
return decorator
@error_handler
def autocomplete(request, database=None, table=None):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
do_as = request.user
if (request.user.is_superuser or request.user.has_hue_permission(action="impersonate", app="security")) and 'doas' in request.GET:
do_as = User.objects.get(username=request.GET.get('doas'))
db = dbms.get(do_as, query_server)
response = {}
try:
if database is None:
response['databases'] = db.get_databases()
elif table is None:
response['tables'] = db.get_tables(database=database)
else:
t = db.get_table(database, table)
response['hdfs_link'] = t.hdfs_link
response['columns'] = [column.name for column in t.cols]
response['extended_columns'] = massage_columns_for_json(t.cols)
except TTransportException, tx:
response['code'] = 503
response['error'] = tx.message
except Exception, e:
LOG.warn('Autocomplete data fetching error %s.%s: %s' % (database, table, e))
response['code'] = 500
response['error'] = e.message
return HttpResponse(json.dumps(response), mimetype="application/json")
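# Illustrative note (added for clarity, not part of the original file): a
# successful autocomplete() response is JSON shaped roughly as below, depending
# on how much of the path is given; the example values are made up.
#
#   {"databases": ["default", "web_logs"]}           # no database given
#   {"tables": ["logs", "users"]}                     # database given
#   {"hdfs_link": "...", "columns": ["ip", "date"],   # database and table given
#    "extended_columns": [...]}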
@error_handler
def parameters(request, design_id=None):
response = {'status': -1, 'message': ''}
# Use POST request to not confine query length.
if request.method != 'POST':
response['message'] = _('A POST request is required.')
parameterization_form_cls = make_parameterization_form(request.POST.get('query-query', ''))
if parameterization_form_cls:
parameterization_form = parameterization_form_cls(prefix="parameterization")
response['parameters'] = [{'parameter': field.html_name, 'name': field.name} for field in parameterization_form]
    response['status'] = 0
else:
response['parameters'] = []
    response['status'] = 0
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def execute_directly(request, query, design, query_server, tablename=None, **kwargs):
if design is not None:
design = authorized_get_design(request, design.id)
db = dbms.get(request.user, query_server)
database = query.query.get('database', 'default')
db.use(database)
history_obj = db.execute_query(query, design)
watch_url = reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': history_obj.id})
response = {
'status': 0,
'id': history_obj.id,
'watch_url': watch_url,
'statement': history_obj.get_current_statement()
}
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def watch_query_refresh_json(request, id):
query_history = authorized_get_query_history(request, id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
if not request.POST.get('next'): # We need this as multi query would fail as current query is closed
handle, state = _get_query_handle_and_state(query_history)
query_history.save_state(state)
# Go to next statement if asked to continue or when a statement with no dataset finished.
try:
if request.POST.get('next') or (not query_history.is_finished() and query_history.is_success() and not query_history.has_results):
query_history = db.execute_next_statement(query_history, request.POST.get('query-query'))
handle, state = _get_query_handle_and_state(query_history)
except QueryServerException, ex:
raise ex
except Exception, ex:
LOG.exception(ex)
handle, state = _get_query_handle_and_state(query_history)
try:
start_over = request.POST.get('log-start-over') == 'true'
log = db.get_log(handle, start_over=start_over)
except Exception, ex:
log = str(ex)
jobs = _parse_out_hadoop_jobs(log)
job_urls = massage_job_urls_for_json(jobs)
result = {
'status': -1,
'log': log,
'jobs': jobs,
'jobUrls': job_urls,
'isSuccess': query_history.is_success(),
'isFailure': query_history.is_failure(),
'id': id,
'statement': query_history.get_current_statement(),
'watch_url': reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': query_history.id})
}
# Run time error
if query_history.is_failure():
res = db.get_operation_status(handle)
if query_history.is_canceled(res):
result['status'] = 0
elif hasattr(res, 'errorMessage') and res.errorMessage:
result['message'] = res.errorMessage
else:
result['message'] = _('Bad status for request %s:\n%s') % (id, res)
else:
result['status'] = 0
return HttpResponse(json.dumps(result), mimetype="application/json")
def massage_job_urls_for_json(jobs):
massaged_jobs = []
for job in jobs:
massaged_jobs.append({
'name': job,
'url': reverse('jobbrowser.views.single_job', kwargs={'job': job})
})
return massaged_jobs
def close_operation(request, query_history_id):
response = {
'status': -1,
'message': ''
}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
else:
try:
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
db = dbms.get(query_history.owner, query_history.get_query_server_config())
handle = query_history.get_handle()
db.close_operation(handle)
query_history.set_to_expired()
query_history.save()
response['status'] = 0
except Exception, e:
response['message'] = unicode(e)
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def explain_directly(request, query, design, query_server):
explanation = dbms.get(request.user, query_server).explain(query)
response = {
'status': 0,
'explanation': explanation.textual,
'statement': query.get_query_statement(0),
}
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def execute(request, design_id=None):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
query_type = beeswax.models.SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
try:
query_form = get_query_form(request)
if query_form.is_valid():
query_str = query_form.query.cleaned_data["query"]
explain = request.GET.get('explain', 'false').lower() == 'true'
design = save_design(request, query_form, query_type, design, False)
if query_form.query.cleaned_data['is_parameterized']:
# Parameterized query
parameterization_form_cls = make_parameterization_form(query_str)
if parameterization_form_cls:
parameterization_form = parameterization_form_cls(request.REQUEST, prefix="parameterization")
if parameterization_form.is_valid():
real_query = substitute_variables(query_str, parameterization_form.cleaned_data)
query = HQLdesign(query_form, query_type=query_type)
query._data_dict['query']['query'] = real_query
try:
if explain:
return explain_directly(request, query, design, query_server)
else:
return execute_directly(request, query, design, query_server)
except Exception, ex:
db = dbms.get(request.user, query_server)
error_message, log = expand_exception(ex, db)
response['message'] = error_message
return HttpResponse(json.dumps(response), mimetype="application/json")
else:
response['errors'] = parameterization_form.errors
return HttpResponse(json.dumps(response), mimetype="application/json")
# Non-parameterized query
query = HQLdesign(query_form, query_type=query_type)
if request.GET.get('explain', 'false').lower() == 'true':
return explain_directly(request, query, design, query_server)
else:
return execute_directly(request, query, design, query_server)
else:
response['message'] = _('There was an error with your query.')
response['errors'] = {
'query': [query_form.query.errors],
'settings': query_form.settings.errors,
'file_resources': query_form.file_resources.errors,
'functions': query_form.functions.errors,
}
except RuntimeError, e:
    response['message'] = str(e)
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def save_query_design(request, design_id=None):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
app_name = get_app_name(request)
query_type = beeswax.models.SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
try:
query_form = get_query_form(request)
if query_form.is_valid():
design = save_design(request, query_form, query_type, design, True)
response['design_id'] = design.id
response['status'] = 0
else:
response['errors'] = {
'query': [query_form.query.errors],
'settings': query_form.settings.errors,
'file_resources': query_form.file_resources.errors,
'functions': query_form.functions.errors
}
except RuntimeError, e:
response['message'] = str(e)
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def fetch_saved_design(request, design_id):
response = {'status': 0, 'message': ''}
if request.method != 'GET':
response['message'] = _('A GET request is required.')
app_name = get_app_name(request)
query_type = beeswax.models.SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
response['design'] = design_to_dict(design)
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def fetch_query_history(request, query_history_id):
response = {'status': 0, 'message': ''}
if request.method != 'GET':
response['message'] = _('A GET request is required.')
query = authorized_get_query_history(request, query_history_id, must_exist=True)
response['query_history'] = query_history_to_dict(request, query)
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def cancel_query(request, query_history_id):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
else:
try:
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
db.cancel_operation(query_history.get_handle())
_get_query_handle_and_state(query_history)
response['status'] = 0
except Exception, e:
response['message'] = unicode(e)
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def save_results_hdfs_directory(request, query_history_id):
"""
Save the results of a query to an HDFS directory.
Rerun the query.
"""
response = {'status': 0, 'message': ''}
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
server_id, state = _get_query_handle_and_state(query_history)
query_history.save_state(state)
error_msg, log = None, None
if request.method != 'POST':
response['message'] = _('A POST request is required.')
else:
if not query_history.is_success():
response['message'] = _('This query is %(state)s. Results unavailable.') % {'state': state}
response['status'] = -1
return HttpResponse(json.dumps(response), mimetype="application/json")
db = dbms.get(request.user, query_history.get_query_server_config())
form = beeswax.forms.SaveResultsDirectoryForm({
'target_dir': request.POST.get('path')
}, fs=request.fs)
if form.is_valid():
target_dir = request.POST.get('path')
try:
response['type'] = 'hdfs-dir'
response['id'] = query_history.id
response['query'] = query_history.query
response['path'] = target_dir
response['success_url'] = '/filebrowser/view%s' % target_dir
query_history = db.insert_query_into_directory(query_history, target_dir)
response['watch_url'] = reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': query_history.id})
except Exception, ex:
error_msg, log = expand_exception(ex, db)
response['message'] = _('The result could not be saved: %s.') % error_msg
response['status'] = -3
else:
response['status'] = 1
response['errors'] = form.errors
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def save_results_hdfs_file(request, query_history_id):
"""
Save the results of a query to an HDFS file.
Do not rerun the query.
"""
response = {'status': 0, 'message': ''}
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
server_id, state = _get_query_handle_and_state(query_history)
query_history.save_state(state)
error_msg, log = None, None
if request.method != 'POST':
response['message'] = _('A POST request is required.')
else:
if not query_history.is_success():
response['message'] = _('This query is %(state)s. Results unavailable.') % {'state': state}
response['status'] = -1
return HttpResponse(json.dumps(response), mimetype="application/json")
db = dbms.get(request.user, query_history.get_query_server_config())
form = beeswax.forms.SaveResultsFileForm({
'target_file': request.POST.get('path'),
'overwrite': request.POST.get('overwrite', False),
})
if form.is_valid():
target_file = form.cleaned_data['target_file']
overwrite = form.cleaned_data['overwrite']
try:
handle, state = _get_query_handle_and_state(query_history)
except Exception, ex:
response['message'] = _('Cannot find query handle and state: %s') % str(query_history)
response['status'] = -2
return HttpResponse(json.dumps(response), mimetype="application/json")
try:
if overwrite and request.fs.exists(target_file):
if request.fs.isfile(target_file):
request.fs.do_as_user(request.user.username, request.fs.rmtree, target_file)
else:
raise PopupException(_("The target path is a directory"))
upload(target_file, handle, request.user, db, request.fs)
response['type'] = 'hdfs-file'
response['id'] = query_history.id
response['query'] = query_history.query
response['path'] = target_file
response['success_url'] = '/filebrowser/view%s' % target_file
response['watch_url'] = reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': query_history.id})
except Exception, ex:
error_msg, log = expand_exception(ex, db)
response['message'] = _('The result could not be saved: %s.') % error_msg
response['status'] = -3
else:
response['status'] = 1
response['errors'] = form.errors
return HttpResponse(json.dumps(response), mimetype="application/json")
@error_handler
def save_results_hive_table(request, query_history_id):
"""
Save the results of a query to a hive table.
Rerun the query.
"""
response = {'status': 0, 'message': ''}
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
server_id, state = _get_query_handle_and_state(query_history)
query_history.save_state(state)
error_msg, log = None, None
if request.method != 'POST':
response['message'] = _('A POST request is required.')
else:
if not query_history.is_success():
response['message'] = _('This query is %(state)s. Results unavailable.') % {'state': state}
response['status'] = -1
return HttpResponse(json.dumps(response), mimetype="application/json")
db = dbms.get(request.user, query_history.get_query_server_config())
database = query_history.design.get_design().query.get('database', 'default')
form = beeswax.forms.SaveResultsTableForm({
'target_table': request.POST.get('table')
}, db=db, database=database)
if form.is_valid():
try:
handle, state = _get_query_handle_and_state(query_history)
result_meta = db.get_results_metadata(handle)
except Exception, ex:
response['message'] = _('Cannot find query handle and state: %s') % str(query_history)
response['status'] = -2
return HttpResponse(json.dumps(response), mimetype="application/json")
try:
query_history = db.create_table_as_a_select(request, query_history, form.target_database, form.cleaned_data['target_table'], result_meta)
response['id'] = query_history.id
response['query'] = query_history.query
response['type'] = 'hive-table'
response['path'] = form.cleaned_data['target_table']
response['success_url'] = reverse('metastore:describe_table', kwargs={'database': form.target_database, 'table': form.cleaned_data['target_table']})
response['watch_url'] = reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': query_history.id})
except Exception, ex:
error_msg, log = expand_exception(ex, db)
response['message'] = _('The result could not be saved: %s.') % error_msg
response['status'] = -3
else:
response['status'] = 1
response['errors'] = form.errors
return HttpResponse(json.dumps(response), mimetype="application/json")
def design_to_dict(design):
hql_design = HQLdesign.loads(design.data)
return {
'id': design.id,
'query': hql_design.hql_query,
'name': design.name,
'desc': design.desc,
'database': hql_design.query.get('database', None),
'settings': hql_design.settings,
'file_resources': hql_design.file_resources,
'functions': hql_design.functions,
'is_parameterized': hql_design.query.get('is_parameterized', True),
'email_notify': hql_design.query.get('email_notify', True)
}
def query_history_to_dict(request, query_history):
query_history_dict = {
'id': query_history.id,
'state': query_history.last_state,
'query': query_history.query,
'has_results': query_history.has_results,
'statement_number': query_history.statement_number,
'watch_url': reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': query_history.id}),
'results_url': reverse(get_app_name(request) + ':view_results', kwargs={'id': query_history.id, 'first_row': 0})
}
if query_history.design:
query_history_dict['design'] = design_to_dict(query_history.design)
return query_history_dict
# Proxy API for Metastore App
def describe_table(request, database, table):
try:
from metastore.views import describe_table
return describe_table(request, database, table)
except Exception, e:
raise PopupException(_('Problem accessing table metadata'), detail=e)
def get_query_form(request):
# Get database choices
query_server = dbms.get_query_server_config(get_app_name(request))
db = dbms.get(request.user, query_server)
databases = [(database, database) for database in db.get_databases()]
if not databases:
raise RuntimeError(_("No databases are available. Permissions could be missing."))
query_form = QueryForm()
query_form.bind(request.POST)
query_form.query.fields['database'].choices = databases # Could not do it in the form
return query_form
| apache-2.0 |
fergalbyrne/nupic | src/nupic/research/monitor_mixin/plot.py | 19 | 5187 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import logging
import os
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
# Suppress; we log it at debug level to avoid polluting the logs of apps
# and services that don't care about plotting
logging.debug("Cannot import matplotlib. Plot class will not work.",
exc_info=True)
class Plot(object):
def __init__(self, monitor, title, show=True):
"""
@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this plot
    @param title (string) Plot title
    @param show (bool) Whether to display the figure interactively (plt.ion / plt.show)
    """
self._monitor = monitor
self._title = title
self._fig = self._initFigure()
self._show = show
if self._show:
plt.ion()
plt.show()
def _initFigure(self):
fig = plt.figure()
fig.suptitle(self._prettyPrintTitle())
return fig
def _prettyPrintTitle(self):
if self._monitor.mmName is not None:
return "[{0}] {1}".format(self._monitor.mmName, self._title)
return self._title
def addGraph(self, data, position=111, xlabel=None, ylabel=None):
""" Adds a graph to the plot's figure.
@param data See matplotlib.Axes.plot documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.plot(data)
plt.draw()
def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
bins=None):
""" Adds a histogram to the plot's figure.
@param data See matplotlib.Axes.hist documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.hist(data, bins=bins, color="green", alpha=0.8)
plt.draw()
def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
aspect="auto", interpolation="nearest", name=None):
""" Adds an image to the plot's figure.
@param data a 2D array. See matplotlib.Axes.imshow documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@param cmap color map used in the rendering
@param aspect how aspect ratio is handled during resize
    @param interpolation interpolation method
    @param name optional string; when given, the figure is also saved to log/<name>.png
    """
if cmap is None:
      # The default colormap is an ugly blue-red model.
cmap = cm.Greys
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
if self._show:
plt.draw()
if name is not None:
if not os.path.exists("log"):
os.mkdir("log")
plt.savefig("log/{name}.png".format(name=name), bbox_inches="tight",
figsize=(8, 6), dpi=400)
def _addBase(self, position, xlabel=None, ylabel=None):
""" Adds a subplot to the plot's figure at specified position.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@returns (matplotlib.Axes) Axes instance
"""
ax = self._fig.add_subplot(position)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
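# Illustrative usage sketch (not part of the original NuPIC source). It assumes
# matplotlib is importable; the SimpleMonitor stand-in below is hypothetical and
# only needs an `mmName` attribute for the title formatting.
def _examplePlotUsage():
  class SimpleMonitor(object):
    mmName = "example"
  # show=False keeps the figure off-screen; addGraph/addHistogram still draw into it
  plot = Plot(SimpleMonitor(), "Demo plot", show=False)
  plot.addGraph([1, 2, 3, 2, 1], position=211, xlabel="step", ylabel="value")
  plot.addHistogram([1, 1, 2, 3, 3, 3], position=212, xlabel="value", bins=3)
  return plot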
| agpl-3.0 |
Onager/dftimewolf | tests/lib/collectors/gcloud.py | 1 | 8609 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the GoogleCloudCollector."""
import unittest
import mock
from libcloudforensics.providers.gcp.internal import project as gcp_project
from libcloudforensics.providers.gcp.internal import compute
from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib.containers import containers
from dftimewolf.lib.collectors import gcloud
FAKE_PROJECT = gcp_project.GoogleCloudProject(
'test-target-project-name',
'fake_zone')
FAKE_ANALYSIS_VM = compute.GoogleComputeInstance(
FAKE_PROJECT.project_id,
'fake_zone',
'fake-analysis-vm')
FAKE_INSTANCE = compute.GoogleComputeInstance(
FAKE_PROJECT.project_id,
'fake_zone',
'fake-instance')
FAKE_DISK = compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'disk1')
FAKE_BOOT_DISK = compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'bootdisk')
FAKE_SNAPSHOT = compute.GoogleComputeSnapshot(
FAKE_DISK,
'fake_snapshot')
FAKE_DISK_COPY = compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'disk1-copy')
class GoogleCloudCollectorTest(unittest.TestCase):
"""Tests for the GCloud collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
gcloud_collector = gcloud.GoogleCloudCollector(test_state)
self.assertIsNotNone(gcloud_collector)
# pylint: disable=invalid-name,line-too-long
@mock.patch('libcloudforensics.providers.gcp.internal.compute_base_resource.GoogleComputeBaseResource.AddLabels')
@mock.patch('libcloudforensics.providers.gcp.internal.compute_base_resource.GoogleComputeBaseResource')
@mock.patch('libcloudforensics.providers.gcp.forensics.StartAnalysisVm')
def testSetUp(self,
mock_StartAnalysisVm,
mock_GoogleComputeBaseResource,
mock_AddLabels):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
mock_StartAnalysisVm.return_value = (mock_GoogleComputeBaseResource, None)
gcloud_collector = gcloud.GoogleCloudCollector(test_state)
gcloud_collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_incident_id',
'fake_zone',
'pd-standard',
42.0,
16,
remote_instance_name='my-owned-instance',
)
self.assertEqual(test_state.errors, [])
self.assertEqual(gcloud_collector.disk_names, [])
self.assertEqual(gcloud_collector.analysis_project.project_id,
'test-analysis-project-name')
self.assertEqual(gcloud_collector.remote_project.project_id,
'test-target-project-name')
self.assertEqual(gcloud_collector.remote_instance_name,
'my-owned-instance')
self.assertEqual(gcloud_collector.all_disks, False)
mock_StartAnalysisVm.assert_called_with(
'test-analysis-project-name',
'gcp-forensics-vm-fake_incident_id',
'fake_zone',
'pd-standard',
42.0,
16,
image_family='ubuntu-1804-lts',
image_project='ubuntu-os-cloud'
)
mock_AddLabels.assert_has_calls(
[mock.call({'incident_id': 'fake_incident_id'})])
# pylint: disable=line-too-long
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleComputeInstance.GetBootDisk')
@mock.patch('libcloudforensics.providers.gcp.internal.compute_base_resource.GoogleComputeBaseResource.AddLabels')
@mock.patch('libcloudforensics.providers.gcp.forensics.StartAnalysisVm')
@mock.patch('libcloudforensics.providers.gcp.forensics.CreateDiskCopy')
@mock.patch('dftimewolf.lib.collectors.gcloud.GoogleCloudCollector._FindDisksToCopy')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleComputeInstance.AttachDisk')
def testProcess(self,
unused_MockAttachDisk,
mock_FindDisks,
mock_CreateDiskCopy,
mock_StartAnalysisVm,
mock_AddLabels,
mock_GetBootDisk):
"""Tests the collector's Process() function."""
mock_StartAnalysisVm.return_value = (FAKE_ANALYSIS_VM, None)
mock_FindDisks.return_value = [FAKE_DISK]
mock_CreateDiskCopy.return_value = FAKE_DISK_COPY
FAKE_ANALYSIS_VM.AddLabels = mock_AddLabels
FAKE_ANALYSIS_VM.GetBootDisk = mock_GetBootDisk
FAKE_DISK_COPY.AddLabels = mock_AddLabels
test_state = state.DFTimewolfState(config.Config)
gcloud_collector = gcloud.GoogleCloudCollector(test_state)
gcloud_collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_incident_id',
'fake_zone',
'pd-standard',
42.0,
16,
remote_instance_name='my-owned-instance',
)
gcloud_collector.Process()
mock_CreateDiskCopy.assert_called_with(
'test-target-project-name',
'test-analysis-project-name',
None,
FAKE_DISK.zone,
disk_name=FAKE_DISK.name)
forensics_vms = test_state.GetContainers(containers.ForensicsVM)
forensics_vm = forensics_vms[0]
self.assertEqual(forensics_vm.name, 'fake-analysis-vm')
self.assertEqual(forensics_vm.evidence_disk.name, 'disk1-copy')
mock_AddLabels.assert_has_calls([mock.call({'incident_id': 'fake_incident_id'})])
# pylint: disable=line-too-long,invalid-name
@mock.patch('libcloudforensics.providers.gcp.internal.compute_base_resource.GoogleComputeBaseResource')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleComputeInstance.GetBootDisk')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.GetDisk')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleComputeInstance.ListDisks')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.GetInstance')
@mock.patch('libcloudforensics.providers.gcp.forensics.StartAnalysisVm')
# We're manually calling protected functions
# pylint: disable=protected-access
def testFindDisksToCopy(self,
mock_StartAnalysisVm,
mock_get_instance,
mock_list_disks,
mock_get_disk,
mock_GetBootDisk,
mock_GoogleComputeBaseResource):
"""Tests the FindDisksToCopy function with different SetUp() calls."""
test_state = state.DFTimewolfState(config.Config)
gcloud_collector = gcloud.GoogleCloudCollector(test_state)
mock_StartAnalysisVm.return_value = (mock_GoogleComputeBaseResource, None)
mock_list_disks.return_value = {
'bootdisk': FAKE_BOOT_DISK,
'disk1': FAKE_DISK
}
mock_get_disk.return_value = FAKE_DISK
mock_get_instance.return_value = FAKE_INSTANCE
mock_GetBootDisk.return_value = FAKE_BOOT_DISK
# Nothing is specified, GoogleCloudCollector should collect the instance's
# boot disk
gcloud_collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_incident_id',
'fake_zone',
'pd-standard',
42.0,
16,
remote_instance_name='my-owned-instance',
)
disks = gcloud_collector._FindDisksToCopy()
self.assertEqual(len(disks), 1)
self.assertEqual(disks[0].name, 'bootdisk')
mock_GetBootDisk.assert_called_once()
# Specifying all_disks should return all disks for the instance
# (see mock_list_disks return value)
gcloud_collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_incident_id',
'fake_zone',
'pd-standard',
42.0,
16,
remote_instance_name='my-owned-instance',
all_disks=True
)
disks = gcloud_collector._FindDisksToCopy()
self.assertEqual(len(disks), 2)
self.assertEqual(disks[0].name, 'bootdisk')
self.assertEqual(disks[1].name, 'disk1')
# If a list of disks is passed, that disk only should be returned
gcloud_collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_incident_id',
'fake_zone',
'pd-standard',
42.0,
16,
remote_instance_name='my-owned-instance',
disk_names='disk1'
)
disks = gcloud_collector._FindDisksToCopy()
self.assertEqual(len(disks), 1)
self.assertEqual(disks[0].name, 'disk1')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
alfa-addon/addon | plugin.video.alfa/lib/python_libtorrent/linux_armv7/1.1.0/__init__.py | 362 | 1240 | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
| gpl-3.0 |
guillermo-carrasco/MultiQC | multiqc/__init__.py | 1 | 14296 | #!/usr/bin/env python
""" MultiQC modules base class, contains helper functions """
from __future__ import print_function
from collections import OrderedDict
import io
import json
import mimetypes
import os
import random
from multiqc import config
letters = 'abcdefghijklmnopqrstuvwxyz'
class BaseMultiqcModule(object):
def __init__(self, log):
self.log = log
def find_log_files(self, fn_match=None, contents_match=None, filehandles=False):
"""
Search the analysis directory for log files of interest. Can take either a filename
suffix or a search string to return only log files that contain relevant info.
:param fn_match: Optional string or list of strings. Filename suffixes to search for.
:param contents_match: Optional string or list of strings to look for in file.
NB: Both searches return file if *any* of the supplied strings are matched.
:param filehandles: Set to true to return a file handle instead of slurped file contents
        :return: Yields a dict with the sample name generated from the filename ('s_name'),
        the file contents or a file handle ('f'), plus the containing directory ('root')
        and filename ('fn') for the current matched file.
        As yield is used, the function can be iterated over without loading all files at once.
"""
for root, dirnames, filenames in os.walk(config.analysis_dir, followlinks=True):
for fn in filenames:
# Make a sample name from the filename
s_name = self.clean_s_name(fn, root)
# Make search strings into lists if a string is given
if type(fn_match) is str:
fn_match = [fn_match]
if type(contents_match) is str:
contents_match = [contents_match]
# Search for file names ending in a certain string
readfile = False
if fn_match is not None:
for m in fn_match:
if m in fn:
readfile = True
break
else:
readfile = True
# Limit search to files under 1MB to avoid 30GB FastQ files etc.
try:
filesize = os.path.getsize(os.path.join(root,fn))
except (IOError, OSError, ValueError, UnicodeDecodeError):
                    self.log.debug("Couldn't read file when looking for output: {}".format(fn))
readfile = False
else:
if filesize > 1000000:
readfile = False
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(os.path.join(root, fn))
if encoding is not None:
readfile = False # eg. gzipped files
if ftype is not None and ftype.startswith('text') is False:
readfile = False # eg. images - 'image/jpeg'
if readfile:
try:
with io.open (os.path.join(root,fn), "r", encoding='utf-8') as f:
# Search this file for our string of interest
returnfile = False
if contents_match is not None:
for line in f:
for m in contents_match:
if m in line:
returnfile = True
break
f.seek(0)
else:
returnfile = True
# Give back what was asked for. Yield instead of return
                            # so that this function can be used as an iterator
# without loading all files at once.
if returnfile:
if filehandles:
yield {'s_name': s_name, 'f': f, 'root': root, 'fn': fn}
else:
yield {'s_name': s_name, 'f': f.read(), 'root': root, 'fn': fn}
except (IOError, OSError, ValueError, UnicodeDecodeError):
self.log.debug("Couldn't read file when looking for output: {}".format(fn))
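        # Illustrative usage sketch (not part of the original source): a module
        # subclass would typically consume this generator like
        #     for f in self.find_log_files(fn_match='_screen.txt'):
        #         self.parse_report(f['f'], f['s_name'])
        # where '_screen.txt' and parse_report() are hypothetical examples.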
def clean_s_name(self, s_name, root):
""" Helper function to take a long file name and strip it
back to a clean sample name. Somewhat arbitrary.
:param s_name: The sample name to clean
:param root: The directory path that this file is within
        Note: directory names are prepended to the sample name when config.prepend_dirs is set.
:return: The cleaned sample name, ready to be used
"""
# Split then take first section to remove everything after these matches
s_name = s_name.split(".gz",1)[0]
s_name = s_name.split(".fastq",1)[0]
s_name = s_name.split(".fq",1)[0]
s_name = s_name.split(".bam",1)[0]
s_name = s_name.split(".sam",1)[0]
s_name = s_name.split("_tophat",1)[0]
s_name = s_name.split("_star_aligned",1)[0]
if config.prepend_dirs:
s_name = "{} | {}".format(root.replace(os.sep, ' | '), s_name).lstrip('. | ')
return s_name
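        # Illustrative example (not in the original source): with default settings,
        # clean_s_name('sample1_R1.trimmed.fastq.gz', '/data/run1') returns
        # 'sample1_R1.trimmed' - only the listed suffixes (.gz, .fastq, .bam, ...)
        # and anything after them are stripped.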
def plot_xy_data(self, data, config={}, original_plots=[]):
""" Plot a line graph with X,Y data. See CONTRIBUTING.md for
further instructions on use.
:param data: 2D dict, first keys as sample names, then x:y data pairs
        :param config: optional dict with config key:value pairs. See CONTRIBUTING.md
        :param original_plots: optional list specifying original plot images. Each dict
            should have the keys 's_name' and 'img_path'
:return: HTML and JS, ready to be inserted into the page
"""
# Given one dataset - turn it into a list
if type(data) is not list:
data = [data]
# Generate the data dict structure expected by HighCharts series
plotdata = list()
for d in data:
thisplotdata = list()
for s in sorted(d.keys()):
pairs = list()
maxval = 0
for k, p in d[s].items():
pairs.append([k, p])
maxval = max(maxval, p)
if maxval > 0 or config.get('hide_empty') is not True:
thisplotdata.append({
'name': s,
'data': pairs
})
plotdata.append(thisplotdata)
# Build the HTML for the page
if config.get('id') is None:
config['id'] = 'mqc_hcplot_'+''.join(random.sample(letters, 10))
html = ''
# Buttons to cycle through different datasets
if len(plotdata) > 1:
html += '<div class="btn-group switch_group">\n'
for k, p in enumerate(plotdata):
active = 'active' if k == 0 else ''
try: name = config['data_labels'][k]['name']
except: name = k+1
try: ylab = 'data-ylab="{}"'.format(config['data_labels'][k]['ylab'])
except: ylab = 'data-ylab="{}"'.format(name) if name != k+1 else ''
html += '<button class="btn btn-default btn-sm {a}" data-action="set_data" {y} data-newdata="{id}_datasets[{k}]" data-target="#{id}">{n}</button>\n'.format(a=active, id=config['id'], n=name, y=ylab, k=k)
html += '</div>\n\n'
# Markup needed if we have the option of clicking through to original plot images
if len(original_plots) > 0:
config['tt_label'] = 'Click to show original plot.<br>{}'.format(config.get('tt_label', '{point.x}'))
if len(original_plots) > 1:
next_prev_buttons = '<div class="clearfix"><div class="btn-group btn-group-sm"> \n\
<a href="#{prev}" class="btn btn-default original_plot_prev_btn" data-target="#{id}">« Previous</a> \n\
<a href="#{next}" class="btn btn-default original_plot_nxt_btn" data-target="#{id}">Next »</a> \n\
</div></div>'.format(id=config['id'], prev=original_plots[-1]['s_name'], next=original_plots[1]['s_name'])
else:
next_prev_buttons = ''
html += '<p class="text-muted instr">Click to show original FastQC plot.</p>\n\
<div id="fastqc_quals" class="hc-plot-wrapper"> \n\
<div class="showhide_orig" style="display:none;"> \n\
<h4><span class="s_name">{n}</span></h4> \n\
{b} <img data-toggle="tooltip" title="Click to return to overlay plot" class="original-plot" src="{f}"> \n\
</div>\n\
<div id="{id}" class="hc-plot"></div> \n\
</div>'.format(id=config['id'], b=next_prev_buttons, n=original_plots[0]['s_name'], f=original_plots[0]['img_path'])
orig_plots = 'var {id}_orig_plots = {d}; \n'.format(id=config['id'], d=json.dumps(original_plots))
config['orig_click_func'] = True # Javascript prints the click function
# Regular plots (no original images)
else:
html += '<div id="{id}" class="hc-plot"></div> \n'.format(id=config['id'])
orig_plots = ''
# Javascript with data dump
html += '<script type="text/javascript"> \n\
var {id}_datasets = {d}; \n\
{o} \
$(function () {{ plot_xy_line_graph("#{id}", {id}_datasets[0], {c}); }}); \n\
</script>'.format(id=config['id'], d=json.dumps(plotdata), c=json.dumps(config), o=orig_plots);
return html
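    # Illustrative usage sketch (not part of the original source): the expected input
    # is a dict of sample names mapping to {x: y} pairs, e.g.
    #     data = {'sample_1': {1: 36.2, 2: 35.9}, 'sample_2': {1: 37.1, 2: 36.8}}
    #     html = self.plot_xy_data(data, {'id': 'my_plot'})
    # The 'id' config key is used by the function itself; any other keys are passed
    # through to the client-side plotting code.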
def plot_bargraph (self, data, cats=None, config={}):
""" Plot a horizontal bar graph. Expects a 2D dict of sample
data. Also can take info about categories. There are quite a
few variants of how to use this function, see CONTRIBUTING.md
for documentation and examples.
:param data: 2D dict, first keys as sample names, then x:y data pairs
        :param cats: optional list, dict or OrderedDict with plot categories
:param config: optional dict with config key:value pairs
:return: HTML and JS, ready to be inserted into the page
"""
# Not given any cats - find them from the data
if cats is None:
cats = list(set(k for s in data.keys() for k in data[s].keys() ))
# Given a list of cats - turn it into a dict
if type(cats) is list:
newcats = OrderedDict()
for c in cats:
newcats[c] = {'name': c}
cats = newcats
# Parse the data into a HighCharts friendly format
hc_samples = sorted(list(data.keys()))
hc_data = list()
for c in cats.keys():
thisdata = list()
for s in hc_samples:
thisdata.append(data[s][c])
if max(thisdata) > 0:
thisdict = { 'name': cats[c]['name'], 'data': thisdata }
if 'color' in cats[c]:
thisdict['color'] = cats[c]['color']
hc_data.append(thisdict)
# Build the HTML
if config.get('id') is None:
config['id'] = 'mqc_hcplot_'+''.join(random.sample(letters, 10))
html = ''
# Counts / Percentages Switch
if config.get('cpswitch') is not False:
if config.get('cpswitch_c_active', True) is True:
c_active = 'active'
p_active = ''
else:
c_active = ''
p_active = 'active'
config['stacking'] = 'percent'
c_label = config.get('cpswitch_counts_label', 'Counts')
p_label = config.get('cpswitch_percent_label', 'Percentages')
html += '<div class="btn-group switch_group"> \n\
<button class="btn btn-default btn-sm {c_a}" data-action="set_numbers" data-target="#{id}">{c_l}</button> \n\
<button class="btn btn-default btn-sm {p_a}" data-action="set_percent" data-target="#{id}">{p_l}</button> \n\
</div>'.format(id=config['id'], c_a=c_active, p_a=p_active, c_l=c_label, p_l=p_label)
# Plot and javascript function
html += '<div id="{id}" class="hc-plot"></div> \n\
<script type="text/javascript"> \n\
$(function () {{ plot_stacked_bar_graph("#{id}", {s}, {d}, {c}); }}); \
</script>'.format(id=config['id'], s=json.dumps(hc_samples), d=json.dumps(hc_data), c=json.dumps(config));
return html
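    # Illustrative usage sketch (not part of the original source), using hypothetical
    # sample names and category keys:
    #     data = {'sample_1': {'aligned': 90, 'unaligned': 10},
    #             'sample_2': {'aligned': 75, 'unaligned': 25}}
    #     cats = OrderedDict([('aligned', {'name': 'Aligned reads'}),
    #                         ('unaligned', {'name': 'Unaligned reads'})])
    #     html = self.plot_bargraph(data, cats)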
def write_csv_file(self, data, fn):
with io.open (os.path.join(config.output_dir, 'report_data', fn), "w", encoding='utf-8') as f:
print( self.dict_to_csv( data ), file=f)
    def dict_to_csv(self, d, delim="\t"):
""" Converts a dict to a CSV string
:param d: 2D dictionary, first keys sample names and second key
column headers
:param delim: optional delimiter character. Default: \t
:return: Flattened string, suitable to write to a CSV file.
"""
h = None # We make a list of keys to ensure consistent order
l = list()
for sn in sorted(d.keys()):
if h is None:
h = list(d[sn].keys())
l.append(delim.join([''] + h))
thesefields = [sn] + [ str(d[sn].get(k, '')) for k in h ]
l.append( delim.join( thesefields ) )
return ('\n'.join(l)).encode('utf-8', 'ignore').decode('utf-8')
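    # Illustrative example (not in the original source):
    #     dict_to_csv({'s1': {'reads': 10, 'gc': 41}, 's2': {'reads': 12, 'gc': 44}})
    # produces a tab-delimited string with a header row built from the first sample's
    # keys, then one row per sample - note that the column order follows whatever
    # order the first sample's dict keys happen to have.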
| mit |
jorik041/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
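# Note (added for clarity, not in the original test file): the 'a__d' name above follows
# scikit-learn's nested-parameter convention <component>__<parameter>, e.g.
#     T(a=K()).set_params(a__d=2)   # sets the 'd' attribute of the nested K instance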
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
wzhfy/spark | examples/src/main/python/mllib/svd_example.py | 128 | 1773 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
# $example on$
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.linalg.distributed import RowMatrix
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonSVDExample")
# $example on$
rows = sc.parallelize([
Vectors.sparse(5, {1: 1.0, 3: 7.0}),
Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
])
mat = RowMatrix(rows)
# Compute the top 5 singular values and corresponding singular vectors.
svd = mat.computeSVD(5, computeU=True)
U = svd.U # The U factor is a RowMatrix.
s = svd.s # The singular values are stored in a local dense vector.
V = svd.V # The V factor is a local dense matrix.
# $example off$
collected = U.rows.collect()
print("U factor is:")
for vector in collected:
print(vector)
print("Singular values are: %s" % s)
print("V factor is:\n%s" % V)
sc.stop()
| apache-2.0 |
shahbazn/neutron | neutron/tests/functional/agent/test_l3_agent.py | 8 | 67537 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import os.path
import time
import mock
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
import testtools
import webob
import webob.dec
import webob.exc
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent import l3_agent as l3_agent_main
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import config as common_config
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.tests.common import l3_test_common
from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import helpers
from neutron.tests.functional import base
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
METADATA_REQUEST_TIMEOUT = 60
METADATA_REQUEST_SLEEP = 5
def get_ovs_bridge(br_name):
return ovs_lib.OVSBridge(br_name)
class L3AgentTestFramework(base.BaseSudoTestCase):
def setUp(self):
super(L3AgentTestFramework, self).setUp()
self.mock_plugin_api = mock.patch(
'neutron.agent.l3.agent.L3PluginApi').start().return_value
mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
self.agent = self._configure_agent('agent1')
def _get_config_opts(self):
config = cfg.ConfigOpts()
config.register_opts(common_config.core_opts)
config.register_opts(common_config.core_cli_opts)
logging.register_options(config)
agent_config.register_process_monitor_opts(config)
return config
def _configure_agent(self, host):
conf = self._get_config_opts()
l3_agent_main.register_opts(conf)
conf.set_override(
'interface_driver',
'neutron.agent.linux.interface.OVSInterfaceDriver')
br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
conf.set_override('ovs_integration_bridge', br_int.br_name)
conf.set_override('external_network_bridge', br_ex.br_name)
temp_dir = self.get_new_temp_dir()
get_temp_file_path = functools.partial(self.get_temp_file_path,
root=temp_dir)
conf.set_override('state_path', temp_dir.path)
# NOTE(cbrandily): log_file or log_dir must be set otherwise
# metadata_proxy_watch_log has no effect
conf.set_override('log_file',
get_temp_file_path('log_file'))
conf.set_override('metadata_proxy_socket',
get_temp_file_path('metadata_proxy'))
conf.set_override('ha_confs_path',
get_temp_file_path('ha_confs'))
conf.set_override('external_pids',
get_temp_file_path('external/pids'))
conf.set_override('host', host)
agent = neutron_l3_agent.L3NATAgentWithStateReport(host, conf)
return agent
def generate_router_info(self, enable_ha, ip_version=4, extra_routes=True,
enable_fip=True, enable_snat=True,
dual_stack=False, v6_ext_gw_with_sub=True):
if ip_version == 6 and not dual_stack:
enable_snat = False
enable_fip = False
extra_routes = False
return l3_test_common.prepare_router_data(ip_version=ip_version,
enable_snat=enable_snat,
enable_floating_ip=enable_fip,
enable_ha=enable_ha,
extra_routes=extra_routes,
dual_stack=dual_stack,
v6_ext_gw_with_sub=(
v6_ext_gw_with_sub))
def manage_router(self, agent, router):
self.addCleanup(agent._safe_router_removed, router['id'])
agent._process_added_router(router)
return agent.router_info[router['id']]
def _delete_router(self, agent, router_id):
agent._router_removed(router_id)
def _add_fip(self, router, fip_address, fixed_address='10.0.0.2',
host=None):
fip = {'id': _uuid(),
'port_id': _uuid(),
'floating_ip_address': fip_address,
'fixed_ip_address': fixed_address,
'host': host}
router.router[l3_constants.FLOATINGIP_KEY].append(fip)
def _add_internal_interface_by_subnet(self, router, count=1,
ip_version=4,
ipv6_subnet_modes=None,
interface_id=None):
return l3_test_common.router_append_subnet(router, count,
ip_version, ipv6_subnet_modes, interface_id)
def _namespace_exists(self, namespace):
ip = ip_lib.IPWrapper(namespace=namespace)
return ip.netns.exists(namespace)
def _metadata_proxy_exists(self, conf, router):
pm = external_process.ProcessManager(
conf,
router.router_id,
router.ns_name)
return pm.active
def device_exists_with_ips_and_mac(self, expected_device, name_getter,
namespace):
ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips'])
return ip_lib.device_exists_with_ips_and_mac(
name_getter(expected_device['id']), ip_cidrs,
expected_device['mac_address'], namespace)
@staticmethod
def _port_first_ip_cidr(port):
fixed_ip = port['fixed_ips'][0]
return common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip['prefixlen'])
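        # Illustrative example (not in the original tests): for a port whose first
        # fixed IP is {'ip_address': '19.4.4.4', 'prefixlen': 24}, this helper
        # returns '19.4.4.4/24'.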
def get_device_mtu(self, target_device, name_getter, namespace):
device = ip_lib.IPDevice(name_getter(target_device), namespace)
return device.link.mtu
def get_expected_keepalive_configuration(self, router):
ha_device_name = router.get_ha_device_name()
external_port = router.get_ex_gw_port()
ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address'])
external_device_name = router.get_external_device_name(
external_port['id'])
external_device_cidr = self._port_first_ip_cidr(external_port)
internal_port = router.router[l3_constants.INTERFACE_KEY][0]
int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address'])
internal_device_name = router.get_internal_device_name(
internal_port['id'])
internal_device_cidr = self._port_first_ip_cidr(internal_port)
floating_ip_cidr = common_utils.ip_to_cidr(
router.get_floating_ips()[0]['floating_ip_address'])
default_gateway_ip = external_port['subnets'][0].get('gateway_ip')
extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr')
return """vrrp_instance VR_1 {
state BACKUP
interface %(ha_device_name)s
virtual_router_id 1
priority 50
garp_master_repeat 5
garp_master_refresh 10
nopreempt
advert_int 2
track_interface {
%(ha_device_name)s
}
virtual_ipaddress {
169.254.0.1/24 dev %(ha_device_name)s
}
virtual_ipaddress_excluded {
%(floating_ip_cidr)s dev %(external_device_name)s
%(external_device_cidr)s dev %(external_device_name)s
%(internal_device_cidr)s dev %(internal_device_name)s
%(ex_port_ipv6)s dev %(external_device_name)s scope link
%(int_port_ipv6)s dev %(internal_device_name)s scope link
}
virtual_routes {
0.0.0.0/0 via %(default_gateway_ip)s dev %(external_device_name)s
8.8.8.0/24 via 19.4.4.4
%(extra_subnet_cidr)s dev %(external_device_name)s scope link
}
}""" % {
'ha_device_name': ha_device_name,
'external_device_name': external_device_name,
'external_device_cidr': external_device_cidr,
'internal_device_name': internal_device_name,
'internal_device_cidr': internal_device_cidr,
'floating_ip_cidr': floating_ip_cidr,
'default_gateway_ip': default_gateway_ip,
'int_port_ipv6': int_port_ipv6,
'ex_port_ipv6': ex_port_ipv6,
'extra_subnet_cidr': extra_subnet_cidr,
}
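        # Note (added for clarity): the template above is what the test expects the
        # keepalived manager to render - the HA VRRP instance, the primary VIP on the
        # HA device, excluded VIPs for the external/internal/floating addresses, and
        # virtual routes for the default gateway, extra routes and on-link extra subnets.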
def _get_rule(self, iptables_manager, table, chain, predicate):
rules = iptables_manager.get_chain(table, chain)
result = next(rule for rule in rules if predicate(rule))
return result
def _assert_router_does_not_exist(self, router):
# If the namespace assertion succeeds
# then the devices and iptable rules have also been deleted,
# so there's no need to check that explicitly.
self.assertFalse(self._namespace_exists(router.ns_name))
utils.wait_until_true(
lambda: not self._metadata_proxy_exists(self.agent.conf, router))
def _assert_snat_chains(self, router):
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'POSTROUTING'))
def _assert_floating_ip_chains(self, router):
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'float-snat'))
def _assert_metadata_chains(self, router):
metadata_port_filter = lambda rule: (
str(self.agent.conf.metadata_port) in rule.rule)
self.assertTrue(self._get_rule(router.iptables_manager,
'nat',
'PREROUTING',
metadata_port_filter))
self.assertTrue(self._get_rule(router.iptables_manager,
'filter',
'INPUT',
metadata_port_filter))
def _assert_internal_devices(self, router):
internal_devices = router.router[l3_constants.INTERFACE_KEY]
self.assertTrue(len(internal_devices))
for device in internal_devices:
self.assertTrue(self.device_exists_with_ips_and_mac(
device, router.get_internal_device_name, router.ns_name))
def _assert_extra_routes(self, router):
routes = ip_lib.get_routing_table(4, namespace=router.ns_name)
routes = [{'nexthop': route['nexthop'],
'destination': route['destination']} for route in routes]
for extra_route in router.router['routes']:
self.assertIn(extra_route, routes)
def _assert_onlink_subnet_routes(self, router, ip_versions):
routes = []
for ip_version in ip_versions:
_routes = ip_lib.get_routing_table(ip_version,
namespace=router.ns_name)
routes.extend(_routes)
routes = set(route['destination'] for route in routes)
extra_subnets = router.get_ex_gw_port()['extra_subnets']
for extra_subnet in (route['cidr'] for route in extra_subnets):
self.assertIn(extra_subnet, routes)
def _assert_interfaces_deleted_from_ovs(self):
def assert_ovs_bridge_empty(bridge_name):
bridge = ovs_lib.OVSBridge(bridge_name)
self.assertFalse(bridge.get_port_name_list())
assert_ovs_bridge_empty(self.agent.conf.ovs_integration_bridge)
assert_ovs_bridge_empty(self.agent.conf.external_network_bridge)
def floating_ips_configured(self, router):
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
external_port = router.get_ex_gw_port()
return len(floating_ips) and all(
ip_lib.device_exists_with_ips_and_mac(
router.get_external_device_name(external_port['id']),
['%s/32' % fip['floating_ip_address']],
external_port['mac_address'],
namespace=router.ns_name) for fip in floating_ips)
def fail_ha_router(self, router):
device_name = router.get_ha_device_name()
ha_device = ip_lib.IPDevice(device_name, router.ns_name)
ha_device.link.set_down()
class L3AgentTestCase(L3AgentTestFramework):
def test_keepalived_state_change_notification(self):
enqueue_mock = mock.patch.object(
self.agent, 'enqueue_state_change').start()
router_info = self.generate_router_info(enable_ha=True)
router = self.manage_router(self.agent, router_info)
utils.wait_until_true(lambda: router.ha_state == 'master')
self.fail_ha_router(router)
utils.wait_until_true(lambda: router.ha_state == 'backup')
utils.wait_until_true(lambda: enqueue_mock.call_count == 3)
calls = [args[0] for args in enqueue_mock.call_args_list]
self.assertEqual((router.router_id, 'backup'), calls[0])
self.assertEqual((router.router_id, 'master'), calls[1])
self.assertEqual((router.router_id, 'backup'), calls[2])
def _expected_rpc_report(self, expected):
calls = (args[0][1] for args in
self.agent.plugin_rpc.update_ha_routers_states.call_args_list)
# Get the last state reported for each router
actual_router_states = {}
for call in calls:
for router_id, state in six.iteritems(call):
actual_router_states[router_id] = state
return actual_router_states == expected
def test_keepalived_state_change_bulk_rpc(self):
router_info = self.generate_router_info(enable_ha=True)
router1 = self.manage_router(self.agent, router_info)
self.fail_ha_router(router1)
router_info = self.generate_router_info(enable_ha=True)
router2 = self.manage_router(self.agent, router_info)
utils.wait_until_true(lambda: router1.ha_state == 'backup')
utils.wait_until_true(lambda: router2.ha_state == 'master')
utils.wait_until_true(
lambda: self._expected_rpc_report(
{router1.router_id: 'standby', router2.router_id: 'active'}))
def test_agent_notifications_for_router_events(self):
"""Test notifications for router create, update, and delete.
Make sure that when the agent sends notifications of router events
for router create, update, and delete, that the correct handler is
called with the right resource, event, and router information.
"""
event_handler = mock.Mock()
registry.subscribe(event_handler,
resources.ROUTER, events.BEFORE_CREATE)
registry.subscribe(event_handler,
resources.ROUTER, events.AFTER_CREATE)
registry.subscribe(event_handler,
resources.ROUTER, events.BEFORE_UPDATE)
registry.subscribe(event_handler,
resources.ROUTER, events.AFTER_UPDATE)
registry.subscribe(event_handler,
resources.ROUTER, events.BEFORE_DELETE)
registry.subscribe(event_handler,
resources.ROUTER, events.AFTER_DELETE)
router_info = self.generate_router_info(enable_ha=False)
router = self.manage_router(self.agent, router_info)
self.agent._process_updated_router(router.router)
self._delete_router(self.agent, router.router_id)
expected_calls = [
mock.call('router', 'before_create', self.agent, router=router),
mock.call('router', 'after_create', self.agent, router=router),
mock.call('router', 'before_update', self.agent, router=router),
mock.call('router', 'after_update', self.agent, router=router),
mock.call('router', 'before_delete', self.agent, router=router),
mock.call('router', 'after_delete', self.agent, router=router)]
event_handler.assert_has_calls(expected_calls)
def test_legacy_router_lifecycle(self):
self._router_lifecycle(enable_ha=False, dual_stack=True)
def test_legacy_router_lifecycle_with_no_gateway_subnet(self):
self.agent.conf.set_override('ipv6_gateway',
'fe80::f816:3eff:fe2e:1')
self._router_lifecycle(enable_ha=False, dual_stack=True,
v6_ext_gw_with_sub=False)
def test_ha_router_lifecycle(self):
self._router_lifecycle(enable_ha=True)
def test_conntrack_disassociate_fip(self):
'''Test that conntrack immediately drops stateful connection
that uses floating IP once it's disassociated.
'''
router_info = self.generate_router_info(enable_ha=False)
router = self.manage_router(self.agent, router_info)
port = net_helpers.get_free_namespace_port(l3_constants.PROTO_NAME_TCP,
router.ns_name)
client_address = '19.4.4.3'
server_address = '35.4.0.4'
def clean_fips(router):
router.router[l3_constants.FLOATINGIP_KEY] = []
clean_fips(router)
self._add_fip(router, client_address, fixed_address=server_address)
router.process(self.agent)
router_ns = ip_lib.IPWrapper(namespace=router.ns_name)
netcat = net_helpers.NetcatTester(
router.ns_name, router.ns_name, client_address, port,
protocol=net_helpers.NetcatTester.TCP)
self.addCleanup(netcat.stop_processes)
def assert_num_of_conntrack_rules(n):
out = router_ns.netns.execute(["conntrack", "-L",
"--orig-src", client_address])
self.assertEqual(
n, len([line for line in out.strip().split('\n') if line]))
with self.assert_max_execution_time(100):
assert_num_of_conntrack_rules(0)
self.assertTrue(netcat.test_connectivity())
assert_num_of_conntrack_rules(1)
clean_fips(router)
router.process(self.agent)
assert_num_of_conntrack_rules(0)
with testtools.ExpectedException(RuntimeError):
netcat.test_connectivity()
def test_ipv6_ha_router_lifecycle(self):
self._router_lifecycle(enable_ha=True, ip_version=6)
def test_ipv6_ha_router_lifecycle_with_no_gw_subnet(self):
self.agent.conf.set_override('ipv6_gateway',
'fe80::f816:3eff:fe2e:1')
self._router_lifecycle(enable_ha=True, ip_version=6,
v6_ext_gw_with_sub=False)
def test_ipv6_ha_router_lifecycle_with_no_gw_subnet_for_router_advts(self):
# Verify that router gw interface is configured to receive Router
# Advts from upstream router when no external gateway is configured.
self._router_lifecycle(enable_ha=True, dual_stack=True,
v6_ext_gw_with_sub=False)
def test_keepalived_configuration(self):
router_info = self.generate_router_info(enable_ha=True)
router = self.manage_router(self.agent, router_info)
expected = self.get_expected_keepalive_configuration(router)
self.assertEqual(expected,
router.keepalived_manager.get_conf_on_disk())
# Add a new FIP and change the GW IP address
router.router = copy.deepcopy(router.router)
existing_fip = '19.4.4.2'
new_fip = '19.4.4.3'
self._add_fip(router, new_fip)
subnet_id = _uuid()
fixed_ips = [{'ip_address': '19.4.4.10',
'prefixlen': 24,
'subnet_id': subnet_id}]
subnets = [{'id': subnet_id,
'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.5'}]
router.router['gw_port']['subnets'] = subnets
router.router['gw_port']['fixed_ips'] = fixed_ips
router.process(self.agent)
# Get the updated configuration and assert that both FIPs are in,
# and that the GW IP address was updated.
new_config = router.keepalived_manager.config.get_config_str()
old_gw = '0.0.0.0/0 via 19.4.4.1'
new_gw = '0.0.0.0/0 via 19.4.4.5'
old_external_device_ip = '19.4.4.4'
new_external_device_ip = '19.4.4.10'
self.assertIn(existing_fip, new_config)
self.assertIn(new_fip, new_config)
self.assertNotIn(old_gw, new_config)
self.assertIn(new_gw, new_config)
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
self.assertNotIn('%s/24 dev %s' %
(old_external_device_ip, external_device_name),
new_config)
self.assertIn('%s/24 dev %s' %
(new_external_device_ip, external_device_name),
new_config)
def _test_periodic_sync_routers_task(self,
routers_to_keep,
routers_deleted,
routers_deleted_during_resync):
ns_names_to_retrieve = set()
deleted_routers_info = []
for r in routers_to_keep:
ri = self.manage_router(self.agent, r)
ns_names_to_retrieve.add(ri.ns_name)
for r in routers_deleted + routers_deleted_during_resync:
ri = self.manage_router(self.agent, r)
deleted_routers_info.append(ri)
ns_names_to_retrieve.add(ri.ns_name)
mocked_get_routers = self.mock_plugin_api.get_routers
mocked_get_routers.return_value = (routers_to_keep +
routers_deleted_during_resync)
# clear agent router_info as it will be after restart
self.agent.router_info = {}
        # Synchronize the agent with the plug-in
with mock.patch.object(namespace_manager.NamespaceManager, 'list_all',
return_value=ns_names_to_retrieve):
self.agent.periodic_sync_routers_task(self.agent.context)
# Mock the plugin RPC API so a known external network id is returned
# when the router updates are processed by the agent
external_network_id = _uuid()
self.mock_plugin_api.get_external_network_id.return_value = (
external_network_id)
# Plug external_gateway_info in the routers that are not going to be
# deleted by the agent when it processes the updates. Otherwise,
# _process_router_if_compatible in the agent fails
for r in routers_to_keep:
r['external_gateway_info'] = {'network_id': external_network_id}
# while sync updates are still in the queue, higher priority
# router_deleted events may be added there as well
for r in routers_deleted_during_resync:
self.agent.router_deleted(self.agent.context, r['id'])
# make sure all events are processed
while not self.agent._queue._queue.empty():
self.agent._process_router_update()
for r in routers_to_keep:
self.assertIn(r['id'], self.agent.router_info)
self.assertTrue(self._namespace_exists(namespaces.NS_PREFIX +
r['id']))
for ri in deleted_routers_info:
self.assertNotIn(ri.router_id,
self.agent.router_info)
self._assert_router_does_not_exist(ri)
def test_periodic_sync_routers_task(self):
routers_to_keep = []
for i in range(2):
routers_to_keep.append(self.generate_router_info(False))
self._test_periodic_sync_routers_task(routers_to_keep,
routers_deleted=[],
routers_deleted_during_resync=[])
def test_periodic_sync_routers_task_routers_deleted_while_agent_down(self):
routers_to_keep = []
routers_deleted = []
for i in range(2):
routers_to_keep.append(self.generate_router_info(False))
for i in range(2):
routers_deleted.append(self.generate_router_info(False))
self._test_periodic_sync_routers_task(routers_to_keep,
routers_deleted,
routers_deleted_during_resync=[])
def test_periodic_sync_routers_task_routers_deleted_while_agent_sync(self):
routers_to_keep = []
routers_deleted_during_resync = []
for i in range(2):
routers_to_keep.append(self.generate_router_info(False))
for i in range(2):
routers_deleted_during_resync.append(
self.generate_router_info(False))
self._test_periodic_sync_routers_task(
routers_to_keep,
routers_deleted=[],
routers_deleted_during_resync=routers_deleted_during_resync)
def _router_lifecycle(self, enable_ha, ip_version=4,
dual_stack=False, v6_ext_gw_with_sub=True):
router_info = self.generate_router_info(enable_ha, ip_version,
dual_stack=dual_stack,
v6_ext_gw_with_sub=(
v6_ext_gw_with_sub))
router = self.manage_router(self.agent, router_info)
# Add multiple-IPv6-prefix internal router port
slaac = l3_constants.IPV6_SLAAC
slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
subnet_modes = [slaac_mode] * 2
self._add_internal_interface_by_subnet(router.router, count=2,
ip_version=6, ipv6_subnet_modes=subnet_modes)
router.process(self.agent)
if enable_ha:
port = router.get_ex_gw_port()
interface_name = router.get_external_device_name(port['id'])
self._assert_no_ip_addresses_on_interface(router.ns_name,
interface_name)
utils.wait_until_true(lambda: router.ha_state == 'master')
# Keepalived notifies of a state transition when it starts,
# not when it ends. Thus, we have to wait until keepalived finishes
# configuring everything. We verify this by waiting until the last
# device has an IP address.
device = router.router[l3_constants.INTERFACE_KEY][-1]
device_exists = functools.partial(
self.device_exists_with_ips_and_mac,
device,
router.get_internal_device_name,
router.ns_name)
utils.wait_until_true(device_exists)
self.assertTrue(self._namespace_exists(router.ns_name))
utils.wait_until_true(
lambda: self._metadata_proxy_exists(self.agent.conf, router))
self._assert_internal_devices(router)
self._assert_external_device(router)
if not (enable_ha and (ip_version == 6 or dual_stack)):
# Note(SridharG): enable the assert_gateway for IPv6 once
# keepalived on Ubuntu14.04 (i.e., check-neutron-dsvm-functional
# platform) is updated to 1.2.10 (or above).
# For more details: https://review.openstack.org/#/c/151284/
self._assert_gateway(router, v6_ext_gw_with_sub)
self.assertTrue(self.floating_ips_configured(router))
self._assert_snat_chains(router)
self._assert_floating_ip_chains(router)
self._assert_extra_routes(router)
ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4]
self._assert_onlink_subnet_routes(router, ip_versions)
self._assert_metadata_chains(router)
# Verify router gateway interface is configured to receive Router Advts
# when IPv6 is enabled and no IPv6 gateway is configured.
if router.use_ipv6 and not v6_ext_gw_with_sub:
if not self.agent.conf.ipv6_gateway:
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
ra_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.accept_ra' % external_device_name])
self.assertEqual('2', ra_state)
if enable_ha:
self._assert_ha_device(router)
self.assertTrue(router.keepalived_manager.get_process().active)
self._delete_router(self.agent, router.router_id)
self._assert_interfaces_deleted_from_ovs()
self._assert_router_does_not_exist(router)
if enable_ha:
self.assertFalse(router.keepalived_manager.get_process().active)
def _assert_external_device(self, router):
external_port = router.get_ex_gw_port()
self.assertTrue(self.device_exists_with_ips_and_mac(
external_port, router.get_external_device_name,
router.ns_name))
def _assert_gateway(self, router, v6_ext_gw_with_sub=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=router.ns_name)
for subnet in external_port['subnets']:
self._gateway_check(subnet['gateway_ip'], external_device)
if not v6_ext_gw_with_sub:
self._gateway_check(self.agent.conf.ipv6_gateway,
external_device)
def _gateway_check(self, gateway_ip, external_device):
expected_gateway = gateway_ip
ip_vers = netaddr.IPAddress(expected_gateway).version
existing_gateway = (external_device.route.get_gateway(
ip_version=ip_vers).get('gateway'))
self.assertEqual(expected_gateway, existing_gateway)
def _assert_ha_device(self, router):
def ha_router_dev_name_getter(not_used):
return router.get_ha_device_name()
self.assertTrue(self.device_exists_with_ips_and_mac(
router.router[l3_constants.HA_INTERFACE_KEY],
ha_router_dev_name_getter, router.ns_name))
@classmethod
def _get_addresses_on_device(cls, namespace, interface):
return [address['cidr'] for address in
ip_lib.IPDevice(interface, namespace=namespace).addr.list()]
def _assert_no_ip_addresses_on_interface(self, namespace, interface):
self.assertEqual(
[], self._get_addresses_on_device(namespace, interface))
def test_ha_router_conf_on_restarted_agent(self):
router_info = self.generate_router_info(enable_ha=True)
router1 = self.manage_router(self.agent, router_info)
self._add_fip(router1, '192.168.111.12')
restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport(
self.agent.host, self.agent.conf)
self.manage_router(restarted_agent, router1.router)
utils.wait_until_true(lambda: self.floating_ips_configured(router1))
self.assertIn(
router1._get_primary_vip(),
self._get_addresses_on_device(
router1.ns_name,
router1.get_ha_device_name()))
def test_fip_connection_from_same_subnet(self):
'''Test connection to floatingip which is associated with
fixed_ip on the same subnet of the source fixed_ip.
In other words it confirms that return packets surely
go through the router.
'''
router_info = self.generate_router_info(enable_ha=False)
router = self.manage_router(self.agent, router_info)
router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0])
router_ip = router_ip_cidr.partition('/')[0]
br_int = get_ovs_bridge(self.agent.conf.ovs_integration_bridge)
src_machine, dst_machine = self.useFixture(
machine_fixtures.PeerMachines(
br_int,
net_helpers.increment_ip_cidr(router_ip_cidr),
router_ip)).machines
dst_fip = '19.4.4.10'
router.router[l3_constants.FLOATINGIP_KEY] = []
self._add_fip(router, dst_fip, fixed_address=dst_machine.ip)
router.process(self.agent)
protocol_port = net_helpers.get_free_namespace_port(
l3_constants.PROTO_NAME_TCP, dst_machine.namespace)
# client sends to fip
netcat = net_helpers.NetcatTester(
src_machine.namespace, dst_machine.namespace,
dst_fip, protocol_port,
protocol=net_helpers.NetcatTester.TCP)
self.addCleanup(netcat.stop_processes)
self.assertTrue(netcat.test_connectivity())
class L3HATestFramework(L3AgentTestFramework):
NESTED_NAMESPACE_SEPARATOR = '@'
def setUp(self):
super(L3HATestFramework, self).setUp()
self.failover_agent = self._configure_agent('agent2')
br_int_1 = get_ovs_bridge(self.agent.conf.ovs_integration_bridge)
br_int_2 = get_ovs_bridge(
self.failover_agent.conf.ovs_integration_bridge)
veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports
br_int_1.add_port(veth1.name)
br_int_2.add_port(veth2.name)
def test_ha_router_failover(self):
router_info = self.generate_router_info(enable_ha=True)
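        # Both agents in this test run on the same host, so the router
        # namespace name is suffixed with the agent host name to keep the
        # two routers' namespaces from colliding.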
get_ns_name = mock.patch.object(
namespaces.RouterNamespace, '_get_ns_name').start()
get_ns_name.return_value = "%s%s%s" % (
namespaces.RouterNamespace._get_ns_name(router_info['id']),
self.NESTED_NAMESPACE_SEPARATOR, self.agent.host)
router1 = self.manage_router(self.agent, router_info)
router_info_2 = copy.deepcopy(router_info)
router_info_2[l3_constants.HA_INTERFACE_KEY] = (
l3_test_common.get_ha_interface(ip='169.254.192.2',
mac='22:22:22:22:22:22'))
get_ns_name.return_value = "%s%s%s" % (
namespaces.RouterNamespace._get_ns_name(router_info_2['id']),
self.NESTED_NAMESPACE_SEPARATOR, self.failover_agent.host)
router2 = self.manage_router(self.failover_agent, router_info_2)
utils.wait_until_true(lambda: router1.ha_state == 'master')
utils.wait_until_true(lambda: router2.ha_state == 'backup')
self.fail_ha_router(router1)
utils.wait_until_true(lambda: router2.ha_state == 'master')
utils.wait_until_true(lambda: router1.ha_state == 'backup')
def test_ha_router_ipv6_radvd_status(self):
router_info = self.generate_router_info(ip_version=6, enable_ha=True)
router1 = self.manage_router(self.agent, router_info)
utils.wait_until_true(lambda: router1.ha_state == 'master')
utils.wait_until_true(lambda: router1.radvd.enabled)
def _check_lla_status(router, expected):
internal_devices = router.router[l3_constants.INTERFACE_KEY]
for device in internal_devices:
lladdr = ip_lib.get_ipv6_lladdr(device['mac_address'])
exists = ip_lib.device_exists_with_ips_and_mac(
router.get_internal_device_name(device['id']), [lladdr],
device['mac_address'], router.ns_name)
self.assertEqual(expected, exists)
_check_lla_status(router1, True)
device_name = router1.get_ha_device_name()
ha_device = ip_lib.IPDevice(device_name, namespace=router1.ns_name)
ha_device.link.set_down()
utils.wait_until_true(lambda: router1.ha_state == 'backup')
utils.wait_until_true(lambda: not router1.radvd.enabled, timeout=10)
_check_lla_status(router1, False)
def test_ha_router_process_ipv6_subnets_to_existing_port(self):
router_info = self.generate_router_info(enable_ha=True, ip_version=6)
router = self.manage_router(self.agent, router_info)
def verify_ip_in_keepalived_config(router, iface):
config = router.keepalived_manager.config.get_config_str()
ip_cidrs = common_utils.fixed_ip_cidrs(iface['fixed_ips'])
for ip_addr in ip_cidrs:
self.assertIn(ip_addr, config)
interface_id = router.router[l3_constants.INTERFACE_KEY][0]['id']
slaac = l3_constants.IPV6_SLAAC
slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
# Add a second IPv6 subnet to the router internal interface.
self._add_internal_interface_by_subnet(router.router, count=1,
ip_version=6, ipv6_subnet_modes=[slaac_mode],
interface_id=interface_id)
router.process(self.agent)
utils.wait_until_true(lambda: router.ha_state == 'master')
# Verify that router internal interface is present and is configured
# with IP address from both the subnets.
internal_iface = router.router[l3_constants.INTERFACE_KEY][0]
self.assertEqual(2, len(internal_iface['fixed_ips']))
self._assert_internal_devices(router)
# Verify that keepalived config is properly updated.
verify_ip_in_keepalived_config(router, internal_iface)
# Remove one subnet from the router internal iface
interfaces = copy.deepcopy(router.router.get(
l3_constants.INTERFACE_KEY, []))
fixed_ips, subnets = [], []
fixed_ips.append(interfaces[0]['fixed_ips'][0])
subnets.append(interfaces[0]['subnets'][0])
interfaces[0].update({'fixed_ips': fixed_ips, 'subnets': subnets})
router.router[l3_constants.INTERFACE_KEY] = interfaces
router.process(self.agent)
# Verify that router internal interface has a single ipaddress
internal_iface = router.router[l3_constants.INTERFACE_KEY][0]
self.assertEqual(1, len(internal_iface['fixed_ips']))
self._assert_internal_devices(router)
# Verify that keepalived config is properly updated.
verify_ip_in_keepalived_config(router, internal_iface)
class MetadataFakeProxyHandler(object):
def __init__(self, status):
self.status = status
@webob.dec.wsgify()
def __call__(self, req):
return webob.Response(status=self.status)
class MetadataL3AgentTestCase(L3AgentTestFramework):
SOCKET_MODE = 0o644
def _create_metadata_fake_server(self, status):
server = utils.UnixDomainWSGIServer('metadata-fake-server')
self.addCleanup(server.stop)
# NOTE(cbrandily): TempDir fixture creates a folder with 0o700
# permissions but metadata_proxy_socket folder must be readable by all
# users
self.useFixture(
helpers.RecursivePermDirFixture(
os.path.dirname(self.agent.conf.metadata_proxy_socket), 0o555))
server.start(MetadataFakeProxyHandler(status),
self.agent.conf.metadata_proxy_socket,
workers=0, backlog=4096, mode=self.SOCKET_MODE)
def _query_metadata_proxy(self, machine):
url = 'http://%(host)s:%(port)s' % {'host': dhcp.METADATA_DEFAULT_IP,
'port': dhcp.METADATA_PORT}
cmd = 'curl', '--max-time', METADATA_REQUEST_TIMEOUT, '-D-', url
i = 0
CONNECTION_REFUSED_TIMEOUT = METADATA_REQUEST_TIMEOUT // 2
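        # Retry the request while the proxy socket is still coming up;
        # 'Connection refused' is retried until half of the request
        # timeout has elapsed.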
while i <= CONNECTION_REFUSED_TIMEOUT:
try:
raw_headers = machine.execute(cmd)
break
except RuntimeError as e:
if 'Connection refused' in str(e):
time.sleep(METADATA_REQUEST_SLEEP)
i += METADATA_REQUEST_SLEEP
else:
self.fail('metadata proxy unreachable '
'on %s before timeout' % url)
if i > CONNECTION_REFUSED_TIMEOUT:
            self.fail('Timed out waiting for the metadata proxy to become available')
return raw_headers.splitlines()[0]
def test_access_to_metadata_proxy(self):
"""Test access to the l3-agent metadata proxy.
The test creates:
* A l3-agent metadata service:
* A router (which creates a metadata proxy in the router namespace),
* A fake metadata server
* A "client" namespace (simulating a vm) with a port on router
internal subnet.
The test queries from the "client" namespace the metadata proxy on
http://169.254.169.254 and asserts that the metadata proxy added
the X-Forwarded-For and X-Neutron-Router-Id headers to the request
and forwarded the http request to the fake metadata server and the
response to the "client" namespace.
"""
router_info = self.generate_router_info(enable_ha=False)
router = self.manage_router(self.agent, router_info)
self._create_metadata_fake_server(webob.exc.HTTPOk.code)
# Create and configure client namespace
router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0])
br_int = get_ovs_bridge(self.agent.conf.ovs_integration_bridge)
machine = self.useFixture(
machine_fixtures.FakeMachine(
br_int,
net_helpers.increment_ip_cidr(router_ip_cidr),
router_ip_cidr.partition('/')[0]))
# Query metadata proxy
firstline = self._query_metadata_proxy(machine)
# Check status code
self.assertIn(str(webob.exc.HTTPOk.code), firstline.split())
class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase):
"""Test metadata proxy with least privileged user.
The least privileged user has uid=65534 and is commonly named 'nobody' but
    not always, which is why we use its uid.
"""
SOCKET_MODE = 0o664
def setUp(self):
super(UnprivilegedUserMetadataL3AgentTestCase, self).setUp()
self.agent.conf.set_override('metadata_proxy_user', '65534')
self.agent.conf.set_override('metadata_proxy_watch_log', False)
class UnprivilegedUserGroupMetadataL3AgentTestCase(MetadataL3AgentTestCase):
"""Test metadata proxy with least privileged user/group.
The least privileged user has uid=65534 and is commonly named 'nobody' but
    not always, which is why we use its uid.
    Its group has gid=65534 and is commonly named 'nobody' or 'nogroup', which
    is why we use its gid.
"""
SOCKET_MODE = 0o666
def setUp(self):
super(UnprivilegedUserGroupMetadataL3AgentTestCase, self).setUp()
self.agent.conf.set_override('metadata_proxy_user', '65534')
self.agent.conf.set_override('metadata_proxy_group', '65534')
self.agent.conf.set_override('metadata_proxy_watch_log', False)
class TestDvrRouter(L3AgentTestFramework):
def test_dvr_router_lifecycle_without_ha_without_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=False, enable_snat=False)
def test_dvr_router_lifecycle_without_ha_with_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=False, enable_snat=True)
def _helper_create_dvr_router_fips_for_ext_network(
self, agent_mode, **dvr_router_kwargs):
self.agent.conf.agent_mode = agent_mode
router_info = self.generate_dvr_router_info(**dvr_router_kwargs)
self.mock_plugin_api.get_external_network_id.return_value = (
router_info['_floatingips'][0]['floating_network_id'])
router = self.manage_router(self.agent, router_info)
fip_ns = router.fip_ns.get_name()
return router, fip_ns
def _validate_fips_for_external_network(self, router, fip_ns):
self.assertTrue(self._namespace_exists(router.ns_name))
self.assertTrue(self._namespace_exists(fip_ns))
self._assert_dvr_floating_ips(router)
self._assert_snat_namespace_does_not_exist(router)
def test_dvr_router_fips_for_multiple_ext_networks(self):
agent_mode = 'dvr'
# Create the first router fip with external net1
dvr_router1_kwargs = {'ip_address': '19.4.4.3',
'subnet_cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1',
'gateway_mac': 'ca:fe:de:ab:cd:ef'}
router1, fip1_ns = (
self._helper_create_dvr_router_fips_for_ext_network(
agent_mode, **dvr_router1_kwargs))
# Validate the fip with external net1
self._validate_fips_for_external_network(router1, fip1_ns)
# Create the second router fip with external net2
dvr_router2_kwargs = {'ip_address': '19.4.5.3',
'subnet_cidr': '19.4.5.0/24',
'gateway_ip': '19.4.5.1',
'gateway_mac': 'ca:fe:de:ab:cd:fe'}
router2, fip2_ns = (
self._helper_create_dvr_router_fips_for_ext_network(
agent_mode, **dvr_router2_kwargs))
# Validate the fip with external net2
self._validate_fips_for_external_network(router2, fip2_ns)
def _dvr_router_lifecycle(self, enable_ha=False, enable_snat=False,
custom_mtu=2000):
'''Test dvr router lifecycle
:param enable_ha: sets the ha value for the router.
:param enable_snat: the value of enable_snat is used
to set the agent_mode.
'''
# The value of agent_mode can be dvr, dvr_snat, or legacy.
# Since by definition this is a dvr (distributed = true)
# only dvr and dvr_snat are applicable
self.agent.conf.agent_mode = 'dvr_snat' if enable_snat else 'dvr'
self.agent.conf.network_device_mtu = custom_mtu
# We get the router info particular to a dvr router
router_info = self.generate_dvr_router_info(
enable_ha, enable_snat)
        # We need to mock the get_agent_gateway_port return value
        # because the whole L3PluginApi is mocked and the l3_agent needs
        # the gateway port information before it creates the port.
        # The port returned needs to carry the same information as
        # router_info['gw_port']
self.mock_plugin_api.get_agent_gateway_port.return_value = router_info[
'gw_port']
# We also need to mock the get_external_network_id method to
# get the correct fip namespace.
self.mock_plugin_api.get_external_network_id.return_value = (
router_info['_floatingips'][0]['floating_network_id'])
# With all that set we can now ask the l3_agent to
# manage the router (create it, create namespaces,
# attach interfaces, etc...)
router = self.manage_router(self.agent, router_info)
self.assertTrue(self._namespace_exists(router.ns_name))
self.assertTrue(self._metadata_proxy_exists(self.agent.conf, router))
self._assert_internal_devices(router)
self._assert_dvr_external_device(router)
self._assert_dvr_gateway(router)
self._assert_dvr_floating_ips(router)
self._assert_snat_chains(router)
self._assert_floating_ip_chains(router)
self._assert_metadata_chains(router)
self._assert_extra_routes(router)
self._assert_rfp_fpr_mtu(router, custom_mtu)
self._delete_router(self.agent, router.router_id)
self._assert_interfaces_deleted_from_ovs()
self._assert_router_does_not_exist(router)
def generate_dvr_router_info(
self, enable_ha=False, enable_snat=False, **kwargs):
router = l3_test_common.prepare_router_data(
enable_snat=enable_snat,
enable_floating_ip=True,
enable_ha=enable_ha,
**kwargs)
internal_ports = router.get(l3_constants.INTERFACE_KEY, [])
router['distributed'] = True
router['gw_port_host'] = self.agent.conf.host
router['gw_port']['binding:host_id'] = self.agent.conf.host
floating_ip = router['_floatingips'][0]
floating_ip['floating_network_id'] = router['gw_port']['network_id']
floating_ip['host'] = self.agent.conf.host
floating_ip['port_id'] = internal_ports[0]['id']
floating_ip['status'] = 'ACTIVE'
self._add_snat_port_info_to_router(router, internal_ports)
# FIP has a dependency on external gateway. So we need to create
# the snat_port info and fip_agent_gw_port_info irrespective of
# the agent type the dvr supports. The namespace creation is
# dependent on the agent_type.
external_gw_port = router['gw_port']
self._add_fip_agent_gw_port_info_to_router(router, external_gw_port)
return router
def _add_fip_agent_gw_port_info_to_router(self, router, external_gw_port):
# Add fip agent gateway port information to the router_info
fip_gw_port_list = router.get(
l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
if not fip_gw_port_list and external_gw_port:
# Get values from external gateway port
fixed_ip = external_gw_port['fixed_ips'][0]
float_subnet = external_gw_port['subnets'][0]
port_ip = fixed_ip['ip_address']
# Pick an ip address which is not the same as port_ip
fip_gw_port_ip = str(netaddr.IPAddress(port_ip) + 5)
# Add floatingip agent gateway port info to router
prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [
{'subnets': [
{'cidr': float_subnet['cidr'],
'gateway_ip': float_subnet['gateway_ip'],
'id': fixed_ip['subnet_id']}],
'network_id': external_gw_port['network_id'],
'device_owner': 'network:floatingip_agent_gateway',
'mac_address': 'fa:16:3e:80:8d:89',
'binding:host_id': self.agent.conf.host,
'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
'ip_address': fip_gw_port_ip,
'prefixlen': prefixlen}],
'id': _uuid(),
'device_id': _uuid()}
]
def _add_snat_port_info_to_router(self, router, internal_ports):
# Add snat port information to the router
snat_port_list = router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
if not snat_port_list and internal_ports:
# Get values from internal port
port = internal_ports[0]
fixed_ip = port['fixed_ips'][0]
snat_subnet = port['subnets'][0]
port_ip = fixed_ip['ip_address']
# Pick an ip address which is not the same as port_ip
snat_ip = str(netaddr.IPAddress(port_ip) + 5)
# Add the info to router as the first snat port
# in the list of snat ports
prefixlen = netaddr.IPNetwork(snat_subnet['cidr']).prefixlen
router[l3_constants.SNAT_ROUTER_INTF_KEY] = [
{'subnets': [
{'cidr': snat_subnet['cidr'],
'gateway_ip': snat_subnet['gateway_ip'],
'id': fixed_ip['subnet_id']}],
'network_id': port['network_id'],
'device_owner': 'network:router_centralized_snat',
'mac_address': 'fa:16:3e:80:8d:89',
'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
'ip_address': snat_ip,
'prefixlen': prefixlen}],
'id': _uuid(),
'device_id': _uuid()}
]
def _assert_dvr_external_device(self, router):
external_port = router.get_ex_gw_port()
snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
# if the agent is in dvr_snat mode, then we have to check
# that the correct ports and ip addresses exist in the
# snat_ns_name namespace
if self.agent.conf.agent_mode == 'dvr_snat':
self.assertTrue(self.device_exists_with_ips_and_mac(
external_port, router.get_external_device_name,
snat_ns_name))
# if the agent is in dvr mode then the snat_ns_name namespace
# should not be present at all:
elif self.agent.conf.agent_mode == 'dvr':
self.assertFalse(
self._namespace_exists(snat_ns_name),
"namespace %s was found but agent is in dvr mode not dvr_snat"
% (str(snat_ns_name))
)
# if the agent is anything else the test is misconfigured
# we force a test failure with message
else:
self.assertTrue(False, " agent not configured for dvr or dvr_snat")
def _assert_dvr_gateway(self, router):
gateway_expected_in_snat_namespace = (
self.agent.conf.agent_mode == 'dvr_snat'
)
if gateway_expected_in_snat_namespace:
self._assert_dvr_snat_gateway(router)
self._assert_removal_of_already_deleted_gateway_device(router)
snat_namespace_should_not_exist = (
self.agent.conf.agent_mode == 'dvr'
)
if snat_namespace_should_not_exist:
self._assert_snat_namespace_does_not_exist(router)
def _assert_dvr_snat_gateway(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=namespace)
existing_gateway = (
external_device.route.get_gateway().get('gateway'))
expected_gateway = external_port['subnets'][0]['gateway_ip']
self.assertEqual(expected_gateway, existing_gateway)
def _assert_removal_of_already_deleted_gateway_device(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
device = ip_lib.IPDevice("fakedevice",
namespace=namespace)
# Assert that no exception is thrown for this case
self.assertIsNone(router._delete_gateway_device_if_exists(
device, "192.168.0.1", 0))
def _assert_snat_namespace_does_not_exist(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self.assertFalse(self._namespace_exists(namespace))
def _assert_dvr_floating_ips(self, router):
# in the fip namespace:
# Check that the fg-<port-id> (floatingip_agent_gateway)
# is created with the ip address of the external gateway port
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
self.assertTrue(floating_ips)
# We need to fetch the floatingip agent gateway port info
# from the router_info
floating_agent_gw_port = (
router.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY])
self.assertTrue(floating_agent_gw_port)
external_gw_port = floating_agent_gw_port[0]
fip_ns = self.agent.get_fip_ns(floating_ips[0]['floating_network_id'])
fip_ns_name = fip_ns.get_name()
fg_port_created_successfully = ip_lib.device_exists_with_ips_and_mac(
fip_ns.get_ext_device_name(external_gw_port['id']),
[self._port_first_ip_cidr(external_gw_port)],
external_gw_port['mac_address'],
namespace=fip_ns_name)
self.assertTrue(fg_port_created_successfully)
# Check fpr-router device has been created
device_name = fip_ns.get_int_device_name(router.router_id)
fpr_router_device_created_successfully = ip_lib.device_exists(
device_name, namespace=fip_ns_name)
self.assertTrue(fpr_router_device_created_successfully)
# In the router namespace
# Check rfp-<router-id> is created correctly
for fip in floating_ips:
device_name = fip_ns.get_rtr_ext_device_name(router.router_id)
self.assertTrue(ip_lib.device_exists(
device_name, namespace=router.ns_name))
def test_dvr_router_rem_fips_on_restarted_agent(self):
self.agent.conf.agent_mode = 'dvr_snat'
router_info = self.generate_dvr_router_info()
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(fip_ns))
restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport(
self.agent.host, self.agent.conf)
router1.router[l3_constants.FLOATINGIP_KEY] = []
self.manage_router(restarted_agent, router1.router)
self._assert_dvr_snat_gateway(router1)
self.assertFalse(self._namespace_exists(fip_ns))
def test_dvr_router_add_fips_on_restarted_agent(self):
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
router = self.manage_router(self.agent, router_info)
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
router_ns = router.ns_name
fip_rule_prio_1 = self._get_fixed_ip_rule_priority(
router_ns, floating_ips[0]['fixed_ip_address'])
restarted_agent = neutron_l3_agent.L3NATAgent(
self.agent.host, self.agent.conf)
floating_ips[0]['floating_ip_address'] = '21.4.4.2'
floating_ips[0]['fixed_ip_address'] = '10.0.0.2'
self.manage_router(restarted_agent, router_info)
fip_rule_prio_2 = self._get_fixed_ip_rule_priority(
router_ns, floating_ips[0]['fixed_ip_address'])
self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2)
def _get_fixed_ip_rule_priority(self, namespace, fip):
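        # Parse the IPv4 'ip rule show' output inside the namespace and
        # return the priority of the rule matching the given fixed IP.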
iprule = ip_lib.IPRule(namespace)
lines = iprule.rule._as_root([4], ['show']).splitlines()
for line in lines:
if fip in line:
info = iprule.rule._parse_line(4, line)
return info['priority']
def test_dvr_router_add_internal_network_set_arp_cache(self):
# Check that, when the router is set up and there are
        # existing ports on the uplinked subnet, the ARP
# cache is properly populated.
self.agent.conf.agent_mode = 'dvr_snat'
router_info = l3_test_common.prepare_router_data()
router_info['distributed'] = True
expected_neighbor = '35.4.1.10'
port_data = {
'fixed_ips': [{'ip_address': expected_neighbor}],
'mac_address': 'fa:3e:aa:bb:cc:dd',
'device_owner': 'compute:None'
}
self.agent.plugin_rpc.get_ports_by_subnet.return_value = [port_data]
router1 = self.manage_router(self.agent, router_info)
internal_device = router1.get_internal_device_name(
router_info['_interfaces'][0]['id'])
neighbors = ip_lib.IPDevice(internal_device, router1.ns_name).neigh
self.assertEqual(expected_neighbor,
neighbors.show(ip_version=4).split()[0])
def _assert_rfp_fpr_mtu(self, router, expected_mtu=1500):
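        # The rfp device lives in the router namespace and the fpr device
        # in the fip namespace; both ends of the pair must carry the
        # configured MTU.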
dev_mtu = self.get_device_mtu(
router.router_id, router.fip_ns.get_rtr_ext_device_name,
router.ns_name)
self.assertEqual(expected_mtu, dev_mtu)
dev_mtu = self.get_device_mtu(
router.router_id, router.fip_ns.get_int_device_name,
router.fip_ns.get_name())
self.assertEqual(expected_mtu, dev_mtu)
def test_dvr_router_fip_agent_mismatch(self):
"""Test to validate the floatingip agent mismatch.
This test validates the condition where floatingip agent
gateway port host mismatches with the agent and so the
binding will not be there.
"""
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
floating_ip = router_info['_floatingips'][0]
floating_ip['host'] = 'my_new_host'
# In this case the floatingip binding is different and so it
# should not create the floatingip namespace on the given agent.
        # This is effectively the same as having no current binding.
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(router1.ns_name))
self.assertFalse(self._namespace_exists(fip_ns))
self._assert_snat_namespace_does_not_exist(router1)
def test_dvr_router_fip_late_binding(self):
"""Test to validate the floatingip migration or latebinding.
This test validates the condition where floatingip private
port changes while migration or when the private port host
binding is done later after floatingip association.
"""
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
fip_agent_gw_port = router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY]
# Now let us not pass the FLOATINGIP_AGENT_INTF_KEY, to emulate
# that the server did not create the port, since there was no valid
# host binding.
router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = []
self.mock_plugin_api.get_agent_gateway_port.return_value = (
fip_agent_gw_port[0])
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(router1.ns_name))
self.assertTrue(self._namespace_exists(fip_ns))
self._assert_snat_namespace_does_not_exist(router1)
def _assert_snat_namespace_exists(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self.assertTrue(self._namespace_exists(namespace))
def _get_dvr_snat_namespace_device_status(
self, router, internal_dev_name=None):
"""Function returns the internal and external device status."""
snat_ns = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
qg_device_created_successfully = ip_lib.device_exists(
external_device_name, namespace=snat_ns)
sg_device_created_successfully = ip_lib.device_exists(
internal_dev_name, namespace=snat_ns)
return qg_device_created_successfully, sg_device_created_successfully
def test_dvr_router_snat_namespace_with_interface_remove(self):
"""Test to validate the snat namespace with interface remove.
This test validates the snat namespace for all the external
        and internal devices. It also validates that the internal
device corresponding to the router interface is removed
when the router interface is deleted.
"""
self.agent.conf.agent_mode = 'dvr_snat'
router_info = self.generate_dvr_router_info()
snat_internal_port = router_info[l3_constants.SNAT_ROUTER_INTF_KEY]
router1 = self.manage_router(self.agent, router_info)
csnat_internal_port = (
router1.router[l3_constants.SNAT_ROUTER_INTF_KEY])
# Now save the internal device name to verify later
internal_device_name = router1._get_snat_int_device_name(
csnat_internal_port[0]['id'])
self._assert_snat_namespace_exists(router1)
qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
router1, internal_dev_name=internal_device_name)
self.assertTrue(qg_device)
self.assertTrue(sg_device)
self.assertEqual(router1.snat_ports, snat_internal_port)
# Now let us not pass INTERFACE_KEY, to emulate
# the interface has been removed.
router1.router[l3_constants.INTERFACE_KEY] = []
# Now let us not pass the SNAT_ROUTER_INTF_KEY, to emulate
# that the server did not send it, since the interface has been
# removed.
router1.router[l3_constants.SNAT_ROUTER_INTF_KEY] = []
self.agent._process_updated_router(router1.router)
router_updated = self.agent.router_info[router_info['id']]
self._assert_snat_namespace_exists(router_updated)
qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
router_updated, internal_dev_name=internal_device_name)
self.assertFalse(sg_device)
self.assertTrue(qg_device)
def test_dvr_router_calls_delete_agent_gateway_if_last_fip(self):
"""Test to validate delete fip if it is last fip managed by agent."""
self.agent.conf.agent_mode = 'dvr_snat'
router_info = self.generate_dvr_router_info(enable_snat=True)
router1 = self.manage_router(self.agent, router_info)
floating_agent_gw_port = (
router1.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY])
self.assertTrue(floating_agent_gw_port)
fip_ns = router1.fip_ns.get_name()
router1.fip_ns.agent_gw_port = floating_agent_gw_port
self.assertTrue(self._namespace_exists(router1.ns_name))
self.assertTrue(self._namespace_exists(fip_ns))
self._assert_dvr_floating_ips(router1)
self._assert_dvr_snat_gateway(router1)
router1.router[l3_constants.FLOATINGIP_KEY] = []
rpc_mock = mock.patch.object(
self.agent.plugin_rpc, 'delete_agent_gateway_port').start()
self.agent._process_updated_router(router1.router)
self.assertTrue(rpc_mock.called)
rpc_mock.assert_called_once_with(
self.agent.context,
floating_agent_gw_port[0]['network_id'])
self.assertFalse(self._namespace_exists(fip_ns))
| apache-2.0 |
ESS-LLP/erpnext-healthcare | erpnext/patches/v6_27/fix_recurring_order_status.py | 54 | 1936 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
for doc in (
{
"doctype": "Sales Order",
"stock_doctype": "Delivery Note",
"invoice_doctype": "Sales Invoice",
"stock_doctype_ref_field": "against_sales_order",
"invoice_ref_field": "sales_order",
"qty_field": "delivered_qty"
},
{
"doctype": "Purchase Order",
"stock_doctype": "Purchase Receipt",
"invoice_doctype": "Purchase Invoice",
"stock_doctype_ref_field": "prevdoc_docname",
"invoice_ref_field": "purchase_order",
"qty_field": "received_qty"
}):
order_list = frappe.db.sql("""select name from `tab{0}`
where docstatus=1 and is_recurring=1
and ifnull(recurring_id, '') != name and creation >= '2016-01-25'"""
.format(doc["doctype"]), as_dict=1)
for order in order_list:
frappe.db.sql("""update `tab{0} Item`
set {1}=0, billed_amt=0 where parent=%s""".format(doc["doctype"],
doc["qty_field"]), order.name)
# Check against Delivery Note and Purchase Receipt
stock_doc_list = frappe.db.sql("""select distinct parent from `tab{0} Item`
where docstatus=1 and ifnull({1}, '')=%s"""
.format(doc["stock_doctype"], doc["stock_doctype_ref_field"]), order.name)
if stock_doc_list:
for dn in stock_doc_list:
frappe.get_doc(doc["stock_doctype"], dn[0]).update_qty(update_modified=False)
# Check against Invoice
invoice_list = frappe.db.sql("""select distinct parent from `tab{0} Item`
where docstatus=1 and ifnull({1}, '')=%s"""
.format(doc["invoice_doctype"], doc["invoice_ref_field"]), order.name)
if invoice_list:
for dn in invoice_list:
frappe.get_doc(doc["invoice_doctype"], dn[0]).update_qty(update_modified=False)
frappe.get_doc(doc["doctype"], order.name).set_status(update=True, update_modified=False) | gpl-3.0 |
mashaoze/esp-idf | tools/tiny-test-fw/CIAssignUnitTest.py | 2 | 5360 | """
Command line tool to assign unit tests to CI test jobs.
"""
import re
import os
import sys
import argparse
import yaml
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path:
sys.path.insert(0, test_fw_path)
from Utility import CIAssignTest
class Group(CIAssignTest.Group):
SORT_KEYS = ["config", "SDK", "test environment", "multi_device", "multi_stage", "tags"]
MAX_CASE = 30
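    # Maps attribute names used by this script to the keys used in the
    # test case data (see _get_case_attr).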
ATTR_CONVERT_TABLE = {
"execution_time": "execution time"
}
    # when IDF supports multiple chips, SDK will be moved into tags and this can be removed
CI_JOB_MATCH_KEYS = ["test environment", "SDK"]
def __init__(self, case):
super(Group, self).__init__(case)
for tag in self._get_case_attr(case, "tags"):
self.ci_job_match_keys.add(tag)
@staticmethod
def _get_case_attr(case, attr):
if attr in Group.ATTR_CONVERT_TABLE:
attr = Group.ATTR_CONVERT_TABLE[attr]
return case[attr]
def _create_extra_data(self, test_function):
"""
        For unit test cases, we need to copy some attributes of the test cases
        into the config file so that the unit test function knows how to run the case.
"""
case_data = []
for case in self.case_list:
one_case_data = {
"config": self._get_case_attr(case, "config"),
"name": self._get_case_attr(case, "summary"),
"reset": self._get_case_attr(case, "reset"),
"timeout": self._get_case_attr(case, "timeout"),
}
if test_function in ["run_multiple_devices_cases", "run_multiple_stage_cases"]:
try:
one_case_data["child case num"] = self._get_case_attr(case, "child case num")
except KeyError as e:
print("multiple devices/stages cases must contains at least two test functions")
print("case name: {}".format(one_case_data["name"]))
raise e
case_data.append(one_case_data)
return case_data
def _map_test_function(self):
"""
        Determine which test function to use according to the current test case
:return: test function name to use
"""
if self.filters["multi_device"] == "Yes":
test_function = "run_multiple_devices_cases"
elif self.filters["multi_stage"] == "Yes":
test_function = "run_multiple_stage_cases"
else:
test_function = "run_unit_test_cases"
return test_function
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
test_function = self._map_test_function()
output_data = {
# we don't need filter for test function, as UT uses a few test functions for all cases
"CaseConfig": [
{
"name": test_function,
"extra_data": self._create_extra_data(test_function),
}
]
}
return output_data
class UnitTestAssignTest(CIAssignTest.AssignTest):
CI_TEST_JOB_PATTERN = re.compile(r"^UT_.+")
def __init__(self, test_case_path, ci_config_file):
CIAssignTest.AssignTest.__init__(self, test_case_path, ci_config_file, case_group=Group)
def _search_cases(self, test_case_path, case_filter=None):
"""
        For unit test cases, we don't search for test functions.
        The unit test cases are stored in a yaml file which is created by the build-idf-test job.
"""
with open(test_case_path, "r") as f:
raw_data = yaml.load(f)
test_cases = raw_data["test cases"]
        # Filter keys are lower case. Map the lower-case keys to the original keys.
try:
key_mapping = {x.lower(): x for x in test_cases[0].keys()}
except IndexError:
key_mapping = dict()
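        # Keep only the cases whose attributes match every entry in
        # case_filter.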
if case_filter:
for key in case_filter:
filtered_cases = []
for case in test_cases:
try:
mapped_key = key_mapping[key]
# bot converts string to lower case
if isinstance(case[mapped_key], str):
_value = case[mapped_key].lower()
else:
_value = case[mapped_key]
if _value in case_filter[key]:
filtered_cases.append(case)
except KeyError:
                        # case doesn't have this key; regard it as passing the filter
filtered_cases.append(case)
test_cases = filtered_cases
return test_cases
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
help="test case folder or file")
parser.add_argument("ci_config_file",
help="gitlab ci config file")
parser.add_argument("output_path",
help="output path of config files")
args = parser.parse_args()
assign_test = UnitTestAssignTest(args.test_case, args.ci_config_file)
assign_test.assign_cases()
assign_test.output_configs(args.output_path)
| apache-2.0 |
mdibaiee/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py | 499 | 1859 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrong web_socket_do_extra_handshake signature.
"""
def no_web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write(
'sub/wrong_handshake_sig_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
nathanial/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/formwizard/tests.py | 104 | 1813 | import re
from django import forms
from django.test import TestCase
class FormWizardWithNullBooleanField(TestCase):
urls = 'regressiontests.formwizard.urls'
input_re = re.compile('name="([^"]+)" value="([^"]+)"')
wizard_url = '/wiz/'
wizard_step_data = (
{
'0-name': 'Pony',
'0-thirsty': '2',
},
{
'1-address1': '123 Main St',
'1-address2': 'Djangoland',
},
{
'2-random_crap': 'blah blah',
}
)
def grabFieldData(self, response):
"""
Pull the appropriate field data from the context to pass to the next wizard step
"""
previous_fields = response.context['previous_fields']
fields = {'wizard_step': response.context['step0']}
def grab(m):
fields[m.group(1)] = m.group(2)
return ''
self.input_re.sub(grab, previous_fields)
return fields
def checkWizardStep(self, response, step_no):
"""
Helper function to test each step of the wizard
- Make sure the call succeeded
- Make sure response is the proper step number
- return the result from the post for the next step
"""
step_count = len(self.wizard_step_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Step %d of %d' % (step_no, step_count))
data = self.grabFieldData(response)
data.update(self.wizard_step_data[step_no - 1])
return self.client.post(self.wizard_url, data)
def testWizard(self):
response = self.client.get(self.wizard_url)
for step_no in range(1, len(self.wizard_step_data) + 1):
response = self.checkWizardStep(response, step_no)
| gpl-3.0 |
Aloomaio/googleads-python-lib | examples/ad_manager/v201811/audience_segment_service/update_audience_segments.py | 1 | 2570 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates rule based first party audience segments.
To determine which audience segments exist, run get_all_audience_segments.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
AUDIENCE_SEGMENT_ID = 'INSERT_AUDIENCE_SEGMENT_ID_HERE'
def main(client, audience_segment_id):
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201811')
# Create statement object to get the specified first party audience segment.
statement = (ad_manager.StatementBuilder(version='v201811')
.Where('Type = :type AND Id = :audience_segment_id')
.WithBindVariable('audience_segment_id',
long(audience_segment_id))
.WithBindVariable('type', 'FIRST_PARTY')
.Limit(1))
# Get audience segments by statement.
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
updated_audience_segments = []
for audience_segment in response['results']:
print ('Audience segment with id "%s" and name "%s" will be updated.'
% (audience_segment['id'], audience_segment['name']))
audience_segment['membershipExpirationDays'] = '180'
updated_audience_segments.append(audience_segment)
audience_segments = audience_segment_service.updateAudienceSegments(
updated_audience_segments)
for audience_segment in audience_segments:
print ('Audience segment with id "%s" and name "%s" was updated' %
(audience_segment['id'], audience_segment['name']))
else:
print 'No audience segment found to update.'
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, AUDIENCE_SEGMENT_ID)
| apache-2.0 |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py | 77 | 4949 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
with self.test_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(array_ops.zeros([1024, 1024]))
adds = [
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
variables.global_variables_initializer().run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
def testParallelAssignWithoutLocking(self):
with self.test_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
p = variables.Variable(array_ops.zeros([1024, 1024]))
assigns = [
state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
for i in range(1, 21)
]
variables.global_variables_initializer().run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
# NOTE(skyewm): We exclude these tests from the TSAN TAP target, because they
# contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new
# resource variables.
def testParallelUpdateWithLocking(self):
with self.test_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
adds = [
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
p.initializer.run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
def testParallelAssignWithLocking(self):
with self.test_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
assigns = [
state_ops.assign(
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
p.initializer.run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
test.main()
| apache-2.0 |
greut/invenio-kwalitee | tests/test_hooks.py | 3 | 9073 | # -*- coding: utf-8 -*-
#
# This file is part of kwalitee
# Copyright (C) 2014 CERN.
#
# kwalitee is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# kwalitee is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kwalitee; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import unicode_literals
import os
import sys
import pytest
import shutil
import tempfile
import subprocess
from io import StringIO
from unittest import TestCase
from mock import patch, mock_open, MagicMock
from hamcrest import (assert_that, equal_to, has_length, has_item, has_items,
is_not, contains_string)
from kwalitee.hooks import (_get_component, _get_components,
_get_git_author, _get_files_modified,
_pre_commit, _prepare_commit_msg,
post_commit_hook, pre_commit_hook)
class GetComponentTest(TestCase):
"""Testing _get_component and _get_components."""
def test_get_component(self):
files = {
"invenio/base/factory.py": "base",
"invenio/modules/oauthclient/utils.py": "oauthclient",
"zenodo/modules/deposit/__init__.py": "deposit",
"invenio/legacy/bibupload/engine.py": "bibupload",
"invenio/ext/sqlalchemy/__init__.py": "sqlalchemy",
"docs/index.rst": "docs",
"grunt/app.js": "grunt",
"setup.py": "global",
}
for filename, expected in files.items():
assert_that(_get_component(filename), equal_to(expected), filename)
def test_get_components(self):
components = _get_components(("setup.py", "test.py", "grunt/app.js"))
assert_that(components, has_length(2))
assert_that(components, has_items("global", "grunt"))
class PreCommitTest(TestCase):
"""Testing the pre-commit actions."""
options = {"ignore": ("D100",)}
def test_pre_commit(self):
errors = _pre_commit((("__init__.py", b""),
("a/b/c/d/e/f/g/test.py", b""),
("i/j/k/error.py", b"import os")),
self.options)
assert_that(
errors,
has_items(
"i/j/k/error.py: 1: L101 copyright is missing",
"i/j/k/error.py: 1:1: F401 'os' imported but unused"))
class PrepareCommitMsgTest(TestCase):
"""Testing the preparation of commit message."""
def _mock_open(self, data=""):
"""Creates a mock for a file with the given data and returns the mock
and the file handler.
        NB: every time the file is reopened, it is truncated. To be used in
        read or read-then-write operations.
"""
mock = mock_open()
filehandler = StringIO(data)
def _open():
if filehandler.tell() > 0:
filehandler.truncate(0)
filehandler.seek(0)
return filehandler
mock.return_value = MagicMock(spec=StringIO)
mock.return_value.__enter__.side_effect = _open
return mock, filehandler
def test_prepare_commit_msg(self):
commit_msg = "# this is a comment"
mock, tmp_file = self._mock_open(commit_msg)
with patch("kwalitee.hooks.open", mock, create=True):
_prepare_commit_msg("mock", "John",
template="{component}: {author}")
tmp_file.seek(0)
new_commit_msg = "\n".join(tmp_file.readlines())
assert_that(new_commit_msg, equal_to("unknown: John"))
def test_prepare_commit_msg_with_one_component(self):
commit_msg = "# this is a comment"
mock, tmp_file = self._mock_open(commit_msg)
with patch("kwalitee.hooks.open", mock, create=True):
_prepare_commit_msg("mock", "John",
("setup.py", "test.py"),
"{component}")
tmp_file.seek(0)
new_commit_msg = "\n".join(tmp_file.readlines())
assert_that(new_commit_msg, equal_to("global"))
def test_prepare_commit_msg_with_many_components(self):
commit_msg = "# this is a comment"
mock, tmp_file = self._mock_open(commit_msg)
with patch("kwalitee.hooks.open", mock, create=True):
_prepare_commit_msg("mock", "John",
("setup.py",
"grunt/foo.js",
"docs/bar.rst"),
"{component}")
tmp_file.seek(0)
new_commit_msg = "\n".join(tmp_file.readlines())
assert_that(new_commit_msg, contains_string("global"))
assert_that(new_commit_msg, contains_string("grunt"))
assert_that(new_commit_msg, contains_string("docs"))
def test_prepare_commit_msg_aborts_if_existing(self):
commit_msg = "Lorem ipsum"
mock, tmp_file = self._mock_open(commit_msg)
with patch("kwalitee.hooks.open", mock, create=True):
_prepare_commit_msg("mock", "John")
tmp_file.seek(0)
new_commit_msg = "\n".join(tmp_file.readlines())
assert_that(new_commit_msg, equal_to(commit_msg))
@pytest.fixture(scope="function")
def github(app, session, request):
commit_msg = "dummy: test\n\n" \
"* foo\n" \
" bar\n\n" \
"Signed-off-by: John Doe <[email protected]>"
cmds = (
"git init",
"git config user.name 'Jürg Müller'",
"git config user.email [email protected]",
"touch empty.py",
"git add empty.py",
"git commit -m '{0}'".format(commit_msg),
"touch README.rst",
"git add README.rst",
"mkdir -p invenio/modules/testmod1/",
"mkdir invenio/modules/testmod2/",
"echo pass > invenio/modules/testmod1/test.py",
"echo pass > invenio/modules/testmod2/test.py",
"touch invenio/modules/testmod1/script.js",
"touch invenio/modules/testmod1/style.css",
"git add invenio/modules/testmod1/test.py",
"git add invenio/modules/testmod2/test.py",
"git add invenio/modules/testmod1/script.js",
"git add invenio/modules/testmod1/style.css",
)
config = app.config
app.config.update({
"COMPONENTS": ["dummy"],
"TRUSTED_DEVELOPERS": ["[email protected]"]
})
path = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(path)
for command in cmds:
proc = subprocess.Popen(command.encode("utf-8"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
cwd=path)
(stdout, stderr) = proc.communicate()
assert_that(proc.returncode, equal_to(0),
"{0}: {1}".format(command, stderr))
def teardown():
shutil.rmtree(path)
os.chdir(cwd)
app.config = config
request.addfinalizer(teardown)
return path
def test_get_files_modified(github):
assert_that(_get_files_modified(), is_not(has_item("empty.py")))
assert_that(_get_files_modified(),
has_items("README.rst",
"invenio/modules/testmod1/test.py",
"invenio/modules/testmod2/test.py",
"invenio/modules/testmod1/script.js",
"invenio/modules/testmod1/style.css"))
def test_get_git_author(github):
assert_that(_get_git_author(),
equal_to("Jürg Müller <[email protected]>"))
def test_post_commit_hook(github):
"""Hook: post-commit doesn't fail"""
stderr = sys.stderr
sys.stderr = StringIO()
assert_that(post_commit_hook() == 0)
sys.stderr.seek(0)
output = "\n".join(sys.stderr.readlines())
sys.stderr = stderr
assert_that(output, has_length(0))
def test_pre_commit_hook(github):
"""Hook: pre-commit fails because some copyrights are missing"""
stderr = sys.stderr
sys.stderr = StringIO()
assert_that(pre_commit_hook() == 1)
sys.stderr.seek(0)
output = "\n".join(sys.stderr.readlines())
sys.stderr = stderr
assert_that(output, contains_string("kwalitee errors"))
| gpl-2.0 |
Turlough/keyczar | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/filesystem.py | 19 | 3473 | """SCons.Tool.filesystem
Tool-specific initialization for the filesystem tools.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/filesystem.py 4043 2009/02/23 09:06:45 scons"
import SCons
from SCons.Tool.install import copyFunc
copyToBuilder, copyAsBuilder = None, None
def copyto_emitter(target, source, env):
""" changes the path of the source to be under the target (which
are assumed to be directories.
"""
n_target = []
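    # Re-root every source file under each target directory.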
for t in target:
n_target = n_target + map( lambda s, t=t: t.File( str( s ) ), source )
return (n_target, source)
def copy_action_func(target, source, env):
assert( len(target) == len(source) ), "\ntarget: %s\nsource: %s" %(map(str, target),map(str, source))
for t, s in zip(target, source):
if copyFunc(t.get_path(), s.get_path(), env):
return 1
return 0
def copy_action_str(target, source, env):
return env.subst_target_source(env['COPYSTR'], 0, target, source)
copy_action = SCons.Action.Action( copy_action_func, copy_action_str )
def generate(env):
try:
env['BUILDERS']['CopyTo']
env['BUILDERS']['CopyAs']
except KeyError, e:
global copyToBuilder
if copyToBuilder is None:
copyToBuilder = SCons.Builder.Builder(
action = copy_action,
target_factory = env.fs.Dir,
source_factory = env.fs.Entry,
multi = 1,
emitter = [ copyto_emitter, ] )
global copyAsBuilder
if copyAsBuilder is None:
copyAsBuilder = SCons.Builder.Builder(
action = copy_action,
target_factory = env.fs.Entry,
source_factory = env.fs.Entry )
env['BUILDERS']['CopyTo'] = copyToBuilder
env['BUILDERS']['CopyAs'] = copyAsBuilder
env['COPYSTR'] = 'Copy file(s): "$SOURCES" to "$TARGETS"'
def exists(env):
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
anthonylife/TaobaoCompetition2014 | src/feature_based_for_repeated_buy/predict.py | 1 | 2887 | #!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/4/1 #
# Generate recommendation results based on GBT #
###################################################################
import sys, csv, json, argparse
sys.path.append("../")
import data_io
from collections import defaultdict
from tool import calSegNum
settings = json.loads(open("../../SETTINGS.json").read())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-tv', type=float, action='store',
dest='threshold_val', help='specify how to generate recommendation result.')
parser.add_argument('-t', type=int, action='store',
dest='target', help='for validation or test dataset')
if len(sys.argv) != 5:
print 'Command e.g.: python predict.py -tv 0.8 -t 0(1)'
sys.exit(1)
para = parser.parse_args()
if para.target == 0:
file_name = settings["GBT_TEST_FILE"]
elif para.target == 1:
file_name = settings["GBT_TEST_FILE_FOR_SUBMIT"]
classifier = data_io.load_model(settings["GBT_MODEL_FILE"])
user_recommend_result = defaultdict(list)
finished_num = 0
features = []
user_product_ids = []
cache_uid = -1
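    # The feature file is assumed to be grouped by user id: features for
    # one user are accumulated and scored in a single predict_proba call
    # once the next user id is seen.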
for i, entry in enumerate(csv.reader(open(file_name))):
feature = map(float, entry[2:])
uid, pid = map(int, entry[:2])
if i == 0:
cache_uid = uid
if uid != cache_uid:
predictions = classifier.predict_proba(features)[:,1]
predictions = list(predictions)
for (t_uid, t_pid), pred in zip(user_product_ids, predictions):
if pred > para.threshold_val:
user_recommend_result[t_uid].append(t_pid)
features = [feature]
user_product_ids = [[uid, pid]]
cache_uid = uid
finished_num += 1
#print("FINISHED UID NUM: %d. " % (finished_num))
#sys.stderr.write("\rFINISHED UID NUM: %d. " % (finished_num))
#sys.stderr.flush()
else:
features.append(feature)
user_product_ids.append([uid, pid])
    # Score the final user's accumulated features; the loop above only
    # flushes a user's block when the next user id appears, so the last
    # user would otherwise be skipped.
    if features:
        predictions = list(classifier.predict_proba(features)[:,1])
        for (t_uid, t_pid), pred in zip(user_product_ids, predictions):
            if pred > para.threshold_val:
                user_recommend_result[t_uid].append(t_pid)
    data_io.write_submission(user_recommend_result)
if __name__ == "__main__":
main()
| gpl-2.0 |
omar-AM/python-pgp | pgp/commands/gpg/encrypt.py | 2 | 2820 | from pgp.commands.gpg.exceptions import FatalException
from pgp.commands.gpg.message_ import MessageCommand
class Command(MessageCommand):
multifile = True
def __init__(self, io_helper, recipient_helper,
algorithm_helper,
message_filename, for_your_eyes_only,
throw_keys, escape_from_lines, set_filesize):
MessageCommand.__init__(self, io_helper, message_filename,
set_filesize, for_your_eyes_only)
self.recipient_helper = recipient_helper
self.algorithm_helper = algorithm_helper
self.throw_keys = throw_keys
def file_to_literal_message(self, filename, binary=True):
msg = MessageCommand.file_to_literal_message(
self, filename, binary=binary)
msg = msg.compress(self.algorithm_helper.get_compression_algorithm(),
self.algorithm_helper.get_compression_level())
return msg
def pubkey_encrypt_message(self, msg):
session_key = None
use_mdc = True
# Choose an algorithm supported by all our recipients
preferred_ciphers = \
self.algorithm_helper.get_preferred_ciphers_for_encryption()
recipient_public_keys = \
self.recipient_helper.get_recipient_public_keys()
hidden_recipient_public_keys = \
self.recipient_helper.get_hidden_recipient_public_keys()
if self.throw_keys:
# All recipients are hidden
hidden_recipient_public_keys += recipient_public_keys
recipient_public_keys = []
all_keys = recipient_public_keys + hidden_recipient_public_keys
for cipher in preferred_ciphers:
acceptable = True
for key in all_keys:
if cipher not in key.preferred_symmetric_algorithms:
acceptable = False
break
if acceptable:
break
if not acceptable:
raise FatalException(
u'Cannot find symmetric cipher acceptable by all recipients.'
)
if self.algorithm_helper.get_mdc_forced():
use_mdc = True
elif self.algorithm_helper.get_mdc_disabled():
use_mdc = False
else:
# check recipients for compatibility
for key in all_keys:
if not key.supports_modification_detection:
use_mdc = False
break
return msg.public_key_encrypt(
cipher, public_keys=recipient_public_keys,
hidden_public_keys=hidden_recipient_public_keys,
session_key=session_key, integrity_protect=use_mdc
)
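    # Worked example of the cipher negotiation above (algorithm names are
    # illustrative, not taken from this module): with preferred_ciphers =
    # [AES256, AES192, AES128] and two recipient keys whose
    # preferred_symmetric_algorithms are {AES128, CAST5} and {AES256, AES128},
    # the loop settles on AES128 -- the first entry of the sender's preference
    # list that every recipient key accepts.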
def process_message(self, msg):
return self.pubkey_encrypt_message(msg)
| gpl-3.0 |
andyzsf/edx | lms/djangoapps/shoppingcart/migrations/0015_auto__del_field_invoice_purchase_order_number__del_field_invoice_compa.py | 114 | 16995 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Invoice.purchase_order_number'
db.delete_column('shoppingcart_invoice', 'purchase_order_number')
# Deleting field 'Invoice.company_contact_name'
db.delete_column('shoppingcart_invoice', 'company_contact_name')
# Deleting field 'Invoice.company_contact_email'
db.delete_column('shoppingcart_invoice', 'company_contact_email')
# Adding field 'Invoice.company_email'
db.add_column('shoppingcart_invoice', 'company_email',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Adding field 'Invoice.recipient_name'
db.add_column('shoppingcart_invoice', 'recipient_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Adding field 'Invoice.recipient_email'
db.add_column('shoppingcart_invoice', 'recipient_email',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Adding field 'Invoice.customer_reference_number'
db.add_column('shoppingcart_invoice', 'customer_reference_number',
self.gf('django.db.models.fields.CharField')(max_length=63, null=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Invoice.purchase_order_number'
db.add_column('shoppingcart_invoice', 'purchase_order_number',
self.gf('django.db.models.fields.CharField')(max_length=63, null=True),
keep_default=False)
# Adding field 'Invoice.company_contact_name'
db.add_column('shoppingcart_invoice', 'company_contact_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Adding field 'Invoice.company_contact_email'
db.add_column('shoppingcart_invoice', 'company_contact_email',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Deleting field 'Invoice.company_email'
db.delete_column('shoppingcart_invoice', 'company_email')
# Deleting field 'Invoice.recipient_name'
db.delete_column('shoppingcart_invoice', 'recipient_name')
# Deleting field 'Invoice.recipient_email'
db.delete_column('shoppingcart_invoice', 'recipient_email')
# Deleting field 'Invoice.customer_reference_number'
db.delete_column('shoppingcart_invoice', 'customer_reference_number')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 28, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 28, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'company_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 28, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
bepitulaz/huntingdimana | env/Lib/site-packages/pip/_vendor/html5lib/serializer/htmlserializer.py | 310 | 12909 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if not v in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
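# Rough sketch of what the handler registered above does, assuming an ASCII
# target encoding: a character that cannot be encoded is emitted as a named
# entity when the entity table has one, otherwise as a numeric character
# reference, e.g.
#     u"caf\xe9".encode("ascii", unicode_encode_errors)   # -> b"caf&eacute;"
#     u"\u2603".encode("ascii", unicode_encode_errors)    # -> b"&#x2603;"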
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
          Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
                            self.serializeError(_("System identifier contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
                        v = v.replace("&", "&amp;")
if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
                                v = v.replace("'", "&#39;")
else:
                                v = v.replace('"', "&quot;")
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if not key in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
| gpl-3.0 |
remynguyen96/webpack-layout | Son/node_modules/node-gyp/gyp/pylib/gyp/input_test.py | 1841 | 3207 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['a']]],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
self.nodes['a'].FindCycles())
self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
[self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
self.assertTrue(
[self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([[self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a']]],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
| mit |
phiedulxp/quora-mining | notebook/api.py | 2 | 6352 | #_*_ coding=utf-8 _*_
import requests
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)\
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'authorization':'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
}
zhihu_page_question_url = 'https://www.zhihu.com/question/{}'
zhihu_api = 'https://www.zhihu.com/api/v4/'
zhihu_api_topic = 'https://www.zhihu.com/api/v4/topics/{}?include=introduction%2Cquestions_count%2Cbest_answers_count%2Cfollowers_count%2Cis_following'
zhihu_api_questions = zhihu_api+'questions/{}'
zhihu_api_answers = zhihu_api+'answers/{}'
zhihu_api_answers_comments = zhihu_api+'''answers/{}/comments?include=data%5B*%5D.author%2Ccollapsed%2C\
reply_to_author%2Cdisliked%2Ccontent%2Cvoting%2Cvote_count%2Cis_parent_author%2C\
is_author&order=normal&limit=20&offset=0&status=open'''
questions_answers_include = '''include=data%5B*%5D.is_normal%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2C\
can_comment%2Ccontent%2Ceditable_content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Cmark_infos%2C\
created_time%2Cupdated_time%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cupvoted_followees%3B\
data%5B*%5D.author.badge%5B%3F(type%3Dbest_answerer)%5D.topics'''
zhihu_api_questions_answers = zhihu_api+'questions/{}/answers?limit=20&offset=0'+'&'+questions_answers_include
zhihu_api_new_question_up = zhihu_api+'banners/new_question_up?question_token={}'
zhihu_api_new_question_down = zhihu_api+'banners/new_question_down?question_token={}'
zhihu_api_similar_questions = zhihu_api+'questions/{}/similar-questions?include=data%5B%2A%5D.answer_count%2Cauthor&limit=5&offset=0'
zhihu_api_people = zhihu_api+'people/{}'
members_include = '''include=locations%2Cemployments%2Cgender%2Ceducations%2Cbusiness%2C\
voteup_count%2Cthanked_Count%2Cfollower_count%2Cfollowing_count%2C\
cover_url%2Cfollowing_topic_count%2Cfollowing_question_count%2C\
following_favlists_count%2Cfollowing_columns_count%2Cavatar_hue%2C\
answer_count%2Carticles_count%2Cpins_count%2Cquestion_count%2C\
commercial_question_count%2Cfavorite_count%2Cfavorited_count%2C\
logs_count%2Cmarked_answers_count%2Cmarked_answers_text%2C\
message_thread_token%2Caccount_status%2Cis_active%2C\
is_force_renamed%2Cis_bind_sina%2Csina_weibo_url%2C\
sina_weibo_name%2Cshow_sina_weibo%2Cis_blocking%2C\
is_blocked%2Cis_following%2Cis_followed%2C\
mutual_followees_count%2Cvote_to_count%2C\
vote_from_count%2Cthank_to_count%2C\
thank_from_count%2Cthanked_count%2C\
description%2Chosted_live_count%2C\
participated_live_count%2Callow_message%2C\
industry_category%2Corg_name%2Corg_homepage%2C\
badge%5B%3F(type%3Dbest_answerer)%5D.topics'''
zhihu_api_members = zhihu_api+'members/{}'+'?'+members_include
zhihu_api_members_followees = zhihu_api+'members/{}/followees?offset=0&limit=20'+'&'+members_include
zhihu_api_members_followers = zhihu_api+'members/{}/followers?offset=0&limit=20'+'&'+members_include
zhihu_api_members_activities = zhihu_api+'members/{}/activities?offset=0&limit=20' #&after_id=1418481629&desktop=True
members_answers_include = '''include=data%5B*%5D.is_normal%2Csuggest_edit%2Ccomment_count%2C\
can_comment%2Ccontent%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2C\
mark_infos%2Ccreated_time%2Cupdated_time%2Crelationship.is_authorized%2Cvoting%2C\
is_author%2Cis_thanked%2Cis_nothelp%2Cupvoted_followees%3Bdata%5B*%5D.author.badge%5B%3F\
(type%3Dbest_answerer)%5D.topics'''
zhihu_api_members_answers = zhihu_api+'members/{}/answer?offset=0&limit=20&sort_by=created'+'&'+members_answers_include
zhihu_api_members_questions = zhihu_api+'members/{}/questions?include=data%5B*%5D.created%2Canswer_count%2Cfollower_count%2Cauthor&offset=0&limit=20'
zhihu_api_members_pins = zhihu_api+'members/{}/pins?offset=0&limit=20&includes=data%5B*%5D.upvoted_followees'
zhihu_api_members_articles = zhihu_api+'members/{}/articles?include=data%5B*%5D.comment_count%2Ccan_comment%2Ccomment_permission%2Ccontent%2Cvoteup_count%2Ccreated%2Cupdated%2Cupvoted_followees%2Cvoting%3Bdata%5B*%5D.author.badge%5B%3F(type%3Dbest_answerer)%5D.topics&offset=0&limit=20&sort_by=created'
zhihu_api_members_column_contributions = zhihu_api+'members/{}/column-contributions?include=data%5B*%5D.column.title%2Cintro%2Cdescription%2Cfollowers%2Carticles_count&offset=0&limit=20'
zhihu_api_members_favlists = zhihu_api+'members/{}/favlists?include=data%5B*%5D.updated_time%2Canswer_count%2Cfollower_count%2Ccreator%2Cis_public&offset=0&limit=20'
zhihu_api_members_following_columns = zhihu_api+'members/{}/following-columns?include=data%5B*%5D.intro%2Cfollowers%2Carticles_count%2Cimage_url%2Cimage_width%2Cimage_height%2Cis_following%2Clast_article.created&offset=0&limit=20'
zhihu_api_members_following_topic_contributions = zhihu_api+'members/{}/following-topic-contributions?include=data%5B*%5D.topic.introduction&offset=0&limit=20'
zhihu_api_members_following_questions = zhihu_api+'members/{}/following-questions?include=data%5B*%5D.created%2Canswer_count%2Cfollower_count%2Cauthor&offset=0&limit=20'
zhihu_api_members_following_favlists = zhihu_api+'members/{}/following-favlists?include=data%5B*%5D.updated_time%2Canswer_count%2Cfollower_count%2Ccreator&offset=0&limit=20'
def get_data_from_zhihu_api(api,token):
data = {'totals':0,'items':[]}
res = requests.get(api.format(token),headers=headers)
j = res.json()
if not 'offset' in api:
return j
else:
try:
data['totals'] = j['paging']['totals']
except:
data['totals'] = None
data['items'].extend(j['data'])
while not j['paging']['is_end']:
res = requests.get(j['paging']['next'],headers=headers)
j = res.json()
data['items'].extend(j['data'])
return data
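# Example use of the pagination helper above. The question token is a made-up
# placeholder, and any real call also needs the module-level ``headers`` to
# carry a valid authorization value:
#
#     answers = get_data_from_zhihu_api(zhihu_api_questions_answers, '12345678')
#     print(answers['totals'], len(answers['items']))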
def get_questions(question_token):
questions = get_data_from_zhihu_api(zhihu_api_questions,question_token)
answers = get_data_from_zhihu_api(zhihu_api_questions_answers,question_token)
similar_questions = get_data_from_zhihu_api(zhihu_api_similar_questions,question_token)
return questions,answers,similar_questions
def get_members(members_token):
members = get_data_from_zhihu_api(zhihu_api_members,members_token)
followees = get_data_from_zhihu_api(zhihu_api_members_followees,members_token)
followers = get_data_from_zhihu_api(zhihu_api_members_followers,members_token)
return members,followees,followers | apache-2.0 |
mne-tools/mne-tools.github.io | 0.15/_downloads/decoding_rsa.py | 6 | 7255 | """
.. _rsa_noplot:
====================================
Representational Similarity Analysis
====================================
Representational Similarity Analysis is used to perform summary statistics
on supervised classifications where the number of classes is relatively high.
It consists in characterizing the structure of the confusion matrix to infer
the similarity between brain responses and serves as a proxy for characterizing
the space of mental representations [1]_ [2]_ [3]_.
In this example, we perform RSA on responses to 24 object images (among
a list of 92 images). Subjects were presented with images of human, animal
and inanimate objects [4]_. Here we use the 24 unique images of faces
and body parts.
.. note:: this example will download a very large (~6GB) file, so we will not
build the images below.
References
----------
.. [1] Shepard, R. "Multidimensional scaling, tree-fitting, and clustering."
Science 210.4468 (1980): 390-398.
.. [2] Laakso, A. & Cottrell, G.. "Content and cluster analysis:
assessing representational similarity in neural systems." Philosophical
psychology 13.1 (2000): 47-76.
.. [3] Kriegeskorte, N., Marieke, M., & Bandettini. P. "Representational
similarity analysis-connecting the branches of systems neuroscience."
Frontiers in systems neuroscience 2 (2008): 4.
.. [4] Cichy, R. M., Pantazis, D., & Oliva, A. "Resolving human object
recognition in space and time." Nature neuroscience (2014): 17(3),
455-462.
"""
# Authors: Jean-Remi King <[email protected]>
# Jaakko Leppakangas <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from pandas import read_csv
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.manifold import MDS
import mne
from mne.io import read_raw_fif, concatenate_raws
from mne.datasets import visual_92_categories
print(__doc__)
data_path = visual_92_categories.data_path()
# Define stimulus - trigger mapping
fname = op.join(data_path, 'visual_stimuli.csv')
conds = read_csv(fname)
print(conds.head(5))
##############################################################################
# Let's restrict the number of conditions to speed up computation
max_trigger = 24
conds = conds[:max_trigger] # take only the first 24 rows
##############################################################################
# Define stimulus - trigger mapping
conditions = []
for c in conds.values:
cond_tags = list(c[:2])
cond_tags += [('not-' if i == 0 else '') + conds.columns[k]
for k, i in enumerate(c[2:], 2)]
conditions.append('/'.join(map(str, cond_tags)))
print(conditions[:10])
##############################################################################
# Let's make the event_id dictionary
event_id = dict(zip(conditions, conds.trigger + 1))
event_id['0/human bodypart/human/not-face/animal/natural']
##############################################################################
# Read MEG data
n_runs = 4 # 4 for full data (use less to speed up computations)
fname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')
raws = [read_raw_fif(fname % block) for block in range(n_runs)]
raw = concatenate_raws(raws)
events = mne.find_events(raw, min_duration=.002)
events = events[events[:, 2] <= max_trigger]
##############################################################################
# Epoch data
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,
picks=picks, tmin=-.1, tmax=.500, preload=True)
##############################################################################
# Let's plot some conditions
epochs['face'].average().plot()
epochs['not-face'].average().plot()
##############################################################################
# Representational Similarity Analysis (RSA) is a neuroimaging-specific
# appellation to refer to statistics applied to the confusion matrix
# also referred to as the representational dissimilarity matrices (RDM).
#
# Compared to the approach from Cichy et al. we'll use a multiclass
# classifier (Multinomial Logistic Regression) while the paper uses
# all pairwise binary classification tasks to make the RDM.
# Also we use here the ROC-AUC as performance metric while the
# paper uses accuracy. Finally here for the sake of time we use
# RSA on a window of data while Cichy et al. did it for all time
# instants separately.
# Classify using the average signal in the window 50ms to 300ms
# to focus the classifier on the time interval with best SNR.
clf = make_pipeline(StandardScaler(),
LogisticRegression(C=1, solver='lbfgs'))
X = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)
y = epochs.events[:, 2]
classes = set(y)
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
# Compute confusion matrix for each cross-validation fold
y_pred = np.zeros((len(y), len(classes)))
for train, test in cv.split(X, y):
# Fit
clf.fit(X[train], y[train])
# Probabilistic prediction (necessary for ROC-AUC scoring metric)
y_pred[test] = clf.predict_proba(X[test])
##############################################################################
# Compute confusion matrix using ROC-AUC
confusion = np.zeros((len(classes), len(classes)))
for ii, train_class in enumerate(classes):
for jj in range(ii, len(classes)):
confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])
confusion[jj, ii] = confusion[ii, jj]
##############################################################################
# Plot
labels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6
fig, ax = plt.subplots(1)
im = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])
ax.set_yticks(range(len(classes)))
ax.set_yticklabels(labels)
ax.set_xticks(range(len(classes)))
ax.set_xticklabels(labels, rotation=40, ha='left')
ax.axhline(11.5, color='k')
ax.axvline(11.5, color='k')
plt.colorbar(im)
plt.tight_layout()
plt.show()
##############################################################################
# Confusion matrices related to mental representations have historically been
# summarized with dimensionality reduction using multi-dimensional scaling [1].
# See how the face samples cluster together.
fig, ax = plt.subplots(1)
mds = MDS(2, random_state=0, dissimilarity='precomputed')
chance = 0.5
summary = mds.fit_transform(chance - confusion)
cmap = plt.get_cmap('rainbow')
colors = ['r', 'b']
names = list(conds['condition'].values)
for color, name in zip(colors, set(names)):
sel = np.where([this_name == name for this_name in names])[0]
size = 500 if name == 'human face' else 100
ax.scatter(summary[sel, 0], summary[sel, 1], s=size,
facecolors=color, label=name, edgecolors='k')
ax.axis('off')
ax.legend(loc='lower right', scatterpoints=1, ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
Beyond-Imagination/BlubBlub | RaspberryPI/django-env/lib/python3.4/site-packages/django/contrib/gis/admin/options.py | 47 | 5390 | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.db import models
from django.contrib.gis.gdal import OGRGeomType
spherical_mercator_srid = 3857
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
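    # A typical project-level override might look like the following sketch
    # (model name and map values are illustrative only):
    #
    #     class CityAdmin(GeoModelAdmin):
    #         default_lon = 1000000
    #         default_lat = 6600000
    #         default_zoom = 6
    #     admin.site.register(City, CityAdmin)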
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
wms_options = {'format': 'image/jpeg'}
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing 2D GeometryFields (OpenLayers 2 does not support
3D editing).
"""
if isinstance(db_field, models.GeometryField) and db_field.dim < 3:
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, request, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION':
collection_type = 'Any'
else:
collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template_name = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', %s' % ', '.join(wms_options)
params = {'default_lon': self.default_lon,
'default_lat': self.default_lat,
'default_zoom': self.default_zoom,
'display_wkt': self.debug or self.display_wkt,
'geom_type': OGRGeomType(db_field.geom_type),
'field_name': db_field.name,
'is_collection': is_collection,
'scrollable': self.scrollable,
'layerswitcher': self.layerswitcher,
'collection_type': collection_type,
'is_generic': db_field.geom_type == 'GEOMETRY',
'is_linestring': db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon': db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point': db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom': self.num_zoom,
'max_zoom': self.max_zoom,
'min_zoom': self.min_zoom,
'units': self.units, # likely should get from object
'max_resolution': self.max_resolution,
'max_extent': self.max_extent,
'modifiable': self.modifiable,
'mouse_position': self.mouse_position,
'scale_text': self.scale_text,
'map_width': self.map_width,
'map_height': self.map_height,
'point_zoom': self.point_zoom,
'srid': self.map_srid,
'display_srid': self.display_srid,
'wms_url': self.wms_url,
'wms_layer': self.wms_layer,
'wms_name': self.wms_name,
'wms_options': wms_options,
'debug': self.debug,
}
return OLMap
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
| gpl-3.0 |
prospwro/odoo | openerp/osv/osv.py | 337 | 1384 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..exceptions import except_orm
from .orm import Model, TransientModel, AbstractModel
# Deprecated, kept for backward compatibility.
# openerp.exceptions.Warning should be used instead.
except_osv = except_orm
# Deprecated, kept for backward compatibility.
osv = Model
osv_memory = TransientModel
osv_abstract = AbstractModel # ;-)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kanagasabapathi/python-for-android | python3-alpha/python3-src/Tools/demo/redemo.py | 45 | 5771 | """Basic regular expression demonstration facility (Perl style syntax)."""
from tkinter import *
import re
class ReDemo:
def __init__(self, master):
self.master = master
self.promptdisplay = Label(self.master, anchor=W,
text="Enter a Perl-style regular expression:")
self.promptdisplay.pack(side=TOP, fill=X)
self.regexdisplay = Entry(self.master)
self.regexdisplay.pack(fill=X)
self.regexdisplay.focus_set()
self.addoptions()
self.statusdisplay = Label(self.master, text="", anchor=W)
self.statusdisplay.pack(side=TOP, fill=X)
self.labeldisplay = Label(self.master, anchor=W,
text="Enter a string to search:")
self.labeldisplay.pack(fill=X)
self.labeldisplay.pack(fill=X)
self.showframe = Frame(master)
self.showframe.pack(fill=X, anchor=W)
self.showvar = StringVar(master)
self.showvar.set("first")
self.showfirstradio = Radiobutton(self.showframe,
text="Highlight first match",
variable=self.showvar,
value="first",
command=self.recompile)
self.showfirstradio.pack(side=LEFT)
self.showallradio = Radiobutton(self.showframe,
text="Highlight all matches",
variable=self.showvar,
value="all",
command=self.recompile)
self.showallradio.pack(side=LEFT)
self.stringdisplay = Text(self.master, width=60, height=4)
self.stringdisplay.pack(fill=BOTH, expand=1)
self.stringdisplay.tag_configure("hit", background="yellow")
self.grouplabel = Label(self.master, text="Groups:", anchor=W)
self.grouplabel.pack(fill=X)
self.grouplist = Listbox(self.master)
self.grouplist.pack(expand=1, fill=BOTH)
self.regexdisplay.bind('<Key>', self.recompile)
self.stringdisplay.bind('<Key>', self.reevaluate)
self.compiled = None
self.recompile()
btags = self.regexdisplay.bindtags()
self.regexdisplay.bindtags(btags[1:] + btags[:1])
btags = self.stringdisplay.bindtags()
self.stringdisplay.bindtags(btags[1:] + btags[:1])
def addoptions(self):
self.frames = []
self.boxes = []
self.vars = []
for name in ('IGNORECASE',
'LOCALE',
'MULTILINE',
'DOTALL',
'VERBOSE'):
if len(self.boxes) % 3 == 0:
frame = Frame(self.master)
frame.pack(fill=X)
self.frames.append(frame)
val = getattr(re, name)
var = IntVar()
box = Checkbutton(frame,
variable=var, text=name,
offvalue=0, onvalue=val,
command=self.recompile)
box.pack(side=LEFT)
self.boxes.append(box)
self.vars.append(var)
def getflags(self):
flags = 0
for var in self.vars:
flags = flags | var.get()
flags = flags
return flags
def recompile(self, event=None):
try:
self.compiled = re.compile(self.regexdisplay.get(),
self.getflags())
bg = self.promptdisplay['background']
self.statusdisplay.config(text="", background=bg)
except re.error as msg:
self.compiled = None
self.statusdisplay.config(
text="re.error: %s" % str(msg),
background="red")
self.reevaluate()
def reevaluate(self, event=None):
try:
self.stringdisplay.tag_remove("hit", "1.0", END)
except TclError:
pass
try:
self.stringdisplay.tag_remove("hit0", "1.0", END)
except TclError:
pass
self.grouplist.delete(0, END)
if not self.compiled:
return
self.stringdisplay.tag_configure("hit", background="yellow")
self.stringdisplay.tag_configure("hit0", background="orange")
text = self.stringdisplay.get("1.0", END)
last = 0
nmatches = 0
while last <= len(text):
m = self.compiled.search(text, last)
if m is None:
break
first, last = m.span()
if last == first:
last = first+1
tag = "hit0"
else:
tag = "hit"
pfirst = "1.0 + %d chars" % first
plast = "1.0 + %d chars" % last
self.stringdisplay.tag_add(tag, pfirst, plast)
if nmatches == 0:
self.stringdisplay.yview_pickplace(pfirst)
groups = list(m.groups())
groups.insert(0, m.group())
for i in range(len(groups)):
g = "%2d: %r" % (i, groups[i])
self.grouplist.insert(END, g)
nmatches = nmatches + 1
if self.showvar.get() == "first":
break
if nmatches == 0:
self.statusdisplay.config(text="(no match)",
background="yellow")
else:
self.statusdisplay.config(text="")
# Main function, run when invoked as a stand-alone Python program.
def main():
root = Tk()
demo = ReDemo(root)
root.protocol('WM_DELETE_WINDOW', root.quit)
root.mainloop()
if __name__ == '__main__':
main()
| apache-2.0 |
pigeonflight/strider-plone | docker/appengine/lib/django-1.5/django/contrib/gis/gdal/srs.py | 219 | 11914 | """
The Spatial Reference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
srs_type = 'user'
if isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr: capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
EPSG
        >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected: return self.attr_value('PROJCS')
elif self.geographic: return self.attr_value('GEOGCS')
elif self.local: return self.attr_value('LOCAL_CS')
else: return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
        and automatically determines whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
            name = name.decode()
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
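# Minimal usage sketch for SpatialReference (illustrative only; it requires a
# working GDAL/OGR installation at runtime, and the exact values returned can
# vary between GDAL versions):
#
#     >>> srs = SpatialReference(4326)        # from an EPSG code
#     >>> srs.name, srs.srid                  # ('WGS 84', 4326)
#     >>> srs.geographic, srs.projected       # (True, False)
#     >>> srs.units                           # e.g. (0.0174532..., 'degree')
#     >>> SpatialReference('WGS84').wkt[:6]   # 'GEOGCS'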
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr: capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
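# Minimal usage sketch for CoordTransform (illustrative only; the target SRS
# name shown is indicative and depends on the GDAL build):
#
#     >>> wgs84 = SpatialReference('WGS84')
#     >>> web_mercator = SpatialReference(3857)
#     >>> ct = CoordTransform(wgs84, web_mercator)
#     >>> str(ct)
#     'Transform from "WGS 84" to "WGS 84 / Pseudo-Mercator"'
#
# The transformation itself is applied by passing ``ct`` to a geometry object
# (e.g. ``OGRGeometry.transform(ct)``), which lives outside this module.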
| mit |
heli522/scikit-learn | sklearn/cluster/affinity_propagation_.py | 224 | 10733 | """ Algorithms for clustering : Meanshift, Affinity propagation and spectral
clustering.
"""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
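def _example_affinity_propagation():
    # Minimal usage sketch (not part of the public API, illustrative only):
    # cluster six 2-D points forming two well separated groups.  The
    # similarity matrix is the negative squared Euclidean distance, as used
    # by the estimator below; returned values are indicative only.
    X = np.array([[0., 0.], [0., 1.], [1., 0.],
                  [10., 10.], [10., 11.], [11., 10.]])
    S = -euclidean_distances(X, squared=True)
    centers, labels = affinity_propagation(S, preference=-50)
    return centers, labels   # expected: two exemplars, labels in {0, 1}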
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
        of exemplars, i.e. of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
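def _example_affinity_propagation_estimator():
    # Minimal usage sketch (not part of the public API, illustrative only):
    # fit the estimator on toy data, then assign new points to the nearest
    # learned exemplar.  Cluster indices are indicative only.
    X = np.array([[1., 2.], [1., 4.], [1., 0.],
                  [4., 2.], [4., 4.], [4., 0.]])
    af = AffinityPropagation(preference=-50).fit(X)
    new_labels = af.predict(np.array([[0., 0.], [5., 3.]]))
    return af.cluster_centers_indices_, af.labels_, new_labels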
| bsd-3-clause |
vnsofthe/odoo-dev | openerp/tools/win32.py | 457 | 1993 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
import time
import datetime
if not hasattr(locale, 'D_FMT'):
locale.D_FMT = 1
if not hasattr(locale, 'T_FMT'):
locale.T_FMT = 2
if not hasattr(locale, 'nl_langinfo'):
def nl_langinfo(param):
if param == locale.D_FMT:
val = time.strptime('30/12/2004', '%d/%m/%Y')
dt = datetime.datetime(*val[:-2])
format_date = dt.strftime('%x')
for x, y in [('30', '%d'),('12', '%m'),('2004','%Y'),('04', '%Y')]:
format_date = format_date.replace(x, y)
return format_date
if param == locale.T_FMT:
val = time.strptime('13:24:56', '%H:%M:%S')
dt = datetime.datetime(*val[:-2])
format_time = dt.strftime('%X')
for x, y in [('13', '%H'),('24', '%M'),('56','%S')]:
format_time = format_time.replace(x, y)
return format_time
locale.nl_langinfo = nl_langinfo
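# Minimal usage sketch for the shim above (illustrative only; the format
# strings depend on the active locale, so the values in the comments are
# merely typical):
#
#     >>> import locale
#     >>> locale.nl_langinfo(locale.D_FMT)    # e.g. '%d/%m/%Y'
#     >>> locale.nl_langinfo(locale.T_FMT)    # e.g. '%H:%M:%S'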
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
WeichenXu123/spark | examples/src/main/python/als.py | 121 | 3267 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is an example implementation of ALS for learning how to use Spark. Please refer to
pyspark.ml.recommendation.ALS for more conventional use.
This example requires numpy (http://www.numpy.org/)
"""
from __future__ import print_function
import sys
import numpy as np
from numpy.random import rand
from numpy import matrix
from pyspark.sql import SparkSession
LAMBDA = 0.01 # regularization
np.random.seed(42)
def rmse(R, ms, us):
diff = R - ms * us.T
return np.sqrt(np.sum(np.power(diff, 2)) / (M * U))
def update(i, mat, ratings):
uu = mat.shape[0]
ff = mat.shape[1]
XtX = mat.T * mat
Xty = mat.T * ratings[i, :].T
for j in range(ff):
XtX[j, j] += LAMBDA * uu
return np.linalg.solve(XtX, Xty)
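def _example_update():
    # Minimal sketch (illustrative only) of the regularized least-squares
    # step performed by update(): for one row of the ratings matrix R it
    # solves (X^T X + LAMBDA * n * I) w = X^T y, where X is the opposite
    # factor matrix.  Shapes are tiny and the numbers are random.
    R = matrix(rand(3, 2)) * matrix(rand(4, 2).T)   # 3 "users" x 4 "items"
    us = matrix(rand(4, 2))                         # item factors (4 x 2)
    w = update(0, us, R)                            # new factors for user 0
    return w                                        # column vector, shape (2, 1)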
if __name__ == "__main__":
"""
    Usage: als [M] [U] [F] [iterations] [partitions]
"""
print("""WARN: This is a naive implementation of ALS and is given as an
example. Please use pyspark.ml.recommendation.ALS for more
conventional use.""", file=sys.stderr)
spark = SparkSession\
.builder\
.appName("PythonALS")\
.getOrCreate()
sc = spark.sparkContext
M = int(sys.argv[1]) if len(sys.argv) > 1 else 100
U = int(sys.argv[2]) if len(sys.argv) > 2 else 500
F = int(sys.argv[3]) if len(sys.argv) > 3 else 10
ITERATIONS = int(sys.argv[4]) if len(sys.argv) > 4 else 5
partitions = int(sys.argv[5]) if len(sys.argv) > 5 else 2
print("Running ALS with M=%d, U=%d, F=%d, iters=%d, partitions=%d\n" %
(M, U, F, ITERATIONS, partitions))
R = matrix(rand(M, F)) * matrix(rand(U, F).T)
ms = matrix(rand(M, F))
us = matrix(rand(U, F))
Rb = sc.broadcast(R)
msb = sc.broadcast(ms)
usb = sc.broadcast(us)
for i in range(ITERATIONS):
ms = sc.parallelize(range(M), partitions) \
.map(lambda x: update(x, usb.value, Rb.value)) \
.collect()
# collect() returns a list, so array ends up being
# a 3-d array, we take the first 2 dims for the matrix
ms = matrix(np.array(ms)[:, :, 0])
msb = sc.broadcast(ms)
us = sc.parallelize(range(U), partitions) \
.map(lambda x: update(x, msb.value, Rb.value.T)) \
.collect()
us = matrix(np.array(us)[:, :, 0])
usb = sc.broadcast(us)
error = rmse(R, ms, us)
print("Iteration %d:" % i)
print("\nRMSE: %5.4f\n" % error)
spark.stop()
| apache-2.0 |
seem-sky/kbengine | kbe/res/scripts/common/Lib/lib2to3/tests/test_refactor.py | 91 | 11847 | """
Unit tests for refactor.py.
"""
from __future__ import with_statement
import sys
import os
import codecs
import operator
import io
import tempfile
import shutil
import unittest
import warnings
from lib2to3 import refactor, pygram, fixer_base
from lib2to3.pgen2 import token
from . import support
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
FIXER_DIR = os.path.join(TEST_DATA_DIR, "fixers")
sys.path.append(FIXER_DIR)
try:
_DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
finally:
sys.path.pop()
_2TO3_FIXERS = refactor.get_fixers_from_package("lib2to3.fixes")
class TestRefactoringTool(unittest.TestCase):
def setUp(self):
sys.path.append(FIXER_DIR)
def tearDown(self):
sys.path.pop()
def check_instances(self, instances, classes):
for inst, cls in zip(instances, classes):
if not isinstance(inst, cls):
self.fail("%s are not instances of %s" % instances, classes)
def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
return refactor.RefactoringTool(fixers, options, explicit)
def test_print_function_option(self):
rt = self.rt({"print_function" : True})
self.assertIs(rt.grammar, pygram.python_grammar_no_print_statement)
self.assertIs(rt.driver.grammar,
pygram.python_grammar_no_print_statement)
def test_write_unchanged_files_option(self):
rt = self.rt()
self.assertFalse(rt.write_unchanged_files)
rt = self.rt({"write_unchanged_files" : True})
self.assertTrue(rt.write_unchanged_files)
def test_fixer_loading_helpers(self):
contents = ["explicit", "first", "last", "parrot", "preorder"]
non_prefixed = refactor.get_all_fix_names("myfixes")
prefixed = refactor.get_all_fix_names("myfixes", False)
full_names = refactor.get_fixers_from_package("myfixes")
self.assertEqual(prefixed, ["fix_" + name for name in contents])
self.assertEqual(non_prefixed, contents)
self.assertEqual(full_names,
["myfixes.fix_" + name for name in contents])
def test_detect_future_features(self):
run = refactor._detect_future_features
fs = frozenset
empty = fs()
self.assertEqual(run(""), empty)
self.assertEqual(run("from __future__ import print_function"),
fs(("print_function",)))
self.assertEqual(run("from __future__ import generators"),
fs(("generators",)))
self.assertEqual(run("from __future__ import generators, feature"),
fs(("generators", "feature")))
inp = "from __future__ import generators, print_function"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp ="from __future__ import print_function, generators"
self.assertEqual(run(inp), fs(("print_function", "generators")))
inp = "from __future__ import (print_function,)"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "from __future__ import (generators, print_function)"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp = "from __future__ import (generators, nested_scopes)"
self.assertEqual(run(inp), fs(("generators", "nested_scopes")))
inp = """from __future__ import generators
from __future__ import print_function"""
self.assertEqual(run(inp), fs(("generators", "print_function")))
invalid = ("from",
"from 4",
"from x",
"from x 5",
"from x im",
"from x import",
"from x import 4",
)
for inp in invalid:
self.assertEqual(run(inp), empty)
inp = "'docstring'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "'docstring'\n'somng'\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
inp = "# comment\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "# comment\n'doc'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "class x: pass\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
def test_get_headnode_dict(self):
class NoneFix(fixer_base.BaseFix):
pass
class FileInputFix(fixer_base.BaseFix):
PATTERN = "file_input< any * >"
class SimpleFix(fixer_base.BaseFix):
PATTERN = "'name'"
no_head = NoneFix({}, [])
with_head = FileInputFix({}, [])
simple = SimpleFix({}, [])
d = refactor._get_headnode_dict([no_head, with_head, simple])
top_fixes = d.pop(pygram.python_symbols.file_input)
self.assertEqual(top_fixes, [with_head, no_head])
name_fixes = d.pop(token.NAME)
self.assertEqual(name_fixes, [simple, no_head])
for fixes in d.values():
self.assertEqual(fixes, [no_head])
def test_fixer_loading(self):
from myfixes.fix_first import FixFirst
from myfixes.fix_last import FixLast
from myfixes.fix_parrot import FixParrot
from myfixes.fix_preorder import FixPreorder
rt = self.rt()
pre, post = rt.get_fixers()
self.check_instances(pre, [FixPreorder])
self.check_instances(post, [FixFirst, FixParrot, FixLast])
def test_naughty_fixers(self):
self.assertRaises(ImportError, self.rt, fixers=["not_here"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
def test_refactor_string(self):
rt = self.rt()
input = "def parrot(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertNotEqual(str(tree), input)
input = "def f(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertEqual(str(tree), input)
def test_refactor_stdin(self):
class MyRT(refactor.RefactoringTool):
def print_output(self, old_text, new_text, filename, equal):
results.extend([old_text, new_text, filename, equal])
results = []
rt = MyRT(_DEFAULT_FIXERS)
save = sys.stdin
sys.stdin = io.StringIO("def parrot(): pass\n\n")
try:
rt.refactor_stdin()
finally:
sys.stdin = save
expected = ["def parrot(): pass\n\n",
"def cheese(): pass\n\n",
"<stdin>", False]
self.assertEqual(results, expected)
def check_file_refactoring(self, test_file, fixers=_2TO3_FIXERS,
options=None, mock_log_debug=None,
actually_write=True):
tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
self.addCleanup(shutil.rmtree, tmpdir)
# make a copy of the tested file that we can write to
shutil.copy(test_file, tmpdir)
test_file = os.path.join(tmpdir, os.path.basename(test_file))
os.chmod(test_file, 0o644)
def read_file():
with open(test_file, "rb") as fp:
return fp.read()
old_contents = read_file()
rt = self.rt(fixers=fixers, options=options)
if mock_log_debug:
rt.log_debug = mock_log_debug
rt.refactor_file(test_file)
self.assertEqual(old_contents, read_file())
if not actually_write:
return
rt.refactor_file(test_file, True)
new_contents = read_file()
self.assertNotEqual(old_contents, new_contents)
return new_contents
def test_refactor_file(self):
test_file = os.path.join(FIXER_DIR, "parrot_example.py")
self.check_file_refactoring(test_file, _DEFAULT_FIXERS)
def test_refactor_file_write_unchanged_file(self):
test_file = os.path.join(FIXER_DIR, "parrot_example.py")
debug_messages = []
def recording_log_debug(msg, *args):
debug_messages.append(msg % args)
self.check_file_refactoring(test_file, fixers=(),
options={"write_unchanged_files": True},
mock_log_debug=recording_log_debug,
actually_write=False)
# Testing that it logged this message when write=False was passed is
# sufficient to see that it did not bail early after "No changes".
message_regex = r"Not writing changes to .*%s%s" % (
os.sep, os.path.basename(test_file))
for message in debug_messages:
if "Not writing changes" in message:
self.assertRegex(message, message_regex)
break
else:
self.fail("%r not matched in %r" % (message_regex, debug_messages))
def test_refactor_dir(self):
def check(structure, expected):
def mock_refactor_file(self, f, *args):
got.append(f)
save_func = refactor.RefactoringTool.refactor_file
refactor.RefactoringTool.refactor_file = mock_refactor_file
rt = self.rt()
got = []
dir = tempfile.mkdtemp(prefix="2to3-test_refactor")
try:
os.mkdir(os.path.join(dir, "a_dir"))
for fn in structure:
open(os.path.join(dir, fn), "wb").close()
rt.refactor_dir(dir)
finally:
refactor.RefactoringTool.refactor_file = save_func
shutil.rmtree(dir)
self.assertEqual(got,
[os.path.join(dir, path) for path in expected])
check([], [])
tree = ["nothing",
"hi.py",
".dumb",
".after.py",
"notpy.npy",
"sappy"]
expected = ["hi.py"]
check(tree, expected)
tree = ["hi.py",
os.path.join("a_dir", "stuff.py")]
check(tree, tree)
def test_file_encoding(self):
fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
self.check_file_refactoring(fn)
def test_false_file_encoding(self):
fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
data = self.check_file_refactoring(fn)
def test_bom(self):
fn = os.path.join(TEST_DATA_DIR, "bom.py")
data = self.check_file_refactoring(fn)
self.assertTrue(data.startswith(codecs.BOM_UTF8))
def test_crlf_newlines(self):
old_sep = os.linesep
os.linesep = "\r\n"
try:
fn = os.path.join(TEST_DATA_DIR, "crlf.py")
fixes = refactor.get_fixers_from_package("lib2to3.fixes")
self.check_file_refactoring(fn, fixes)
finally:
os.linesep = old_sep
def test_refactor_docstring(self):
rt = self.rt()
doc = """
>>> example()
42
"""
out = rt.refactor_docstring(doc, "<test>")
self.assertEqual(out, doc)
doc = """
>>> def parrot():
... return 43
"""
out = rt.refactor_docstring(doc, "<test>")
self.assertNotEqual(out, doc)
def test_explicit(self):
from myfixes.fix_explicit import FixExplicit
rt = self.rt(fixers=["myfixes.fix_explicit"])
self.assertEqual(len(rt.post_order), 0)
rt = self.rt(explicit=["myfixes.fix_explicit"])
for fix in rt.post_order:
if isinstance(fix, FixExplicit):
break
else:
self.fail("explicit fixer not loaded")
| lgpl-3.0 |
hmcmooc/muddx-platform | common/test/acceptance/tests/video/test_video_times.py | 1 | 7329 | """
Acceptance tests for Video Times(Start, End and Finish) functionality.
"""
from .test_video_module import VideoBaseTest
class VideoTimesTest(VideoBaseTest):
""" Test Video Player Times """
def setUp(self):
super(VideoTimesTest, self).setUp()
def test_video_start_time(self):
"""
Scenario: Start time works for Youtube video
Given we have a video in "Youtube" mode with start_time set to 00:00:10
And I see video slider at "0:10" position
And I click video button "play"
Then video starts playing at or after start_time(00:00:10)
"""
data = {'start_time': '00:00:10'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.assertEqual(self.video.position(), '0:10')
self.video.click_player_button('play')
self.assertGreaterEqual(int(self.video.position().split(':')[1]), 10)
def test_video_end_time_with_default_start_time(self):
"""
Scenario: End time works for Youtube video if starts playing from beginning.
Given we have a video in "Youtube" mode with end time set to 00:00:02
And I click video button "play"
And I wait until video stop playing
Then I see video slider at "0:02" position
"""
data = {'end_time': '00:00:02'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.click_player_button('play')
# wait until video stop playing
self.video.wait_for_state('pause')
self.assertEqual(self.video.position(), '0:02')
def test_video_end_time_wo_default_start_time(self):
"""
Scenario: End time works for Youtube video if starts playing from between.
Given we have a video in "Youtube" mode with end time set to 00:01:00
And I seek video to "0:55" position
And I click video button "play"
And I wait until video stop playing
Then I see video slider at "1:00" position
"""
data = {'end_time': '00:01:00'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.seek('0:55')
self.video.click_player_button('play')
# wait until video stop playing
self.video.wait_for_state('pause')
self.assertEqual(self.video.position(), '1:00')
def test_video_start_time_and_end_time(self):
"""
Scenario: Start time and end time work together for Youtube video.
        Given we have a video in "Youtube" mode with start time set to 00:00:10 and end_time set to 00:00:12
And I see video slider at "0:10" position
And I click video button "play"
Then I wait until video stop playing
Then I see video slider at "0:12" position
"""
data = {'start_time': '00:00:10', 'end_time': '00:00:12'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.assertEqual(self.video.position(), '0:10')
self.video.click_player_button('play')
# wait until video stop playing
self.video.wait_for_state('pause')
self.assertEqual(self.video.position(), '0:12')
def test_video_end_time_and_finish_time(self):
"""
Scenario: Youtube video works after pausing at end time and then plays again from End Time to the end.
Given we have a video in "Youtube" mode with start time set to 00:01:41 and end_time set to 00:01:42
And I click video button "play"
And I wait until video stop playing
Then I see video slider at "1:42" position
And I click video button "play"
And I wait until video stop playing
Then I see video slider at "1:54" position
# NOTE: The above video duration(1:54) is disputed because
# 1. Our Video Player first shows Video Duration equals to 1 minute and 56 sec and then 1 minute and 54 sec
# 2 YouTube first shows duration of 1 minute and 56 seconds and then changes duration to 1 minute and 55 sec
#
# The 1:56 time is the duration from metadata. 1:54 time is the duration reported by the video API once
# the video starts playing. BUT sometime video API gives duration equals 1 minute and 55 second.
"""
data = {'start_time': '00:01:41', 'end_time': '00:01:42'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.click_player_button('play')
# wait until video stop playing
self.video.wait_for_state('pause')
self.assertEqual(self.video.position(), '1:42')
self.video.click_player_button('play')
# wait until video stop playing
self.video.wait_for_state('finished')
self.assertIn(self.video.position(), ['1:54', '1:55'])
def test_video_end_time_with_seek(self):
"""
Scenario: End Time works for Youtube Video if starts playing before Start Time.
Given we have a video in "Youtube" mode with end-time at 0:32 and start-time at 0:30
And I seek video to "0:28" position
And I click video button "play"
And I wait until video stop playing
Then I see video slider at "0:32" position
"""
data = {'start_time': '00:00:30', 'end_time': '00:00:32'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.seek('0:28')
self.video.click_player_button('play')
# wait until video stop playing
self.video.wait_for_state('pause')
self.assertEqual(self.video.position(), '0:32')
def test_video_finish_time_with_seek(self):
"""
Scenario: Finish Time works for Youtube video.
        Given we have a video in "Youtube" mode with end-time at 1:00, and the video starts playing from 1:42
And I seek video to "1:42" position
And I click video button "play"
And I wait until video stop playing
Then I see video slider at "1:54" position
# NOTE: The above video duration(1:54) is disputed because
# 1. Our Video Player first shows Video Duration equals to 1 minute and 56 sec and then 1 minute and 54 sec
# 2 YouTube first shows duration of 1 minute and 56 seconds and then changes duration to 1 minute and 55 sec
#
# The 1:56 time is the duration from metadata. 1:54 time is the duration reported by the video API once
# the video starts playing. BUT sometime video API gives duration equals 1 minute and 55 second.
"""
data = {'end_time': '00:01:00'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.seek('1:42')
self.video.click_player_button('play')
# wait until video stop playing
self.video.wait_for_state('finished')
self.assertIn(self.video.position(), ['1:54', '1:55'])
| agpl-3.0 |
OrangutanGaming/OG_SelfBot | cogs/eval.py | 1 | 3925 | import discord
from discord.ext import commands
import inspect
import io
from contextlib import redirect_stdout
import textwrap, traceback
import argparse
import cogs.utils.formatting as formatting
from sympy import *
import sys
import mpmath
sys.modules["sympy.mpmath"] = mpmath
class Eval():
def __init__(self, bot):
self.bot = bot
self._last_result = None
def get_syntax_error(self, e):
if e.text is None:
return "```py\n{0.__class__.__name__}: {0}\n```".format(e)
return "```py\n{0.text}{1:>{0.offset}}\n{2}: {0}```".format(e, "^", type(e).__name__)
@commands.command(aliases=["ev"])
async def eval(self, ctx, *, code : str):
original = ctx.message
code = code.strip("`")
python = "{}"
result = None
env = {
"bot": self.bot,
"ctx": ctx,
"message": ctx.message,
"guild": ctx.message.guild,
"channel": ctx.message.channel,
"author": ctx.message.author
}
env.update(globals())
try:
result = eval(code, env)
if inspect.isawaitable(result):
result = await result
except Exception as e:
await ctx.channel.send(python.format(type(e).__name__ + ": " + str(e)))
await ctx.message.delete()
return
lines = ["```py"]
lines.append(f">>> {code}")
lines.append(">>> {}".format(python.format(result)))
lines.append("```")
await original.edit(content="\n".join(lines))
@commands.command(name="exec", aliases = ["ex", "exed"])
async def _exec(self, ctx, *, body: str):
if "*****" in body:
return
env = {
"bot": self.bot,
"ctx": ctx,
"channel": ctx.channel,
"author": ctx.author,
"server": ctx.guild,
"message": ctx.message,
"_": self._last_result
}
env.update(globals())
body = formatting.cleanup_code(body)
stdout = io.StringIO()
to_compile = "async def func():\n{}".format(textwrap.indent(body, " "))
try:
exec(to_compile, env)
except SyntaxError as e:
return await ctx.send(self.get_syntax_error(e))
func = env["func"]
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
await ctx.send(self.bot.blank + "```py\n{}{}\n```".format(value, traceback.format_exc()))
else:
value = stdout.getvalue()
if ret is None:
if value:
await ctx.send(self.bot.blank + "```py\n%s\n```" % value)
else:
self._last_result = ret
await ctx.send(self.bot.blank + "```py\n%s%s\n```" % (value, ret))
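    # Usage sketch for the solve command below (illustrative only; the actual
    # command prefix depends on the bot configuration).  The message body is
    # expected to hold the equation in backticks followed by the symbol to
    # solve for, e.g.:
    #
    #     .solve `x**2 = 4` x
    #
    # which edits the invoking message to show the SymPy result, e.g. [-2, 2].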
@commands.command(aliases=["olve"])
async def solve(self, ctx, *, equation: str):
x, y, z = symbols("x y z")
Return = equation.split("` ")[1].replace(" ", "")
equation = equation.split("` ")[0].replace("`", "").replace(" = ", " - ").replace("=", "-")
result = solve(equation, Return)
lines = ["```py"]
lines.append(">>> {}".format(equation.replace("-", "=")))
lines.append(f">>> {result}")
lines.append("```")
await ctx.message.edit(content="\n".join(lines))
@commands.command()
async def argtest(self, ctx):
parser = argparse.ArgumentParser()
parser.add_argument("square", help="display a square of a given number",
type=int)
args = parser.parse_args()
await ctx.send(args.square ** 2)
@commands.command()
async def content(self, ctx):
await ctx.send(ctx.message.content)
def setup(bot):
bot.add_cog(Eval(bot)) | mit |