repo_name | path | copies | size | content | license
---|---|---|---|---|---
stringlengths 6–100 | stringlengths 4–294 | stringlengths 1–5 | stringlengths 4–6 | stringlengths 606–896k | stringclasses 15 values
bruce3557/NTHUOJ_web | problem/admin.py | 4 | 1385 | '''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.contrib import admin
from problem.models import Problem, Testcase, Submission, SubmissionDetail, Tag
# Register your models here.
admin.site.register(Problem)
admin.site.register(Testcase)
admin.site.register(Submission)
admin.site.register(SubmissionDetail)
admin.site.register(Tag)
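# A hypothetical variation (not part of the original file): a custom
# ModelAdmin can be registered in place of a bare model to control the
# admin changelist, e.g.:
#
#   class ProblemAdmin(admin.ModelAdmin):
#       list_display = ('id',)  # field names here are illustrative
#
#   admin.site.register(Problem, ProblemAdmin)  # instead of register(Problem)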
| mit |
ProjexSoftware/projexui | projexui/widgets/xquerybuilderwidget/xquerybuilderwidget.py | 2 | 9247 | #!/usr/bin/python
""" Defines an interface to allow users to build their queries on the fly. """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintenance information
__maintainer__ = 'Projex Software'
__email__ = '[email protected]'
#------------------------------------------------------------------------------
from projex.text import nativestring
from projexui.qt import Signal
from projexui.qt.QtCore import Qt
from projexui.qt.QtGui import QWidget,\
QVBoxLayout
import projexui
from projexui.widgets.xquerybuilderwidget.xqueryrule \
import XQueryRule
from projexui.widgets.xquerybuilderwidget.xquerylinewidget \
import XQueryLineWidget
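# Hypothetical usage sketch (assumes a running QApplication and that the
# widget's .ui file is found by projexui.loadUi; handler names are
# illustrative, not from the original source):
#
#   widget = XQueryBuilderWidget()
#   widget.setTerms(['name', 'status'])           # convenience for setRules
#   widget.setDefaultQuery([('name', 'is', '')])
#   widget.reset()                                # applies the default query
#   widget.saveRequested.connect(on_save)         # on_save is hypothetical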
class XQueryBuilderWidget(QWidget):
""" """
saveRequested = Signal()
resetRequested = Signal()
cancelRequested = Signal()
def __init__( self, parent = None ):
super(XQueryBuilderWidget, self).__init__( parent )
# load the user interface
projexui.loadUi(__file__, self)
self.setMinimumWidth(470)
# define custom properties
self._rules = {}
self._defaultQuery = []
self._completionTerms = []
self._minimumCount = 1
# set default properties
self._container = QWidget(self)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(2)
layout.addStretch(1)
self._container.setLayout(layout)
self.uiQueryAREA.setWidget(self._container)
# create connections
self.uiResetBTN.clicked.connect( self.emitResetRequested )
self.uiSaveBTN.clicked.connect( self.emitSaveRequested )
self.uiCancelBTN.clicked.connect( self.emitCancelRequested )
self.resetRequested.connect( self.reset )
def addLineWidget( self, query = None ):
"""
Adds a new line widget to the system with the given values.
:param query | (<str> term, <str> operator, <str> value) || None
"""
widget = XQueryLineWidget(self)
widget.setTerms(sorted(self._rules.keys()))
widget.setQuery(query)
index = self._container.layout().count() - 1
self._container.layout().insertWidget(index, widget)
widget.addRequested.connect( self.addLineWidget )
widget.removeRequested.connect( self.removeLineWidget )
# update the remove enabled options for these widgets
self.updateRemoveEnabled()
def addRule( self, rule ):
"""
Adds a rule to the system.
:param rule | <XQueryRule>
"""
self._rules[rule.term()] = rule
self.updateRules()
def clear( self ):
"""
Clears out all the widgets from the system.
"""
for lineWidget in self.lineWidgets():
lineWidget.setParent(None)
lineWidget.deleteLater()
def completionTerms( self ):
"""
Returns the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:return [<str>, ..]
"""
return self._completionTerms
def count( self ):
"""
Returns the count of the line widgets in the system.
:return <int>
"""
return len(self.lineWidgets())
def currentQuery( self ):
"""
Returns the current query string for this widget.
:return [(<str> term, <str> operator, <str> value), ..]
"""
widgets = self.lineWidgets()
output = []
for widget in widgets:
output.append(widget.query())
return output
def defaultQuery( self ):
"""
Returns the default query for the system.
:return [(<str> term, <str> operator, <str> value), ..]
"""
return self._defaultQuery
def keyPressEvent( self, event ):
"""
Emits the save requested signal for this builder when the Enter
or Return key is pressed.
:param event | <QKeyEvent>
"""
if ( event.key() in (Qt.Key_Enter, Qt.Key_Return) ):
self.emitSaveRequested()
super(XQueryBuilderWidget, self).keyPressEvent(event)
def emitCancelRequested( self ):
"""
Emits the cancel requested signal.
"""
if ( not self.signalsBlocked() ):
self.cancelRequested.emit()
def emitResetRequested( self ):
"""
Emits the reset requested signal.
"""
if ( not self.signalsBlocked() ):
self.resetRequested.emit()
def emitSaveRequested( self ):
"""
Emits the save requested signal.
"""
if ( not self.signalsBlocked() ):
self.saveRequested.emit()
def findRule( self, term ):
"""
Looks up a rule by the given term.
:param term | <str>
:return <XQueryRule> || None
"""
return self._rules.get(nativestring(term))
def removeLineWidget( self, widget ):
"""
Removes the line widget from the query.
:param widget | <XQueryLineWidget>
"""
widget.setParent(None)
widget.deleteLater()
self.updateRemoveEnabled()
def minimumCount( self ):
"""
Defines the minimum number of query widgets that are allowed.
:return <int>
"""
return self._minimumCount
def lineWidgets( self ):
"""
Returns a list of line widgets for this system.
:return [<XQueryLineWidget>, ..]
"""
return self.findChildren(XQueryLineWidget)
def reset( self ):
"""
Resets the system to the default query.
"""
self.setCurrentQuery(self.defaultQuery())
def setCompletionTerms( self, terms ):
"""
Sets the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:param terms | [<str>, ..]
"""
self._completionTerms = terms
def setCurrentQuery( self, query ):
"""
Sets the query for this system to the given query.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self.clear()
for entry in query:
self.addLineWidget(entry)
# make sure we have the minimum number of widgets
for i in range(self.minimumCount() - len(query)):
self.addLineWidget()
def setDefaultQuery( self, query ):
"""
Sets the default query that will be used when the user clicks on the \
reset button or the reset method is called.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self._defaultQuery = query[:]
def setMinimumCount( self, count ):
"""
Sets the minimum number of line widgets that are allowed at any \
given time.
:param count | <int>
"""
self._minimumCount = count
def setRules( self, rules ):
"""
Sets all the rules for this builder.
:param rules | [<XQueryRule>, ..]
"""
if ( type(rules) in (list, tuple) ):
self._rules = dict([(x.term(), x) for x in rules])
self.updateRules()
return True
elif ( type(rules) == dict ):
self._rules = rules.copy()
self.updateRules()
return True
else:
return False
def setTerms( self, terms ):
"""
Sets a simple rule list by accepting a list of strings for terms. \
This is a convenience method for the setRules method.
:param rules | [<str> term, ..]
"""
return self.setRules([XQueryRule(term = term) for term in terms])
def updateRemoveEnabled( self ):
"""
Updates the remove-enabled state based on the current number of line widgets.
"""
lineWidgets = self.lineWidgets()
count = len(lineWidgets)
state = self.minimumCount() < count
for widget in lineWidgets:
widget.setRemoveEnabled(state)
def updateRules( self ):
"""
Updates the query line items to match the latest rule options.
"""
terms = sorted(self._rules.keys())
for child in self.lineWidgets():
child.setTerms(terms) | lgpl-3.0 |
diagramsoftware/odoo | addons/analytic_contract_hr_expense/analytic_contract_hr_expense.py | 223 | 7860 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
class account_analytic_account(osv.osv):
_name = "account.analytic.account"
_inherit = "account.analytic.account"
def _get_total_estimation(self, account):
tot_est = super(account_analytic_account, self)._get_total_estimation(account)
if account.charge_expenses:
tot_est += account.est_expenses
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = super(account_analytic_account, self)._get_total_invoiced(account)
if account.charge_expenses:
total_invoiced += account.expense_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = super(account_analytic_account, self)._get_total_remaining(account)
if account.charge_expenses:
total_remaining += account.remaining_expense
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = super(account_analytic_account, self)._get_total_toinvoice(account)
if account.charge_expenses:
total_toinvoice += account.expense_to_invoice
return total_toinvoice
def _remaining_expense_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.est_expenses != 0:
res[account.id] = max(account.est_expenses - account.expense_invoiced, account.expense_to_invoice)
else:
res[account.id]=0.0
return res
def _expense_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
#We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for account in self.pool.get('account.analytic.account').browse(cr, uid, ids, context=context):
cr.execute("""
SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
FROM account_analytic_line line
LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
WHERE account_id = %s
AND journal.type = 'purchase'
AND invoice_id IS NULL
AND to_invoice IS NOT NULL
GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
res[account.id] = 0.0
for product_id, total_amount, user_id, factor_id, qty, uom, line_name in cr.fetchall():
#the amount to reinvoice is the real cost. We don't use the pricelist
total_amount = -total_amount
factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
res[account.id] += total_amount * (100 - factor.factor or 0.0) / 100.0
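# Worked example with illustrative numbers: for a real cost of 100.0 and an
# invoicing factor (discount percentage) of 20.0, the amount still to invoice
# is 100.0 * (100 - 20.0) / 100.0 = 80.0.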
return res
def _expense_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
lines_obj = self.pool.get('account.analytic.line')
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('to_invoice','!=', False), ('journal_id.type', '=', 'purchase')], context=context)
#Put invoices in a separate array so they are not counted twice
invoices = []
for line in lines_obj.browse(cr, uid, line_ids, context=context):
if line.invoice_id not in invoices:
invoices.append(line.invoice_id)
for invoice in invoices:
res[account.id] += invoice.amount_untaxed
return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
result = super(account_analytic_account, self)._ca_invoiced_calc(cr, uid, ids, name, arg, context=context)
for acc in self.browse(cr, uid, result.keys(), context=context):
result[acc.id] = result[acc.id] - (acc.expense_invoiced or 0.0)
return result
_columns = {
'charge_expenses' : fields.boolean('Charge Expenses'),
'expense_invoiced' : fields.function(_expense_invoiced_calc, type="float"),
'expense_to_invoice' : fields.function(_expense_to_invoice_calc, type='float'),
'remaining_expense' : fields.function(_remaining_expense_calc, type="float"),
'est_expenses': fields.float('Estimation of Expenses to Invoice'),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['charge_expenses'] = template.charge_expenses
res['value']['est_expenses'] = template.est_expenses
return res
def open_hr_expense(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
dummy, act_window_id = mod_obj.get_object_reference(cr, uid, 'hr_expense', 'expense_all')
result = act_obj.read(cr, uid, [act_window_id], context=context)[0]
line_ids = self.pool.get('hr.expense.line').search(cr,uid,[('analytic_account', 'in', ids)])
result['domain'] = [('line_ids', 'in', line_ids)]
names = [account.name for account in self.browse(cr, uid, ids, context=context)]
result['name'] = _('Expenses of %s') % ','.join(names)
result['context'] = {'analytic_account': ids[0]}
result['view_type'] = 'form'
return result
def hr_to_invoice_expense(self, cr, uid, ids, context=None):
domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'purchase'), ('account_id', 'in', ids)]
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Expenses to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'domain' : domain,
'res_model': 'account.analytic.line',
'nodestroy': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
maverickYQB/mqtt_zway | test/test_main_class.py | 1 | 2397 | #!/usr/bin/env python
'''
Created on March 20 2016
@author: popotvin
'''
import mqtt_zway_test
import mqtt_zway
import paho.mqtt.client as mqtt
import time
import traceback
date_time = mqtt_zway_test.date_time
# Main variables
mqtt_old_payload = []
mqtt_new_payload = []
payload = {}
publish_string = ""
# MQTT config
outgoing_topic = mqtt_zway_test.outgoing_topic
ongoing_topic = mqtt_zway_test.ongoing_topic
mqtt_ip = mqtt_zway_test.mqtt_ip
mqtt_port = mqtt_zway_test.mqtt_port
mqtt_client = mqtt_zway_test.mqtt_client
# ZWAY config
zway_ip = mqtt_zway_test.zway_ip
zway_port = mqtt_zway_test.zway_port
# list of connected devices on the zway server (device_id, device type, device level value)
zway_devList = mqtt_zway.zway_devList(zway_ip,zway_port)
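# dev_dict() is assumed to map each device key to a dict with at least 'id'
# and 'type' entries, e.g. {'dev1': {'id': 5, 'type': 'switchBinary'}}
# (values here are illustrative, not taken from the original project).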
# MQTT Client init
mqttc = mqtt.Client(str(mqtt_client))
mqttc.on_subscribe = mqtt_zway_test.on_subscribe
mqttc.on_message = mqtt_zway_test.on_message
mqttc.on_connect = mqtt_zway_test.on_connect
mqttc.connect(mqtt_ip, mqtt_port)
# Test zway and MQTT servers
zway_test = mqtt_zway.server_test(zway_ip, zway_port)
mqtt_test = mqtt_zway.server_test(mqtt_ip, mqtt_port)
# Main loop
if zway_test and mqtt_test:
print "ZWAY is running at: %s"% str(date_time)
print "MQTT is running at: %s"% str(date_time)
while True:
try:
mqttc.loop()
for key, value in zway_devList.dev_dict().iteritems():
for i,j in value.iteritems():
if i == "id":
dev_id = j
elif i == "type":
dev_type = j
zway_devList.dev_get(dev_id, dev_type)
payload["device_id"] = str(dev_id)
payload["type"] = str(dev_type)
payload["value"] = zway_devList.dev_value(dev_id, dev_type)
mqtt_new_payload.append(dict(payload))
time.sleep(0.1)
if mqtt_old_payload != mqtt_new_payload:
mqttc.publish(outgoing_topic, str(mqtt_new_payload))
#print "published to mQTT: %s" % mqtt_new_payload
mqtt_old_payload = mqtt_new_payload
mqtt_new_payload = []
time.sleep(0.5)
except Exception, e:
print traceback.print_exc()
break
elif not zway_test:
print "ZWAY server is offline"
elif not mqtt_test:
print "MQTT server is Offline"
| gpl-3.0 |
j4/horizon | openstack_dashboard/urls.py | 56 | 1979 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls.static import static # noqa
from django.conf.urls import url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
import horizon
urlpatterns = patterns(
'',
url(r'^$', 'openstack_dashboard.views.splash', name='splash'),
url(r'^api/', include('openstack_dashboard.api.rest.urls')),
url(r'', include(horizon.urls)),
)
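# Each entry in AUTHENTICATION_URLS is mounted under the /auth/ prefix below;
# a hypothetical override in settings might look like:
#   AUTHENTICATION_URLS = ['openstack_auth.urls', 'mycompany_auth.urls']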
for u in getattr(settings, 'AUTHENTICATION_URLS', ['openstack_auth.urls']):
urlpatterns += patterns(
'',
url(r'^auth/', include(u))
)
# Development static app and project media serving using the staticfiles app.
urlpatterns += staticfiles_urlpatterns()
# Convenience function for serving user-uploaded media during
# development. Only active if DEBUG==True and the URL prefix is a local
# path. Production media should NOT be served by Django.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^500/$', 'django.views.defaults.server_error')
)
| apache-2.0 |
tylerclair/py3canvas | py3canvas/apis/modules.py | 1 | 54047 | """Modules API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class ModulesAPI(BaseCanvasAPI):
"""Modules API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for ModulesAPI."""
super(ModulesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.ModulesAPI")
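# Hypothetical usage sketch (constructor arguments depend on BaseCanvasAPI,
# which is not shown here; values are illustrative):
#   api = ModulesAPI('https://canvas.example.edu/api', 'ACCESS_TOKEN')
#   modules = api.list_modules(course_id=101, include=['items'])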
def list_modules(self, course_id, include=None, search_term=None, student_id=None):
"""
List modules.
List the modules in a course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial name of the modules (and module items, if include['items'] is
specified) to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, all_pages=True)
def show_module(self, id, course_id, include=None, student_id=None):
"""
Show module.
Get information about a single module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def create_module(self, course_id, module_name, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Create a module.
Create and return a new module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - module[name]
"""The name of the module"""
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of this module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked.
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
self.logger.debug("POST /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, single_item=True)
def update_module(self, id, course_id, module_name=None, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_published=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Update a module.
Update and return an existing module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module[name]
"""The name of the module"""
if module_name is not None:
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of the module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
# OPTIONAL - module[published]
"""Whether the module is published and visible to students"""
if module_published is not None:
data["module[published]"] = module_published
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def delete_module(self, id, course_id):
"""
Delete module.
Delete a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def re_lock_module_progressions(self, id, course_id):
"""
Re-lock module progressions.
Resets module progressions to their default locked state and
recalculates them based on the current requirements.
Adding progression requirements to an active course will not lock students
out of modules they have already unlocked unless this action is called.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id}/relock with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}/relock".format(**path), data=data, params=params, single_item=True)
def list_module_items(self, course_id, module_id, include=None, search_term=None, student_id=None):
"""
List module items.
List the items in a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with each item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial title of the items to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, all_pages=True)
def show_module_item(self, id, course_id, module_id, include=None, student_id=None):
"""
Show module item.
Get information about a single module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with this item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def create_module_item(self, course_id, module_id, module_item_type, module_item_content_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_new_tab=None, module_item_page_url=None, module_item_position=None, module_item_title=None):
"""
Create a module item.
Create and return a new module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - module_item[title]
"""The name of the module item and associated content"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# REQUIRED - module_item[type]
"""The type of content linked to the item"""
self._validate_enum(module_item_type, ["File", "Page", "Discussion", "Assignment", "Quiz", "SubHeader", "ExternalUrl", "ExternalTool"])
data["module_item[type]"] = module_item_type
# REQUIRED - module_item[content_id]
"""The id of the content to link to the module item. Required, except for
'ExternalUrl', 'Page', and 'SubHeader' types."""
data["module_item[content_id]"] = module_item_content_id
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)."""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[page_url]
"""Suffix for the linked wiki page (e.g. 'front-page'). Required for 'Page'
type."""
if module_item_page_url is not None:
data["module_item[page_url]"] = module_item_page_url
# OPTIONAL - module_item[external_url]
"""External url that the item points to. [Required for 'ExternalUrl' and
'ExternalTool' types."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete. Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, single_item=True)
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
"""
Update a module item.
Update and return an existing module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module_item[title]
"""The name of the module item"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)"""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[external_url]
"""External url that the item points to. Only applies to 'ExternalUrl' type."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete, Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
# OPTIONAL - module_item[published]
"""Whether the module item is published and visible to students."""
if module_item_published is not None:
data["module_item[published]"] = module_item_published
# OPTIONAL - module_item[module_id]
"""Move this item to another module by specifying the target module id here.
The target module must be in the same course."""
if module_item_module_id is not None:
data["module_item[module_id]"] = module_item_module_id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def select_mastery_path(self, id, course_id, module_id, assignment_set_id=None, student_id=None):
"""
Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - assignment_set_id
"""Assignment set chosen, as specified in the mastery_paths portion of the
context module item response"""
if assignment_set_id is not None:
data["assignment_set_id"] = assignment_set_id
# OPTIONAL - student_id
"""Which student the selection applies to. If not specified, current user is
implied."""
if student_id is not None:
data["student_id"] = student_id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path".format(**path), data=data, params=params, no_data=True)
def delete_module_item(self, id, course_id, module_id):
"""
Delete module item.
Delete a module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def mark_module_item_as_done_not_done(self, id, course_id, module_id):
"""
Mark module item as done/not done.
Mark a module item as done/not done. Use HTTP method PUT to mark as done,
and DELETE to mark as not done.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done".format(**path), data=data, params=params, no_data=True)
def get_module_item_sequence(self, course_id, asset_id=None, asset_type=None):
"""
Get module item sequence.
Given an asset in a course, find the ModuleItem it belongs to, and also the previous and next Module Items
in the course sequence.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - asset_type
"""The type of asset to find module sequence information for. Use the ModuleItem if it is known
(e.g., the user navigated from a module item), since this will avoid ambiguity if the asset
appears more than once in the module sequence."""
if asset_type is not None:
self._validate_enum(asset_type, ["ModuleItem", "File", "Page", "Discussion", "Assignment", "Quiz", "ExternalTool"])
params["asset_type"] = asset_type
# OPTIONAL - asset_id
"""The id of the asset (or the url in the case of a Page)"""
if asset_id is not None:
params["asset_id"] = asset_id
self.logger.debug("GET /api/v1/courses/{course_id}/module_item_sequence with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/module_item_sequence".format(**path), data=data, params=params, single_item=True)
def mark_module_item_read(self, id, course_id, module_id):
"""
Mark module item read.
Fulfills "must view" requirement for a module item. It is generally not necessary to do this explicitly,
but it is provided for applications that need to access external content directly (bypassing the html_url
redirect that normally allows Canvas to fulfill "must view" requirements).
This endpoint cannot be used to complete requirements on locked or unpublished module items.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read".format(**path), data=data, params=params, no_data=True)
class Contentdetails(BaseModel):
"""Contentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Contentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Contentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
class Moduleitemsequenceasset(BaseModel):
"""Moduleitemsequenceasset Model."""
def __init__(self, module_id=None, type=None, id=None, title=None):
"""Init method for Moduleitemsequenceasset class."""
self._module_id = module_id
self._type = type
self._id = id
self._title = title
self.logger = logging.getLogger('py3canvas.Moduleitemsequenceasset')
@property
def module_id(self):
"""module_id."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""id."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def title(self):
"""title."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
class Moduleitemcompletionrequirement(BaseModel):
"""Moduleitemcompletionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Moduleitemcompletionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Moduleitemcompletionrequirement')
@property
def min_score(self):
"""min_score."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""completed."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Module(BaseModel):
"""Module Model."""
def __init__(self, completed_at=None, items_count=None, unlock_at=None, workflow_state=None, items=None, prerequisite_module_ids=None, state=None, publish_final_grade=None, position=None, items_url=None, id=None, require_sequential_progress=None, name=None):
"""Init method for Module class."""
self._completed_at = completed_at
self._items_count = items_count
self._unlock_at = unlock_at
self._workflow_state = workflow_state
self._items = items
self._prerequisite_module_ids = prerequisite_module_ids
self._state = state
self._publish_final_grade = publish_final_grade
self._position = position
self._items_url = items_url
self._id = id
self._require_sequential_progress = require_sequential_progress
self._name = name
self.logger = logging.getLogger('py3canvas.Module')
@property
def completed_at(self):
"""the date the calling user completed the module (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed_at
@completed_at.setter
def completed_at(self, value):
"""Setter for completed_at property."""
self.logger.warn("Setting values on completed_at will NOT update the remote Canvas instance.")
self._completed_at = value
@property
def items_count(self):
"""The number of items in the module."""
return self._items_count
@items_count.setter
def items_count(self, value):
"""Setter for items_count property."""
self.logger.warn("Setting values on items_count will NOT update the remote Canvas instance.")
self._items_count = value
@property
def unlock_at(self):
"""(Optional) the date this module will unlock."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def workflow_state(self):
"""the state of the module: 'active', 'deleted'."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def items(self):
"""The contents of this module, as an array of Module Items. (Present only if requested via include[]=items AND the module is not deemed too large by Canvas.)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def prerequisite_module_ids(self):
"""IDs of Modules that must be completed before this one is unlocked."""
return self._prerequisite_module_ids
@prerequisite_module_ids.setter
def prerequisite_module_ids(self, value):
"""Setter for prerequisite_module_ids property."""
self.logger.warn("Setting values on prerequisite_module_ids will NOT update the remote Canvas instance.")
self._prerequisite_module_ids = value
@property
def state(self):
"""The state of this Module for the calling user one of 'locked', 'unlocked', 'started', 'completed' (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._state
@state.setter
def state(self, value):
"""Setter for state property."""
self.logger.warn("Setting values on state will NOT update the remote Canvas instance.")
self._state = value
@property
def publish_final_grade(self):
"""if the student's final grade for the course should be published to the SIS upon completion of this module."""
return self._publish_final_grade
@publish_final_grade.setter
def publish_final_grade(self, value):
"""Setter for publish_final_grade property."""
self.logger.warn("Setting values on publish_final_grade will NOT update the remote Canvas instance.")
self._publish_final_grade = value
@property
def position(self):
"""the position of this module in the course (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def items_url(self):
"""The API URL to retrive this module's items."""
return self._items_url
@items_url.setter
def items_url(self, value):
"""Setter for items_url property."""
self.logger.warn("Setting values on items_url will NOT update the remote Canvas instance.")
self._items_url = value
@property
def id(self):
"""the unique identifier for the module."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def require_sequential_progress(self):
"""Whether module items must be unlocked in order."""
return self._require_sequential_progress
@require_sequential_progress.setter
def require_sequential_progress(self, value):
"""Setter for require_sequential_progress property."""
self.logger.warn("Setting values on require_sequential_progress will NOT update the remote Canvas instance.")
self._require_sequential_progress = value
@property
def name(self):
"""the name of this module."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
class Moduleitemsequence(BaseModel):
"""Moduleitemsequence Model."""
def __init__(self, items=None, modules=None):
"""Init method for Moduleitemsequence class."""
self._items = items
self._modules = modules
self.logger = logging.getLogger('py3canvas.Moduleitemsequence')
@property
def items(self):
"""an array containing one hash for each appearence of the asset in the module sequence (up to 10 total)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def modules(self):
"""an array containing each Module referenced above."""
return self._modules
@modules.setter
def modules(self, value):
"""Setter for modules property."""
self.logger.warn("Setting values on modules will NOT update the remote Canvas instance.")
self._modules = value
class Completionrequirement(BaseModel):
"""Completionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Completionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Completionrequirement')
@property
def min_score(self):
"""minimum score required to complete (only present when type == 'min_score')."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""one of 'must_view', 'must_submit', 'must_contribute', 'min_score'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""whether the calling user has met this requirement (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Moduleitem(BaseModel):
"""Moduleitem Model."""
def __init__(self, indent=None, title=None, url=None, completion_requirement=None, html_url=None, content_details=None, new_tab=None, external_url=None, position=None, module_id=None, content_id=None, type=None, id=None, page_url=None):
"""Init method for Moduleitem class."""
self._indent = indent
self._title = title
self._url = url
self._completion_requirement = completion_requirement
self._html_url = html_url
self._content_details = content_details
self._new_tab = new_tab
self._external_url = external_url
self._position = position
self._module_id = module_id
self._content_id = content_id
self._type = type
self._id = id
self._page_url = page_url
self.logger = logging.getLogger('py3canvas.Moduleitem')
@property
def indent(self):
"""0-based indent level; module items may be indented to show a hierarchy."""
return self._indent
@indent.setter
def indent(self, value):
"""Setter for indent property."""
self.logger.warn("Setting values on indent will NOT update the remote Canvas instance.")
self._indent = value
@property
def title(self):
"""the title of this item."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
@property
def url(self):
"""(Optional) link to the Canvas API object, if applicable."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn("Setting values on url will NOT update the remote Canvas instance.")
self._url = value
@property
def completion_requirement(self):
"""Completion requirement for this module item."""
return self._completion_requirement
@completion_requirement.setter
def completion_requirement(self, value):
"""Setter for completion_requirement property."""
self.logger.warn("Setting values on completion_requirement will NOT update the remote Canvas instance.")
self._completion_requirement = value
@property
def html_url(self):
"""link to the item in Canvas."""
return self._html_url
@html_url.setter
def html_url(self, value):
"""Setter for html_url property."""
self.logger.warn("Setting values on html_url will NOT update the remote Canvas instance.")
self._html_url = value
@property
def content_details(self):
"""(Present only if requested through include[]=content_details) If applicable, returns additional details specific to the associated object."""
return self._content_details
@content_details.setter
def content_details(self, value):
"""Setter for content_details property."""
self.logger.warn("Setting values on content_details will NOT update the remote Canvas instance.")
self._content_details = value
@property
def new_tab(self):
"""(only for 'ExternalTool' type) whether the external tool opens in a new tab."""
return self._new_tab
@new_tab.setter
def new_tab(self, value):
"""Setter for new_tab property."""
self.logger.warn("Setting values on new_tab will NOT update the remote Canvas instance.")
self._new_tab = value
@property
def external_url(self):
"""(only for 'ExternalUrl' and 'ExternalTool' types) external url that the item points to."""
return self._external_url
@external_url.setter
def external_url(self, value):
"""Setter for external_url property."""
self.logger.warn("Setting values on external_url will NOT update the remote Canvas instance.")
self._external_url = value
@property
def position(self):
"""the position of this item in the module (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def module_id(self):
"""the id of the Module this item appears in."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def content_id(self):
"""the id of the object referred to applies to 'File', 'Discussion', 'Assignment', 'Quiz', 'ExternalTool' types."""
return self._content_id
@content_id.setter
def content_id(self, value):
"""Setter for content_id property."""
self.logger.warn("Setting values on content_id will NOT update the remote Canvas instance.")
self._content_id = value
@property
def type(self):
"""the type of object referred to one of 'File', 'Page', 'Discussion', 'Assignment', 'Quiz', 'SubHeader', 'ExternalUrl', 'ExternalTool'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""the unique identifier for the module item."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def page_url(self):
"""(only for 'Page' type) unique locator for the linked wiki page."""
return self._page_url
@page_url.setter
def page_url(self, value):
"""Setter for page_url property."""
self.logger.warn("Setting values on page_url will NOT update the remote Canvas instance.")
self._page_url = value
class Moduleitemsequencenode(BaseModel):
"""Moduleitemsequencenode Model."""
def __init__(self, current=None, prev=None, next=None):
"""Init method for Moduleitemsequencenode class."""
self._current = current
self._prev = prev
self._next = next
self.logger = logging.getLogger('py3canvas.Moduleitemsequencenode')
@property
def current(self):
"""current."""
return self._current
@current.setter
def current(self, value):
"""Setter for current property."""
self.logger.warn("Setting values on current will NOT update the remote Canvas instance.")
self._current = value
@property
def prev(self):
"""prev."""
return self._prev
@prev.setter
def prev(self, value):
"""Setter for prev property."""
self.logger.warn("Setting values on prev will NOT update the remote Canvas instance.")
self._prev = value
@property
def next(self):
"""next."""
return self._next
@next.setter
def next(self, value):
"""Setter for next property."""
self.logger.warn("Setting values on next will NOT update the remote Canvas instance.")
self._next = value
class Moduleitemcontentdetails(BaseModel):
"""Moduleitemcontentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Moduleitemcontentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Moduleitemcontentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
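# A minimal usage sketch for the models above (values are hypothetical; these
# classes only mirror Canvas API payloads locally and never call Canvas):
#
#   item = Moduleitem(id=768, title="Week 1: Intro", type="Page",
#                     page_url="week-1-intro", position=1)
#   item.indent = 1  # logs a warning: the remote Canvas instance is NOT updated
#   item.completion_requirement = Completionrequirement(type="min_score",
#                                                       min_score=10.0,
#                                                       completed=False)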
| mit |
fengzhe29888/gnuradio-old | gr-blocks/python/blocks/qa_threshold.py | 57 | 1537 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_threshold(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_01(self):
tb = self.tb
data = [0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2]
expected_result = (0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1)
src = blocks.vector_source_f(data, False)
op = blocks.threshold_ff(1, 1)
dst = blocks.vector_sink_f()
tb.connect(src, op)
tb.connect(op, dst)
tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
if __name__ == '__main__':
gr_unittest.run(test_threshold, "test_threshold.xml")
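# For reference, a hedged sketch of the hysteresis case: with distinct lo/hi
# arguments (assumed semantics of threshold_ff(lo, hi), not exercised above),
# the output flips to 1 only above hi and back to 0 only below lo:
#
#   op = blocks.threshold_ff(0.5, 1.5)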
| gpl-3.0 |
kinnou02/navitia | source/jormungandr/jormungandr/parking_space_availability/__init__.py | 3 | 1795 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from jormungandr.parking_space_availability.abstract_parking_places_provider import AbstractParkingPlacesProvider
from jormungandr.parking_space_availability.abstract_provider_manager import AbstractProviderManager
from jormungandr.parking_space_availability.abstract_provider_manager import get_from_to_pois_of_journeys
from jormungandr.parking_space_availability.bss.stands import Stands, StandsStatus
from jormungandr.parking_space_availability.car.parking_places import ParkingPlaces
| agpl-3.0 |
0xkag/tornado | tornado/test/simple_httpclient_test.py | 13 | 22722 | from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import sys
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
if self.get_argument("error", None):
self.set_header("Content-Length", "5")
self.write("hello")
self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
@gen.coroutine
def get(self):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.request.connection.stream
yield stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_default_ca_certs()).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_request_timeout(self):
response = self.fetch('/trigger?wake=false', request_timeout=0.1)
self.assertEqual(response.code, 599)
self.assertTrue(0.099 < response.request_time < 0.15, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
try:
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
self.http_client.fetch(url, self.stop, allow_ipv6=False)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def xtest_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
#
# A test without a content-length header is included below
# in HTTP204NoContentTestCase.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, "Malformed HTTP message"):
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
server_socket, port = bind_unused_port()
server_socket.close()
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://localhost:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(response.error)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
self.assertTrue(contains_errno, response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/trigger'), self.stop,
request_timeout=10)
# Wait for the trigger request to block, not complete.
self.wait()
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=0.1)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(response.request_time < 1, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
self.triggers.popleft()()
self.wait()
def test_no_content_length(self):
response = self.fetch("/no_content_length")
self.assertEquals(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.Task(IOLoop.current().add_callback)
yield write(b'5678')
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
**kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
# A 204 response never has a body, even if it doesn't have a content-length
# (which would otherwise mean read-until-close). Tornado always
# sends a content-length, so we simulate here a server that sends
# no content length and does not close the connection.
#
# Tests of a 204 response with a Content-Length header are included
# in SimpleHTTPClientTestMixin.
request.connection.stream.write(
b"HTTP/1.1 204 No content\r\n\r\n")
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never invokes its callback.
class BadResolver(Resolver):
def resolve(self, *args, **kwargs):
pass
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
response = self.fetch('/hello', connect_timeout=0.1)
self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
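# For reference, a minimal standalone use of the client exercised above --
# a sketch only, assuming a reachable URL; not part of the test suite:
#
#   from tornado.ioloop import IOLoop
#   from tornado.simple_httpclient import SimpleAsyncHTTPClient
#
#   def handle(response):
#       print(response.code)
#       IOLoop.current().stop()
#
#   SimpleAsyncHTTPClient().fetch("http://example.com/", handle)
#   IOLoop.current().start()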
| apache-2.0 |
IronLanguages/ironpython2 | Src/StdLib/Lib/test/test_poll.py | 4 | 7315 | # Test case for the os.poll() function
import os
import random
import select
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.test_support import TESTFN, run_unittest, reap_threads, cpython_only
try:
select.poll
except AttributeError:
raise unittest.SkipTest, "select.poll not defined -- skipping test_poll"
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
# Create a bunch of pipe and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = " This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError, "no pipes ready for writing"
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError, "no pipes ready for reading"
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
os.close(r2w[rd]) ; os.close( rd )
p.unregister( r2w[rd] )
p.unregister( rd )
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
pollster.register( p, select.POLLIN )
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1L << 64)
x = 2 + 3
if x != 5:
self.fail('Overflow must have occurred')
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, -1)
self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
self.assertRaises(OverflowError, pollster.modify, 1, -1)
self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
@cpython_only
def test_poll_c_limits(self):
from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
pollster = select.poll()
pollster.register(1)
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_poll_blocks_with_negative_ms(self):
for timeout_ms in [None, -1000, -1, -1.0]:
# Create two file descriptors. This will be used to unlock
# the blocking call to poll.poll inside the thread
r, w = os.pipe()
pollster = select.poll()
pollster.register(r, select.POLLIN)
poll_thread = threading.Thread(target=pollster.poll, args=(timeout_ms,))
poll_thread.start()
poll_thread.join(timeout=0.1)
self.assertTrue(poll_thread.is_alive())
# Write to the pipe so pollster.poll unblocks and the thread ends.
os.write(w, b'spam')
poll_thread.join()
self.assertFalse(poll_thread.is_alive())
os.close(r)
os.close(w)
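# The core register/poll pattern these tests exercise, reduced to a sketch
# (assumes `r` is a readable file descriptor such as a pipe end):
#
#   p = select.poll()
#   p.register(r, select.POLLIN)
#   events = p.poll(1000)          # [(fd, eventmask), ...], or [] on timeout
#   for fd, flag in events:
#       if flag & select.POLLIN:
#           os.read(fd, 512)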
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
| apache-2.0 |
gundalow/ansible | lib/ansible/executor/task_queue_manager.py | 11 | 18711 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import tempfile
import threading
import time
import multiprocessing.queues
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.stats import AggregateStats
from ansible.executor.task_result import TaskResult
from ansible.module_utils.six import PY3, string_types
from ansible.module_utils._text import to_text, to_native
from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.vars.reserved import warn_if_reserved
from ansible.utils.display import Display
from ansible.utils.lock import lock_decorator
from ansible.utils.multiprocessing import context as multiprocessing_context
__all__ = ['TaskQueueManager']
display = Display()
class CallbackSend:
def __init__(self, method_name, *args, **kwargs):
self.method_name = method_name
self.args = args
self.kwargs = kwargs
class FinalQueue(multiprocessing.queues.Queue):
def __init__(self, *args, **kwargs):
if PY3:
kwargs['ctx'] = multiprocessing_context
super(FinalQueue, self).__init__(*args, **kwargs)
def send_callback(self, method_name, *args, **kwargs):
self.put(
CallbackSend(method_name, *args, **kwargs),
block=False
)
def send_task_result(self, *args, **kwargs):
if isinstance(args[0], TaskResult):
tr = args[0]
else:
tr = TaskResult(*args, **kwargs)
self.put(
tr,
block=False
)
class AnsibleEndPlay(Exception):
def __init__(self, result):
self.result = result
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared data structures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
RUN_OK = 0
RUN_ERROR = 1
RUN_FAILED_HOSTS = 2
RUN_UNREACHABLE_HOSTS = 4
RUN_FAILED_BREAK_PLAY = 8
RUN_UNKNOWN_ERROR = 255
def __init__(self, inventory, variable_manager, loader, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False, forks=None):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._forks = forks or 5
self._callbacks_loaded = False
self._callback_plugins = []
self._start_at_done = False
# make sure any module paths (if specified) are added to the module_loader
if context.CLIARGS.get('module_path', False):
for path in context.CLIARGS['module_path']:
if path:
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
try:
self._final_q = FinalQueue()
except OSError as e:
raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
self._callback_lock = threading.Lock()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
self._workers = []
for i in range(num):
self._workers.append(None)
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
self._stdout_callback.set_options()
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
# get all configured loadable callbacks (adjacent, builtin)
callback_list = list(callback_loader.all(class_only=True))
# add enabled callbacks that refer to collections, which might not appear in normal listing
for c in C.CALLBACKS_ENABLED:
# load all, as collection ones might be using short/redirected names and not a fqcn
plugin = callback_loader.get(c, class_only=True)
# TODO: check if this skip is redundant, loader should handle bad file/plugin cases already
if plugin:
# avoids incorrect and dupes possible due to collections
if plugin not in callback_list:
callback_list.append(plugin)
else:
display.warning("Skipping callback plugin '%s', unable to load" % c)
# for each callback in the list see if we should add it to 'active callbacks' used in the play
for callback_plugin in callback_list:
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
# try to get collection world name first
cnames = getattr(callback_plugin, '_redirected_names', [])
if cnames:
# store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
callback_name = cnames[0]
else:
# fallback to 'old loader name'
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
display.vvvvv("Attempting to use '%s' callback." % (callback_name))
if callback_type == 'stdout':
# we only allow one callback of type 'stdout' to be loaded,
if callback_name != self._stdout_callback or stdout_callback_loaded:
display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
# TODO: remove special case for tree, which is an adhoc cli option --tree
pass
elif not self._run_additional_callbacks or (callback_needs_enabled and (
# only run if not adhoc, or adhoc was specifically configured to run + check enabled list
C.CALLBACKS_ENABLED is None or callback_name not in C.CALLBACKS_ENABLED)):
# 2.x plugins shipped with ansible should require enabling, older or non shipped should load automatically
continue
try:
callback_obj = callback_plugin()
# avoid a bad plugin not returning an object; only needed because we do a class_only load and bypass loader checks,
# really a bug in the plugin itself which we ignore as callback errors are not supposed to be fatal.
if callback_obj:
# skip initializing if we already did the work for the same plugin (even with diff names)
if callback_obj not in self._callback_plugins:
callback_obj.set_options()
self._callback_plugins.append(callback_obj)
else:
display.vv("Skipping callback '%s', already loaded as '%s'." % (callback_plugin, callback_name))
else:
display.warning("Skipping callback '%s', as it does not create a valid plugin instance." % callback_name)
continue
except Exception as e:
display.warning("Skipping callback '%s', unable to load due to: %s" % (callback_name, to_native(e)))
continue
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
warn_if_reserved(all_vars, templar.environment.globals.keys())
new_play = play.copy()
new_play.post_validate(templar)
new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
play_context = PlayContext(new_play, self.passwords, self._connection_lockfile.fileno())
if (self._stdout_callback and
hasattr(self._stdout_callback, 'set_play_context')):
self._stdout_callback.set_play_context(play_context)
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done=self._start_at_done,
)
# adjust to # of workers to configured forks or size of batch, whatever is lower
self._initialize_processes(min(self._forks, iterator.batch_size))
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# Because the TQM may survive multiple play runs, we start by marking
# any hosts as failed in the iterator here which may have been marked
# as failed in previous runs. Then we clear the internal list of failed
# hosts so we know what failed this round.
for host_name in self._failed_hosts.keys():
host = self._inventory.get_host(host_name)
iterator.mark_host_failed(host)
for host_name in self._unreachable_hosts.keys():
iterator._play._removed_hosts.append(host_name)
self.clear_failed_hosts()
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if context.CLIARGS.get('start_at_task') is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
try:
play_return = strategy.run(iterator, play_context)
finally:
strategy.cleanup()
self._cleanup_processes()
# now re-save the hosts that failed from the iterator to our internal list
for host_name in iterator.get_failed_hosts():
self._failed_hosts[host_name] = True
if iterator.end_play:
raise AnsibleEndPlay(play_return)
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
# A bug exists in Python 2.6 that causes an exception to be raised during
# interpreter shutdown. This is only an issue in our CI testing but we
# hit it frequently enough to add a small sleep to avoid the issue.
# This can be removed once we have split controller available in CI.
#
# Further information:
# Issue: https://bugs.python.org/issue4106
# Fix: https://hg.python.org/cpython/rev/d316315a8781
#
try:
if (2, 6) == (sys.version_info[0:2]):
time.sleep(0.0001)
except (IndexError, AttributeError):
# In case there is an issue getting the version info, don't raise an Exception
pass
def _cleanup_processes(self):
if hasattr(self, '_workers'):
for attempts_remaining in range(C.WORKER_SHUTDOWN_POLL_COUNT - 1, -1, -1):
if not any(worker_prc and worker_prc.is_alive() for worker_prc in self._workers):
break
if attempts_remaining:
time.sleep(C.WORKER_SHUTDOWN_POLL_DELAY)
else:
display.warning('One or more worker processes are still running and will be terminated.')
for worker_prc in self._workers:
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def has_dead_workers(self):
# [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
# <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>
defunct = False
for x in self._workers:
if getattr(x, 'exitcode', None):
defunct = True
return defunct
@lock_decorator(attr='_callback_lock')
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# a plugin can opt in to implicit tasks (such as meta). It does this
# by declaring self.wants_implicit_tasks = True.
wants_implicit_tasks = getattr(callback_plugin, 'wants_implicit_tasks', False)
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possible, None)
if gotit is None:
gotit = getattr(callback_plugin, possible.replace('v2_', ''), None)
if gotit is not None:
methods.append(gotit)
# send clean copies
new_args = []
# If we end up being given an implicit task, we'll set this flag in
# the loop below. If the plugin doesn't care about those, then we
# check and continue to the next iteration of the outer loop.
is_implicit_task = False
for arg in args:
# FIXME: add play/task cleaners
if isinstance(arg, TaskResult):
new_args.append(arg.clean_copy())
# elif isinstance(arg, Play):
# elif isinstance(arg, Task):
else:
new_args.append(arg)
if isinstance(arg, Task) and arg.implicit:
is_implicit_task = True
if is_implicit_task and not wants_implicit_tasks:
continue
for method in methods:
try:
method(*new_args, **kwargs)
except Exception as e:
# TODO: add config toggle to make this fatal or not?
display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
from traceback import format_tb
from sys import exc_info
display.vvv('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
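# A condensed sketch of how callers typically drive this class (the argument
# values are placeholders; see PlaybookExecutor for the real wiring):
#
#   tqm = None
#   try:
#       tqm = TaskQueueManager(inventory=inventory,
#                              variable_manager=variable_manager,
#                              loader=loader,
#                              passwords=dict(conn_pass=None, become_pass=None))
#       result = tqm.run(play)  # one of the RUN_* codes defined above
#   finally:
#       if tqm is not None:
#           tqm.cleanup()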
| gpl-3.0 |
suhe/odoo | addons/pad/py_etherpad/__init__.py | 505 | 7804 | """Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class EtherpadLiteClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = 1 # TODO probably 1.1 sometime soon
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%d/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
# These authors are bound to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
# SESSIONS
# Sessions can be created between a group and a author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
# GROUPID$PADNAME. A security manager controls access to them, and it is
# forbidden for normal pads to include a $ in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
| gpl-3.0 |
shubhamVerma/code-eval | Category - Easy/sumdigitsCodeEval.py | 1 | 1271 | '''
sumdigitsCodeEval.py - Solution to Problem Sum Digits (Category - Easy)
Copyright (C) 2013, Shubham Verma
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Description:
Given a positive integer, find the sum of its constituent digits.
Input sample:
The first argument will be a text file containing positive integers, one per line.
e.g.
23
496
Output sample:
Print to stdout, the sum of the numbers that make up the integer, one per line.
e.g.
5
19
'''
import sys
if __name__ == '__main__':
f = open(sys.argv[1], 'r')
test_cases = f.read().split('\n')
for test_case in test_cases:
		if test_case:  # skip blank lines such as a trailing newline
			print sum(map(int, test_case))
f.close() | gpl-3.0 |
DazWorrall/ansible | lib/ansible/modules/packaging/language/composer.py | 24 | 9023 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: composer
author:
- "Dimitrios Tydeas Mengidis (@dmtrs)"
- "René Moser (@resmo)"
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
- >
Composer is a tool for dependency management in PHP. It allows you to
declare the dependent libraries your project needs and it will install
them in your project for you.
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on.
required: false
default: install
arguments:
version_added: "2.0"
description:
- Composer arguments like required package, version and so on.
required: false
default: null
executable:
version_added: "2.4"
description:
- Path to PHP Executable on the remote host, if PHP is not in PATH
required: false
default: null
aliases: [ "php_path" ]
working_dir:
description:
- Directory of your project (see --working-dir). This is required when
the command is not run globally.
- Will be ignored if C(global_command=true).
required: false
default: null
aliases: [ "working-dir" ]
global_command:
version_added: "2.4"
description:
- Runs the specified command globally.
required: false
choices: [ true, false]
default: false
aliases: [ "global-command" ]
prefer_source:
description:
- Forces installation from package sources when possible (see --prefer-source).
required: false
default: false
choices: [ true, false]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions (see --prefer-dist).
required: false
default: false
choices: [ true, false]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages (see --no-dev).
required: false
default: true
choices: [ true, false]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json (see --no-scripts).
required: false
default: false
choices: [ true, false]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins ).
required: false
default: false
choices: [ true, false]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump (see --optimize-autoloader).
- Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
- Recommended especially for production, but can take a bit of time to run.
required: false
default: true
choices: [ true, false]
aliases: [ "optimize-autoloader" ]
ignore_platform_reqs:
version_added: "2.0"
description:
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
required: false
default: false
choices: [ true, false]
aliases: [ "ignore-platform-reqs" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
- We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer:
command: install
working_dir: /path/to/project
- composer:
command: require
arguments: my/package
working_dir: /path/to/project
# Clone project and install with all dependencies
- composer:
command: create-project
arguments: package/package /path/to/project ~1.0
working_dir: /path/to/project
prefer_dist: yes
# Installs package globally
- composer:
command: require
global_command: yes
arguments: my/package
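
# Update a project's dependencies while ignoring PHP platform requirements
# (illustrative; uses only the options documented above)
- composer:
    command: update
    working_dir: /path/to/project
    ignore_platform_reqs: yes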
'''
import re
from ansible.module_utils.basic import AnsibleModule
def parse_out(string):
    return re.sub(r"\s+", " ", string).strip()
def has_changed(string):
return "Nothing to install or update" not in string
def get_available_options(module, command='install'):
# get all available options from a composer command using composer help to json
rc, out, err = composer_command(module, "help %s --format=json" % command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
command_help_json = module.from_json(out)
return command_help_json['definition']['options']
def composer_command(module, command, arguments="", options=None, global_command=False):
if options is None:
options = []
if module.params['executable'] is None:
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
else:
php_path = module.params['executable']
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(default="install", type="str", required=False),
arguments=dict(default="", type="str", required=False),
executable=dict(type="path", required=False, aliases=["php_path"]),
working_dir=dict(type="path", aliases=["working-dir"]),
global_command=dict(default=False, type="bool", aliases=["global-command"]),
prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
),
required_if=[('global_command', False, ['working_dir'])],
supports_check_mode=True
)
# Get composer command with fallback to default
command = module.params['command']
if re.search(r"\s", command):
module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
arguments = module.params['arguments']
global_command = module.params['global_command']
available_options = get_available_options(module=module, command=command)
options = []
# Default options
default_options = [
'no-ansi',
'no-interaction',
'no-progress',
]
for option in default_options:
if option in available_options:
option = "--%s" % option
options.append(option)
if not global_command:
options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
option_params = {
'prefer_source': 'prefer-source',
'prefer_dist': 'prefer-dist',
'no_dev': 'no-dev',
'no_scripts': 'no-scripts',
        'no_plugins': 'no-plugins',
'optimize_autoloader': 'optimize-autoloader',
'ignore_platform_reqs': 'ignore-platform-reqs',
}
for param, option in option_params.items():
if module.params.get(param) and option in available_options:
option = "--%s" % option
options.append(option)
if module.check_mode:
options.append('--dry-run')
rc, out, err = composer_command(module, command, arguments, options, global_command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output, stdout=err)
else:
        # Composer version > 1.0.0-alpha9 now uses stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
if __name__ == '__main__':
main()
| gpl-3.0 |
vmora/QGIS | python/plugins/processing/algs/gdal/rearrange_bands.py | 5 | 5727 | # -*- coding: utf-8 -*-
"""
***************************************************************************
rearrange_bands.py
---------------------
Date : August 2018
Copyright : (C) 2018 by Mathieu Pellerin
Email : nirvn dot asia at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Mathieu Pellerin'
__date__ = 'August 2018'
__copyright__ = '(C) 2018, Mathieu Pellerin'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterEnum,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class rearrange_bands(GdalAlgorithm):
INPUT = 'INPUT'
BANDS = 'BANDS'
OPTIONS = 'OPTIONS'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Use input layer data type', 'Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BANDS,
self.tr('Selected band(s)'),
None,
self.INPUT,
allowMultiple=True))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=0)
dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(dataType_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Converted')))
def name(self):
return 'rearrange_bands'
def displayName(self):
return self.tr('Rearrange bands')
def group(self):
return self.tr('Raster conversion')
def groupId(self):
return 'rasterconversion'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'translate.png'))
def shortHelpString(self):
return self.tr("This algorithm creates a new raster using selected band(s) from a given raster layer.\n\n"
"The algorithm also makes it possible to reorder the bands for the newly-created raster.")
def commandName(self):
return 'gdal_translate'
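    # For reference, the argument list assembled below typically expands to a
    # command like (illustrative values):
    #   gdal_translate -b 3 -b 2 -b 1 -of GTiff input.tif output.tif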
def getConsoleCommands(self, parameters, context, feedback, executing=True):
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if inLayer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
arguments = []
bands = self.parameterAsInts(parameters, self.BANDS, context)
for band in bands:
arguments.append('-b {}'.format(band))
data_type = self.parameterAsEnum(parameters, self.DATA_TYPE, context)
if data_type:
arguments.append('-ot ' + self.TYPES[data_type])
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
arguments.append(inLayer.source())
arguments.append(out)
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
frohoff/Empire | lib/modules/powershell/exploitation/exploit_jenkins.py | 2 | 3352 | import base64
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Exploit-Jenkins',
'Author': ['@luxcupitor'],
'Description': ("Run command on unauthenticated Jenkins Script consoles."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
                'Pass a command to run. If Windows, you may have to prepend "cmd /c ".'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Rhost' : {
'Description' : 'Specify the host to exploit.',
'Required' : True,
'Value' : ''
},
'Port' : {
'Description' : 'Specify the port to use.',
'Required' : True,
'Value' : '8080'
},
'Cmd' : {
'Description' : 'command to run on remote jenkins script console.',
'Required' : True,
'Value' : 'whoami'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/exploitation/Exploit-Jenkins.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "\nExploit-Jenkins"
scriptEnd += " -Rhost "+str(self.options['Rhost']['Value'])
scriptEnd += " -Port "+str(self.options['Port']['Value'])
command = str(self.options['Cmd']['Value'])
# if the command contains spaces, wrap it in quotes before passing to ps script
if " " in command:
scriptEnd += " -Cmd \"" + command + "\""
else:
scriptEnd += " -Cmd " + command
if obfuscate:
scriptEnd = helpers.obfuscate(psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
| bsd-3-clause |
schimar/ngs_tools | remove_collapsed_clusters.py | 2 | 1110 | #! /usr/bin/python
#
# This script reads a fasta file (from the last vsearch run with --id 0.8,
# used to test whether clusters collapse at a lower id) and removes all
# entries whose size annotation (seqs=) is > 1, keeping only uncollapsed clusters.
#
# Usage: ./remove_collapsed_clusters.py <input-file_name.fasta> <new_file_name.fasta>
import sys
import re
#import shutil
#import tempfile
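# Header lines are expected to carry a vsearch-style size annotation,
# e.g. (illustrative): >cluster42;;seqs=3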
newfile = open(sys.argv[2], 'a')
n_clusters = 0
with open(sys.argv[1], 'rb') as file:
for i, line in enumerate(file):
if line[0] == ">":
cluster = re.findall(';;seqs=[0-9]+', line)[0]
seq_n = int(re.findall('[0-9]+', cluster)[0])
# newline = str(cluster + ',' + seq_n + '\n')
#newfile.write(newline)
if seq_n != 1:
continue
else:
n_clusters += 1
newfile.write(line)
else:
if seq_n == 1:
newfile.write(line)
else:
continue
print n_clusters, "uncollapsed clusters found"
newfile.close()
| gpl-2.0 |
kajgan/stbgui | lib/python/Components/Converter/ClientsStreaming.py | 1 | 3432 | from Converter import Converter
from Poll import Poll
from Components.Element import cached
from Components.Sources.StreamService import StreamServiceList
from enigma import eStreamServer
from ServiceReference import ServiceReference
import socket
class ClientsStreaming(Converter, Poll, object):
UNKNOWN = -1
REF = 0
IP = 1
NAME = 2
ENCODER = 3
NUMBER = 4
SHORT_ALL = 5
ALL = 6
INFO = 7
INFO_RESOLVE = 8
INFO_RESOLVE_SHORT = 9
EXTRA_INFO = 10
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.poll_interval = 30000
self.poll_enabled = True
if type == "REF":
self.type = self.REF
elif type == "IP":
self.type = self.IP
elif type == "NAME":
self.type = self.NAME
elif type == "ENCODER":
self.type = self.ENCODER
elif type == "NUMBER":
self.type = self.NUMBER
elif type == "SHORT_ALL":
self.type = self.SHORT_ALL
elif type == "ALL":
self.type = self.ALL
elif type == "INFO":
self.type = self.INFO
elif type == "INFO_RESOLVE":
self.type = self.INFO_RESOLVE
elif type == "INFO_RESOLVE_SHORT":
self.type = self.INFO_RESOLVE_SHORT
elif type == "EXTRA_INFO":
self.type = self.EXTRA_INFO
else:
self.type = self.UNKNOWN
self.streamServer = eStreamServer.getInstance()
@cached
def getText(self):
if self.streamServer is None:
return ""
clients = []
refs = []
ips = []
names = []
encoders = []
extrainfo = _("ClientIP") + "\t" + _("Transcode") + "\t" + _("Channel") + "\n"
info = ""
for x in self.streamServer.getConnectedClients():
			refs.append(x[1])
			service_name = ServiceReference(x[1]).getServiceName() or "(unknown service)"
			names.append(service_name)
			ip = x[0]
			ips.append(ip)
if int(x[2]) == 0:
strtype = "S"
encoder = _('NO')
else:
strtype = "T"
encoder = _('YES')
encoders.append((encoder))
if self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
try:
raw = socket.gethostbyaddr(ip)
ip = raw[0]
except:
pass
if self.type == self.INFO_RESOLVE_SHORT:
ip, sep, tail = ip.partition('.')
info += ("%s %-8s %s\n") % (strtype, ip, service_name)
clients.append((ip, service_name, encoder))
extrainfo += ("%-8s\t%s\t%s") % (ip, encoder, service_name) +"\n"
if self.type == self.REF:
return ' '.join(refs)
elif self.type == self.IP:
return ' '.join(ips)
elif self.type == self.NAME:
return ' '.join(names)
elif self.type == self.ENCODER:
return _("Transcoding: ") + ' '.join(encoders)
elif self.type == self.NUMBER:
return str(len(clients))
elif self.type == self.EXTRA_INFO:
return extrainfo
elif self.type == self.SHORT_ALL:
return _("Total clients streaming: %d (%s)") % (len(clients), ' '.join(names))
elif self.type == self.ALL:
return '\n'.join(' '.join(elems) for elems in clients)
elif self.type == self.INFO or self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
return info
else:
return "(unknown)"
return ""
text = property(getText)
@cached
def getBoolean(self):
if self.streamServer is None:
return False
return (self.streamServer.getConnectedClients() or StreamServiceList) and True or False
boolean = property(getBoolean)
def changed(self, what):
Converter.changed(self, (self.CHANGED_POLL,))
def doSuspend(self, suspended):
pass | gpl-2.0 |
viaict/viaduct | app/forms/pimpy.py | 1 | 1268 | import datetime
from flask_babel import _
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, DateTimeField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import InputRequired, Optional
from app import constants
from app.service import group_service, pimpy_service
class AddTaskForm(FlaskForm):
name = StringField(_('Name'), validators=[InputRequired()])
content = TextAreaField(_('Content'), validators=[Optional()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
users = StringField(_('Users'))
status = SelectField(_('Status'), coerce=int,
choices=pimpy_service.get_task_status_choices())
class AddMinuteForm(FlaskForm):
content = TextAreaField(_('Minute content'), validators=[InputRequired()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
date = DateTimeField(_('Date'), format=constants.DATE_FORMAT,
default=datetime.date.today)
| mit |
Cito/DBUtils | tests/mock_db.py | 1 | 3341 | """This module serves as a mock object for the DB-API 2 module"""
threadsafety = 2
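# A quick usage sketch (illustrative; mirrors the classes defined below):
#
#   con = connect('testdb', 'user')
#   cur = con.cursor()
#   cur.execute('select 1')
#   assert cur.fetchone() == '1'
#   con.commit()
#   cur.close()
#   con.close()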
class Error(Exception):
pass
class DatabaseError(Error):
pass
class OperationalError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
def connect(database=None, user=None):
return Connection(database, user)
class Connection:
has_ping = False
num_pings = 0
def __init__(self, database=None, user=None):
self.database = database
self.user = user
self.valid = False
if database == 'error':
raise OperationalError
self.open_cursors = 0
self.num_uses = 0
self.num_queries = 0
self.num_pings = 0
self.session = []
self.valid = True
def close(self):
if not self.valid:
raise InternalError
self.open_cursors = 0
self.num_uses = 0
self.num_queries = 0
self.session = []
self.valid = False
def commit(self):
if not self.valid:
raise InternalError
self.session.append('commit')
def rollback(self):
if not self.valid:
raise InternalError
self.session.append('rollback')
def ping(self):
cls = self.__class__
cls.num_pings += 1
if not cls.has_ping:
raise AttributeError
if not self.valid:
raise OperationalError
def cursor(self, name=None):
if not self.valid:
raise InternalError
return Cursor(self, name)
class Cursor:
def __init__(self, con, name=None):
self.con = con
self.valid = False
if name == 'error':
raise OperationalError
self.result = None
self.inputsizes = []
self.outputsizes = {}
con.open_cursors += 1
self.valid = True
def close(self):
if not self.valid:
raise InternalError
self.con.open_cursors -= 1
self.valid = False
def execute(self, operation):
if not self.valid or not self.con.valid:
raise InternalError
self.con.num_uses += 1
if operation.startswith('select '):
self.con.num_queries += 1
self.result = operation[7:]
elif operation.startswith('set '):
self.con.session.append(operation[4:])
self.result = None
elif operation == 'get sizes':
self.result = (self.inputsizes, self.outputsizes)
self.inputsizes = []
self.outputsizes = {}
else:
raise ProgrammingError
def fetchone(self):
if not self.valid:
raise InternalError
result = self.result
self.result = None
return result
def callproc(self, procname):
if not self.valid or not self.con.valid or not procname:
raise InternalError
self.con.num_uses += 1
def setinputsizes(self, sizes):
if not self.valid:
raise InternalError
self.inputsizes = sizes
def setoutputsize(self, size, column=None):
if not self.valid:
raise InternalError
self.outputsizes[column] = size
def __del__(self):
if self.valid:
self.close()
| mit |
amaret/wind.util | windutil/main.py | 1 | 5085 | # Copyright Amaret, Inc 2011-2015. All rights reserved.
''' Wind Docker Container Util '''
import os
import time
import json
from subprocess import call
from windutil.argparser import parse
from windutil.scrlogger import ScrLogger
LOG = ScrLogger()
DEFAULT_CONTAINER_CONFIG = [
{
'name': 'redis',
'priority': 0,
'run': 'docker run --name redis -p 6379:6379 -d redis',
'image': 'redis'
}
]
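# Entries may also carry an optional 'delay' key (seconds to sleep after the
# container is started or run; consumed in _container_command and _run below).
# 'image' is what the pull command fetches and 'priority' orders start/stop.
# An illustrative extra entry:
# {
#     'name': 'app',
#     'priority': 1,
#     'run': 'docker run --name app -d app-image',
#     'image': 'app-image',
#     'delay': 5
# }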
CONFIG_FILE_PATH = os.path.expanduser('~') + '/.wutilrc'
def _read_config():
''' look up config, if not found init '''
rcfile = os.path.expanduser('~') + '/.wutilrc'
if not os.path.exists(rcfile):
wutilrc = open(CONFIG_FILE_PATH, 'w')
LOG.debug("writing config to %s" % CONFIG_FILE_PATH)
wutilrc.write(
json.dumps(
DEFAULT_CONTAINER_CONFIG,
sort_keys=True,
indent=4,
separators=(',', ': ')))
wutilrc.close()
return DEFAULT_CONTAINER_CONFIG
LOG.debug("reading config from %s" % CONFIG_FILE_PATH)
wutilrc = open(CONFIG_FILE_PATH, 'r')
json_str = wutilrc.read()
wutilrc.close()
return json.loads(json_str)
def _load_config():
'''store by name for key'''
info = {}
for cntr in CONTAINER_CONFIG:
info[cntr['name']] = cntr
return info
CONTAINER_CONFIG = _read_config()
CONTAINER_INFO = _load_config()
def _rm(pargs):
'''rm'''
if pargs.use_all:
_container_command('rm', _sorted_config_names())
else:
_container_command('rm', pargs.containers)
def _start(pargs):
'''start'''
if pargs.use_all:
_container_command('start', _sorted_config_names())
else:
_container_command('start', pargs.containers)
def _stop(pargs):
'''stop'''
if pargs.use_all:
_container_command('stop', _reversed_config_names())
else:
_container_command('stop', pargs.containers)
def _container_command(command, names):
'''command'''
LOG.debug(command + "(ing) ")
for container in names:
LOG.debug(command + " " + container)
call(["docker", command, container])
if 'delay' in CONTAINER_INFO[container]:
secs = CONTAINER_INFO[container]['delay']
LOG.debug("sleeping %s seconds" % (secs))
time.sleep(secs)
def _run(pargs):
'''run'''
LOG.debug("run(ing)")
names = []
if pargs.use_all:
names = _sorted_config_names()
else:
names = pargs.containers
for container in names:
LOG.debug("run " + container)
arglist = CONTAINER_INFO[container]['run'].split()
call(arglist)
if 'delay' in CONTAINER_INFO[container]:
secs = CONTAINER_INFO[container]['delay']
LOG.debug("sleeping %s seconds" % (secs))
time.sleep(secs)
def _pull(pargs):
'''run'''
LOG.debug("pull(ing)")
names = []
if pargs.use_all:
names = _sorted_config_names()
else:
names = pargs.containers
for container in names:
LOG.debug("pull " + container)
img = CONTAINER_INFO[container]['image']
call(['docker', 'pull', img])
def _upgrade(pargs):
'''upgrade'''
if pargs.local is False:
_pull(pargs)
_stop(pargs)
_rm(pargs)
_run(pargs)
def _ps(pargs):
'''ps'''
option = '-a'
from subprocess import Popen, PIPE
process = Popen(["docker", "ps", option], stdout=PIPE)
(output, _) = process.communicate()
process.wait()
    lines = output.split('\n')
status_idx = lines[0].index('STATUS')
print lines[0][status_idx:]
keys = CONTAINER_INFO.keys()
for line in lines[1:]:
if len(line) > 0:
cname = line[status_idx:].split()[-1]
if pargs.all or cname in keys:
print line[status_idx:]
def _reversed_config_names():
'''reverse list'''
return [x for x in reversed(_sorted_config_names())]
def _sorted_config_names():
'''manage dependencies'''
newlist = sorted(CONTAINER_INFO.values(), key=lambda x: x['priority'],
reverse=False)
return [x['name'] for x in newlist]
def main():
'''main entry point'''
# pylint: disable=too-many-branches
try:
cmd, pargs = parse()
pargs.use_all = 'containers' in pargs and pargs.containers[0] == 'all'
        if cmd == 'init':
            print "Initialized"
            return
        if cmd == 'ps':
            _ps(pargs)
            return
        if cmd == 'start':
            _start(pargs)
        if cmd == 'login':
            print "login command"
        if cmd == 'pull':
            _pull(pargs)
        if cmd == 'rm':
            _rm(pargs)
        if cmd == 'run':
            _run(pargs)
        if cmd == 'stop':
            _stop(pargs)
        if cmd == 'upgrade':
            _upgrade(pargs)
# pylint: disable=broad-except
except Exception, ex:
LOG.error(ex)
import traceback
trace = traceback.format_exc()
LOG.trace(trace)
| gpl-2.0 |
miconof/headphones | headphones/notifiers.py | 1 | 28911 | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import logger, helpers, common, request
from xml.dom import minidom
from httplib import HTTPSConnection
from urlparse import parse_qsl
from urllib import urlencode
from pynma import pynma
import base64
import cherrypy
import urllib
import urllib2
import headphones
import os.path
import subprocess
import gntp.notifier
import json
import oauth2 as oauth
import pythontwitter as twitter
from email.mime.text import MIMEText
import smtplib
import email.utils
class GROWL(object):
"""
Growl notifications, for OS X.
"""
def __init__(self):
self.enabled = headphones.CONFIG.GROWL_ENABLED
self.host = headphones.CONFIG.GROWL_HOST
self.password = headphones.CONFIG.GROWL_PASSWORD
def conf(self, options):
return cherrypy.config['config'].get('Growl', options)
def notify(self, message, event):
if not self.enabled:
return
# Split host and port
if self.host == "":
host, port = "localhost", 23053
if ":" in self.host:
host, port = self.host.split(':', 1)
port = int(port)
else:
host, port = self.host, 23053
# If password is empty, assume none
if self.password == "":
password = None
else:
password = self.password
# Register notification
growl = gntp.notifier.GrowlNotifier(
applicationName='Headphones',
notifications=['New Event'],
defaultNotifications=['New Event'],
hostname=host,
port=port,
password=password
)
try:
growl.register()
except gntp.notifier.errors.NetworkError:
logger.warning(u'Growl notification failed: network error')
return
except gntp.notifier.errors.AuthError:
logger.warning(u'Growl notification failed: authentication error')
return
# Fix message
message = message.encode(headphones.SYS_ENCODING, "replace")
# Send it, including an image
image_file = os.path.join(str(headphones.PROG_DIR),
"data/images/headphoneslogo.png")
with open(image_file, 'rb') as f:
image = f.read()
try:
growl.notify(
noteType='New Event',
title=event,
description=message,
icon=image
)
except gntp.notifier.errors.NetworkError:
logger.warning(u'Growl notification failed: network error')
return
logger.info(u"Growl notifications sent.")
def updateLibrary(self):
#For uniformity reasons not removed
return
def test(self, host, password):
self.enabled = True
self.host = host
self.password = password
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class PROWL(object):
"""
Prowl notifications.
"""
def __init__(self):
self.enabled = headphones.CONFIG.PROWL_ENABLED
self.keys = headphones.CONFIG.PROWL_KEYS
self.priority = headphones.CONFIG.PROWL_PRIORITY
def conf(self, options):
return cherrypy.config['config'].get('Prowl', options)
def notify(self, message, event):
if not headphones.CONFIG.PROWL_ENABLED:
return
http_handler = HTTPSConnection("api.prowlapp.com")
data = {'apikey': headphones.CONFIG.PROWL_KEYS,
'application': 'Headphones',
'event': event,
'description': message.encode("utf-8"),
'priority': headphones.CONFIG.PROWL_PRIORITY}
http_handler.request("POST",
"/publicapi/add",
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
logger.info(u"Prowl notifications sent.")
return True
elif request_status == 401:
logger.info(u"Prowl auth failed: %s" % response.reason)
return False
else:
logger.info(u"Prowl notification failed.")
return False
def updateLibrary(self):
#For uniformity reasons not removed
return
def test(self, keys, priority):
self.enabled = True
self.keys = keys
self.priority = priority
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class MPC(object):
"""
MPC library update
"""
def __init__(self):
pass
def notify(self):
subprocess.call(["mpc", "update"])
class XBMC(object):
"""
XBMC notifications
"""
def __init__(self):
self.hosts = headphones.CONFIG.XBMC_HOST
self.username = headphones.CONFIG.XBMC_USERNAME
self.password = headphones.CONFIG.XBMC_PASSWORD
def _sendhttp(self, host, command):
url_command = urllib.urlencode(command)
url = host + '/xbmcCmds/xbmcHttp/?' + url_command
if self.password:
return request.request_content(url, auth=(self.username, self.password))
else:
return request.request_content(url)
def _sendjson(self, host, method, params={}):
data = [{'id': 0, 'jsonrpc': '2.0', 'method': method, 'params': params}]
headers = {'Content-Type': 'application/json'}
url = host + '/jsonrpc'
if self.password:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers, auth=(self.username, self.password))
else:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers)
if response:
return response[0]['result']
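    # The JSON-RPC payload sent by _sendjson looks like (illustrative):
    #   [{"id": 0, "jsonrpc": "2.0", "method": "AudioLibrary.Scan", "params": {}}]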
def update(self):
# From what I read you can't update the music library on a per directory or per path basis
        # so we need to update the whole thing
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library update command to XBMC @ ' + host)
request = self._sendjson(host, 'AudioLibrary.Scan')
if not request:
logger.warn('Error sending update request to XBMC')
def notify(self, artist, album, albumartpath):
hosts = [x.strip() for x in self.hosts.split(',')]
header = "Headphones"
message = "%s - %s added to your library" % (artist, album)
time = "3000" # in ms
for host in hosts:
            logger.info('Sending notification command to XBMC @ ' + host)
try:
version = self._sendjson(host, 'Application.GetProperties', {'properties': ['version']})['version']['major']
if version < 12: #Eden
notification = header + "," + message + "," + time + "," + albumartpath
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
else: #Frodo
params = {'title': header, 'message': message, 'displaytime': int(time), 'image': albumartpath}
request = self._sendjson(host, 'GUI.ShowNotification', params)
if not request:
raise Exception
except Exception:
logger.error('Error sending notification request to XBMC')
class LMS(object):
"""
Class for updating a Logitech Media Server
"""
def __init__(self):
self.hosts = headphones.CONFIG.LMS_HOST
def _sendjson(self, host):
data = {'id': 1, 'method': 'slim.request', 'params': ["", ["rescan"]]}
data = json.JSONEncoder().encode(data)
content = {'Content-Type': 'application/json'}
req = urllib2.Request(host + '/jsonrpc.js', data, content)
try:
handle = urllib2.urlopen(req)
except Exception as e:
logger.warn('Error opening LMS url: %s' % e)
return
response = json.JSONDecoder().decode(handle.read())
try:
return response['result']
except:
logger.warn('LMS returned error: %s' % response['error'])
return response['error']
def update(self):
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library rescan command to LMS @ ' + host)
request = self._sendjson(host)
if request:
logger.warn('Error sending rescan request to LMS')
class Plex(object):
def __init__(self):
self.server_hosts = headphones.CONFIG.PLEX_SERVER_HOST
self.client_hosts = headphones.CONFIG.PLEX_CLIENT_HOST
self.username = headphones.CONFIG.PLEX_USERNAME
self.password = headphones.CONFIG.PLEX_PASSWORD
self.token = headphones.CONFIG.PLEX_TOKEN
def _sendhttp(self, host, command):
url = host + '/xbmcCmds/xbmcHttp/?' + command
if self.password:
response = request.request_response(url, auth=(self.username, self.password))
else:
response = request.request_response(url)
return response
def _sendjson(self, host, method, params={}):
data = [{'id': 0, 'jsonrpc': '2.0', 'method': method, 'params': params}]
headers = {'Content-Type': 'application/json'}
url = host + '/jsonrpc'
if self.password:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers, auth=(self.username, self.password))
else:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers)
if response:
return response[0]['result']
def update(self):
# From what I read you can't update the music library on a per directory or per path basis
        # so we need to update the whole thing
hosts = [x.strip() for x in self.server_hosts.split(',')]
for host in hosts:
logger.info('Sending library update command to Plex Media Server@ ' + host)
url = "%s/library/sections" % host
if self.token:
params = {'X-Plex-Token': self.token}
else:
params = False
r = request.request_minidom(url, params=params)
sections = r.getElementsByTagName('Directory')
if not sections:
logger.info(u"Plex Media Server not running on: " + host)
return False
for s in sections:
if s.getAttribute('type') == "artist":
url = "%s/library/sections/%s/refresh" % (host, s.getAttribute('key'))
request.request_response(url, params=params)
def notify(self, artist, album, albumartpath):
hosts = [x.strip() for x in self.client_hosts.split(',')]
header = "Headphones"
message = "%s - %s added to your library" % (artist, album)
time = "3000" # in ms
for host in hosts:
logger.info('Sending notification command to Plex client @ ' + host)
try:
version = self._sendjson(host, 'Application.GetProperties', {'properties': ['version']})['version']['major']
if version < 12: #Eden
notification = header + "," + message + "," + time + "," + albumartpath
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
else: #Frodo
params = {'title': header, 'message': message, 'displaytime': int(time), 'image': albumartpath}
request = self._sendjson(host, 'GUI.ShowNotification', params)
if not request:
raise Exception
except Exception:
logger.error('Error sending notification request to Plex client @ ' + host)
class NMA(object):
def notify(self, artist=None, album=None, snatched=None):
title = 'Headphones'
api = headphones.CONFIG.NMA_APIKEY
nma_priority = headphones.CONFIG.NMA_PRIORITY
logger.debug(u"NMA title: " + title)
logger.debug(u"NMA API: " + api)
logger.debug(u"NMA Priority: " + str(nma_priority))
if snatched:
event = snatched + " snatched!"
message = "Headphones has snatched: " + snatched
else:
event = artist + ' - ' + album + ' complete!'
message = "Headphones has downloaded and postprocessed: " + artist + ' [' + album + ']'
logger.debug(u"NMA event: " + event)
logger.debug(u"NMA message: " + message)
batch = False
p = pynma.PyNMA()
keys = api.split(',')
p.addkey(keys)
if len(keys) > 1:
batch = True
response = p.push(title, event, message, priority=nma_priority, batch_mode=batch)
if not response[api][u'code'] == u'200':
logger.error(u'Could not send notification to NotifyMyAndroid')
return False
else:
return True
class PUSHBULLET(object):
def __init__(self):
self.apikey = headphones.CONFIG.PUSHBULLET_APIKEY
self.deviceid = headphones.CONFIG.PUSHBULLET_DEVICEID
def notify(self, message):
if not headphones.CONFIG.PUSHBULLET_ENABLED:
return
url = "https://api.pushbullet.com/v2/pushes"
data = {'type': "note",
'title': "Headphones",
'body': message}
if self.deviceid:
data['device_iden'] = self.deviceid
headers={'Content-type': "application/json",
'Authorization': 'Bearer ' + headphones.CONFIG.PUSHBULLET_APIKEY}
response = request.request_json(url, method="post", headers=headers, data=json.dumps(data))
if response:
logger.info(u"PushBullet notifications sent.")
return True
else:
logger.info(u"PushBullet notification failed.")
return False
class PUSHALOT(object):
def notify(self, message, event):
if not headphones.CONFIG.PUSHALOT_ENABLED:
return
pushalot_authorizationtoken = headphones.CONFIG.PUSHALOT_APIKEY
logger.debug(u"Pushalot event: " + event)
logger.debug(u"Pushalot message: " + message)
logger.debug(u"Pushalot api: " + pushalot_authorizationtoken)
http_handler = HTTPSConnection("pushalot.com")
data = {'AuthorizationToken': pushalot_authorizationtoken,
'Title': event.encode('utf-8'),
'Body': message.encode("utf-8")}
http_handler.request("POST",
"/api/sendmessage",
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
logger.debug(u"Pushalot response status: %r" % request_status)
logger.debug(u"Pushalot response headers: %r" % response.getheaders())
logger.debug(u"Pushalot response body: %r" % response.read())
if request_status == 200:
logger.info(u"Pushalot notifications sent.")
return True
elif request_status == 410:
logger.info(u"Pushalot auth failed: %s" % response.reason)
return False
else:
logger.info(u"Pushalot notification failed.")
return False
class Synoindex(object):
def __init__(self, util_loc='/usr/syno/bin/synoindex'):
self.util_loc = util_loc
def util_exists(self):
return os.path.exists(self.util_loc)
def notify(self, path):
path = os.path.abspath(path)
if not self.util_exists():
logger.warn("Error sending notification: synoindex utility not found at %s" % self.util_loc)
return
if os.path.isfile(path):
cmd_arg = '-a'
elif os.path.isdir(path):
cmd_arg = '-A'
else:
logger.warn("Error sending notification: Path passed to synoindex was not a file or folder.")
return
cmd = [self.util_loc, cmd_arg, path]
logger.info("Calling synoindex command: %s" % str(cmd))
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=headphones.PROG_DIR)
out, error = p.communicate()
#synoindex never returns any codes other than '0', highly irritating
except OSError, e:
logger.warn("Error sending notification: %s" % str(e))
def notify_multiple(self, path_list):
if isinstance(path_list, list):
for path in path_list:
self.notify(path)
class PUSHOVER(object):
def __init__(self):
self.enabled = headphones.CONFIG.PUSHOVER_ENABLED
self.keys = headphones.CONFIG.PUSHOVER_KEYS
self.priority = headphones.CONFIG.PUSHOVER_PRIORITY
if headphones.CONFIG.PUSHOVER_APITOKEN:
self.application_token = headphones.CONFIG.PUSHOVER_APITOKEN
else:
self.application_token = "LdPCoy0dqC21ktsbEyAVCcwvQiVlsz"
def conf(self, options):
return cherrypy.config['config'].get('Pushover', options)
def notify(self, message, event):
if not headphones.CONFIG.PUSHOVER_ENABLED:
return
url = "https://api.pushover.net/1/messages.json"
data = {'token': self.application_token,
'user': headphones.CONFIG.PUSHOVER_KEYS,
'title': event,
'message': message.encode("utf-8"),
'priority': headphones.CONFIG.PUSHOVER_PRIORITY}
headers = {'Content-type': "application/x-www-form-urlencoded"}
response = request.request_response(url, method="POST", headers=headers, data=data)
if response:
logger.info(u"Pushover notifications sent.")
return True
else:
logger.error(u"Pushover notification failed.")
return False
def updateLibrary(self):
#For uniformity reasons not removed
return
def test(self, keys, priority):
self.enabled = True
self.keys = keys
self.priority = priority
self.notify('Main Screen Activate', 'Test Message')
class TwitterNotifier(object):
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def __init__(self):
self.consumer_key = "oYKnp2ddX5gbARjqX8ZAAg"
self.consumer_secret = "A4Xkw9i5SjHbTk7XT8zzOPqivhj9MmRDR9Qn95YA9sk"
def notify_snatch(self, title):
if headphones.CONFIG.TWITTER_ONSNATCH:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH] + ': ' + title + ' at ' + helpers.now())
def notify_download(self, title):
if headphones.CONFIG.TWITTER_ENABLED:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD] + ': ' + title + ' at ' + helpers.now())
def test_notify(self):
return self._notifyTwitter("This is a test notification from Headphones at " + helpers.now(), force=True)
def _get_authorization(self):
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.info('Requesting temp token from Twitter')
resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
            logger.info('Invalid response from Twitter when requesting temp token: %s' % resp['status'])
else:
request_token = dict(parse_qsl(content))
headphones.CONFIG.TWITTER_USERNAME = request_token['oauth_token']
headphones.CONFIG.TWITTER_PASSWORD = request_token['oauth_token_secret']
return self.AUTHORIZATION_URL + "?oauth_token=" + request_token['oauth_token']
def _get_credentials(self, key):
request_token = {}
request_token['oauth_token'] = headphones.CONFIG.TWITTER_USERNAME
request_token['oauth_token_secret'] = headphones.CONFIG.TWITTER_PASSWORD
request_token['oauth_callback_confirmed'] = 'true'
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(key)
logger.info('Generating and signing request for an access token using key ' + key)
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
logger.info('oauth_consumer: ' + str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.info('oauth_client: ' + str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
logger.info('resp, content: ' + str(resp) + ',' + str(content))
access_token = dict(parse_qsl(content))
logger.info('access_token: ' + str(access_token))
logger.info('resp[status] = ' + str(resp['status']))
if resp['status'] != '200':
            logger.info('The request for a token did not succeed: ' + str(resp['status']), logger.ERROR)
return False
else:
logger.info('Your Twitter Access Token key: %s' % access_token['oauth_token'])
logger.info('Access Token secret: %s' % access_token['oauth_token_secret'])
headphones.CONFIG.TWITTER_USERNAME = access_token['oauth_token']
headphones.CONFIG.TWITTER_PASSWORD = access_token['oauth_token_secret']
return True
def _send_tweet(self, message=None):
username = self.consumer_key
password = self.consumer_secret
access_token_key = headphones.CONFIG.TWITTER_USERNAME
access_token_secret = headphones.CONFIG.TWITTER_PASSWORD
logger.info(u"Sending tweet: " + message)
api = twitter.Api(username, password, access_token_key, access_token_secret)
try:
api.PostUpdate(message)
except Exception as e:
logger.info(u"Error Sending Tweet: %s" % e)
return False
return True
def _notifyTwitter(self, message='', force=False):
prefix = headphones.CONFIG.TWITTER_PREFIX
if not headphones.CONFIG.TWITTER_ENABLED and not force:
return False
return self._send_tweet(prefix + ": " + message)
class OSX_NOTIFY(object):
def __init__(self):
try:
self.objc = __import__("objc")
self.AppKit = __import__("AppKit")
except:
logger.warn('OS X Notification: Cannot import objc or AppKit')
return False
def swizzle(self, cls, SEL, func):
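        # Swap the implementation of selector SEL on cls for func, keeping a
        # reference to the old implementation so the wrapper can delegate to
        # it. Used below to fake the bundle identifier so Notification Center
        # accepts notifications from a plain Python process.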
old_IMP = getattr(cls, SEL, None)
if old_IMP is None:
old_IMP = cls.instanceMethodForSelector_(SEL)
def wrapper(self, *args, **kwargs):
return func(self, old_IMP, *args, **kwargs)
new_IMP = self.objc.selector(
wrapper,
selector=old_IMP.selector,
signature=old_IMP.signature
)
self.objc.classAddMethod(cls, SEL.encode(), new_IMP)
def notify(self, title, subtitle=None, text=None, sound=True, image=None):
try:
self.swizzle(
self.objc.lookUpClass('NSBundle'),
'bundleIdentifier',
self.swizzled_bundleIdentifier
)
NSUserNotification = self.objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = self.objc.lookUpClass('NSUserNotificationCenter')
NSAutoreleasePool = self.objc.lookUpClass('NSAutoreleasePool')
if not NSUserNotification or not NSUserNotificationCenter:
return False
pool = NSAutoreleasePool.alloc().init()
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
if subtitle:
notification.setSubtitle_(subtitle)
if text:
notification.setInformativeText_(text)
if sound:
notification.setSoundName_("NSUserNotificationDefaultSoundName")
if image:
source_img = self.AppKit.NSImage.alloc().initByReferencingFile_(image)
notification.setContentImage_(source_img)
#notification.set_identityImage_(source_img)
notification.setHasActionButton_(False)
notification_center = NSUserNotificationCenter.defaultUserNotificationCenter()
notification_center.deliverNotification_(notification)
del pool
return True
except Exception as e:
logger.warn('Error sending OS X Notification: %s' % e)
return False
def swizzled_bundleIdentifier(self, original, swizzled):
return 'ade.headphones.osxnotify'
class BOXCAR(object):
def __init__(self):
self.url = 'https://new.boxcar.io/api/notifications'
def notify(self, title, message, rgid=None):
try:
if rgid:
message += '<br></br><a href="http://musicbrainz.org/release-group/%s">MusicBrainz</a>' % rgid
data = urllib.urlencode({
'user_credentials': headphones.CONFIG.BOXCAR_TOKEN,
'notification[title]': title.encode('utf-8'),
'notification[long_message]': message.encode('utf-8'),
'notification[sound]': "done"
})
req = urllib2.Request(self.url)
handle = urllib2.urlopen(req, data)
handle.close()
return True
except urllib2.URLError as e:
logger.warn('Error sending Boxcar2 Notification: %s' % e)
return False
class SubSonicNotifier(object):
def __init__(self):
self.host = headphones.CONFIG.SUBSONIC_HOST
self.username = headphones.CONFIG.SUBSONIC_USERNAME
self.password = headphones.CONFIG.SUBSONIC_PASSWORD
def notify(self, albumpaths):
# Correct URL
if not self.host.lower().startswith("http"):
self.host = "http://" + self.host
if not self.host.lower().endswith("/"):
self.host = self.host + "/"
# Invoke request
request.request_response(self.host + "musicFolderSettings.view?scanNow",
auth=(self.username, self.password))
class Email(object):
def notify(self, subject, message):
message = MIMEText(message, 'plain', "utf-8")
message['Subject'] = subject
message['From'] = email.utils.formataddr(('Headphones', headphones.CONFIG.EMAIL_FROM))
message['To'] = headphones.CONFIG.EMAIL_TO
try:
if (headphones.CONFIG.EMAIL_SSL):
mailserver = smtplib.SMTP_SSL(headphones.CONFIG.EMAIL_SMTP_SERVER, headphones.CONFIG.EMAIL_SMTP_PORT)
else:
mailserver = smtplib.SMTP(headphones.CONFIG.EMAIL_SMTP_SERVER, headphones.CONFIG.EMAIL_SMTP_PORT)
if (headphones.CONFIG.EMAIL_TLS):
mailserver.starttls()
mailserver.ehlo()
if headphones.CONFIG.EMAIL_SMTP_USER:
mailserver.login(headphones.CONFIG.EMAIL_SMTP_USER, headphones.CONFIG.EMAIL_SMTP_PASSWORD)
mailserver.sendmail(headphones.CONFIG.EMAIL_FROM, headphones.CONFIG.EMAIL_TO, message.as_string())
mailserver.quit()
return True
except Exception, e:
logger.warn('Error sending Email: %s' % e)
return False
| gpl-3.0 |
termoshtt/DataProcessor | lib/dataprocessor/tests/test_scan.py | 3 | 9544 | # coding=utf-8
"""Test for scan."""
import os
from .utils import TestNodeListAndDir
from ..pipes.scan import directory
class TestScan(TestNodeListAndDir):
"""Unittest for dataprocessor.pipes.scan.
Attributes
----------
tempdir_paths : list
list of project root dir path
node_list : list
"""
def setUp(self):
"""Prepare test environment."""
self._generate_test_directories()
def _generate_test_directories(self):
"""Generate test directories.
Generated directories and files are as follows,
(dir-path, including-dirs, including-files)
('/tmpdir_path', ['run0', 'run1', 'run2'], [])
('/tmpdir_path/run0', ['run0', 'run1'], ['test.conf'])
('/tmpdir_path/run0/run0', ['data'], [])
('/tmpdir_path/run0/run0/data', [], ['hoge.conf'])
('/tmpdir_path/run0/run1', [], ['test.conf'])
('/tmpdir_path/run1', [], ['test.conf'])
('/tmpdir_path/run2', ['data'], [])
('/tmpdir_path/run2/data', [], ['test.conf'])
('/tmpdir_path/run2/dummy', [], [])
('/tmpdir_path/run3', ['data'], []) # symboliclink to run2
"""
import tempfile
self.tempdir_path = tempfile.mkdtemp()
root = self.tempdir_path
for i in range(3):
os.mkdir(os.path.join(root, "run" + str(i)))
for i in range(2):
open(os.path.join(root, "run" + str(i), "test.conf"),
"w").close()
for i in range(2):
os.mkdir(os.path.join(root, "run0", "run" + str(i)))
os.mkdir(os.path.join(root, "run2", "data"))
os.mkdir(os.path.join(root, "run2", "dummy"))
os.mkdir(os.path.join(root, "run0", "run0", "data"))
open(os.path.join(root, "run0", "run1", "test.conf"), "w").close()
open(os.path.join(root, "run2", "data", "test.conf"), "w").close()
open(os.path.join(root, "run0", "run0", "data", "hoge.conf"),
"w").close()
os.symlink(os.path.join(root, "run2"), os.path.join(root, "run3"))
def test_directory_for_first_scan1(self):
"""Test for initial scan."""
node_list = []
root_dir = self.tempdir_path
        # whitelist specifies a directory.
node_list = directory(node_list, root_dir, "data")
compare_node_list = [
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, "run2")],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, "run0"),
'parents': [],
'children': [os.path.join(root_dir, "run0/run0")],
'name': 'run0',
'type': 'project'},
{'path': os.path.join(root_dir, "run0/run0"),
'parents': [os.path.join(root_dir, "run0")],
'children': [],
'name': 'run0',
'type': 'run'},
{'path': os.path.join(root_dir, "run2"),
'parents': [root_dir],
'children': [],
'name': 'run2',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_first_scan2(self):
"""Test for initial scan."""
node_list = []
root_dir = self.tempdir_path
        # whitelist has two elements.
node_list = directory(node_list, root_dir,
["data/hoge*", "data/test*"])
compare_node_list = [
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, "run2")],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, "run0"),
'parents': [],
'children': [os.path.join(root_dir, "run0/run0")],
'name': 'run0',
'type': 'project'},
{'path': os.path.join(root_dir, "run0/run0"),
'parents': [os.path.join(root_dir, "run0")],
'children': [],
'name': 'run0',
'type': 'run'},
{'path': os.path.join(root_dir, "run2"),
'parents': [root_dir],
'children': [],
'name': 'run2',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_first_scan3(self):
"""Test for initial scan."""
node_list = []
root_dir = self.tempdir_path
# whitelist has `..`.
node_list = directory(node_list, root_dir,
"../data")
compare_node_list = [
{'path': os.path.join(root_dir, "run0", "run0"),
'parents': [],
'children': [os.path.join(root_dir, "run0", "run0", "data")],
'name': "run0",
'type': 'project'},
{'path': os.path.join(root_dir, "run0", "run0", "data"),
'parents': [os.path.join(root_dir, "run0", "run0")],
'children': [],
'name': 'data',
'type': 'run'},
{'path': os.path.join(root_dir, "run2"),
'parents': [],
'children': [os.path.join(root_dir, "run2", "data"),
os.path.join(root_dir, "run2", "dummy")],
'name': "run2",
'type': 'project'},
{'path': os.path.join(root_dir, "run2", "data"),
'parents': [os.path.join(root_dir, "run2")],
'children': [],
'name': 'data',
'type': 'run'},
# This path is also added to node list.
{'path': os.path.join(root_dir, "run2", "dummy"),
'parents': [os.path.join(root_dir, "run2")],
'children': [],
'name': 'dummy',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_first_scan4(self):
"""Test for initial scan with symbolic link."""
node_list = []
root_dir = self.tempdir_path
# followlinks is `True`.
node_list = directory(node_list, root_dir,
"data/test.conf", followlinks=True)
compare_node_list = [
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, "run2"),
os.path.join(root_dir, "run3")],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, "run2"),
'parents': [root_dir],
'children': [],
'name': 'run2',
'type': 'run'},
# Symbolic link is also added to node list.
{'path': os.path.join(root_dir, "run3"),
'parents': [root_dir],
'children': [],
'name': 'run3',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_rescan(self):
"""Test for rescan."""
root_dir = self.tempdir_path
node_list = [{'path': os.path.join(root_dir, "run0"),
'parents': [], # empty
'children': [], # empty
'name': 'run0',
'type': 'run'}]
node_list = directory(node_list, root_dir, "*.conf")
compare_node_list = [
{'path': os.path.join(root_dir, 'run0'),
'parents': [root_dir], # fill
'children': [os.path.join(root_dir, 'run0/run1')], # fill
'name': 'run0',
'type': 'run'},
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, 'run0'),
os.path.join(root_dir, 'run1')],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, 'run0/run0'),
'parents': [],
'children': [os.path.join(root_dir, 'run0/run0/data')],
'name': 'run0',
'type': 'project'},
{'path': os.path.join(root_dir, 'run0/run0/data'),
'parents': [os.path.join(root_dir, 'run0/run0')],
'children': [],
'name': 'data',
'type': 'run'},
{'path': os.path.join(root_dir, 'run0/run1'),
'parents': [os.path.join(root_dir, 'run0')],
'children': [],
'name': 'run1',
'type': 'run'},
{'path': os.path.join(root_dir, 'run1'),
'parents': [root_dir],
'children': [],
'name': 'run1',
'type': 'run'},
{'path': os.path.join(root_dir, 'run2'),
'parents': [],
'children': [os.path.join(root_dir, 'run2/data')],
'name': 'run2',
'type': 'project'},
{'path': os.path.join(root_dir, 'run2/data'),
'parents': [os.path.join(root_dir, 'run2')],
'children': [],
'name': 'data',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_rescan_failed(self):
root_dir = self.tempdir_path
node_list = [{'path': os.path.join(root_dir, "run0"),
                      'children': [], # empty, and there is no 'parents' key.
'name': 'run0',
'type': 'run'}]
with self.assertRaises(KeyError):
node_list = directory(node_list, root_dir, ["*.conf"])
| gpl-3.0 |
freakboy3742/django | tests/forms_tests/field_tests/test_charfield.py | 27 | 6355 | from django.core.exceptions import ValidationError
from django.forms import (
CharField, HiddenInput, PasswordInput, Textarea, TextInput,
)
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class CharFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
msg = "'Ensure this value has at most 10 characters (it has 11).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('1234567890a')
self.assertEqual(f.max_length, 10)
self.assertIsNone(f.min_length)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Setting min_length or max_length to something that is not a number
raises an exception.
"""
with self.assertRaises(ValueError):
CharField(min_length='a')
with self.assertRaises(ValueError):
CharField(max_length='a')
msg = '__init__() takes 1 positional argument but 2 were given'
with self.assertRaisesMessage(TypeError, msg):
CharField('a')
def test_charfield_widget_attrs(self):
"""
CharField.widget_attrs() always returns a dictionary and includes
minlength/maxlength if min_length/max_length are defined on the field
and the widget is not hidden.
"""
# Return an empty dictionary if max_length and min_length are both None.
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Return a maxlength attribute equal to max_length.
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
# Return a minlength attribute equal to min_length.
f = CharField(min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'minlength': '5'})
# Return both maxlength and minlength when both max_length and
# min_length are set.
f = CharField(max_length=10, min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(HiddenInput()), {})
def test_charfield_strip(self):
"""
Values have whitespace stripped but not if strip=False.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
def test_strip_before_checking_empty(self):
"""
A whitespace-only value, ' ', is stripped to an empty string and then
converted to the empty value, None.
"""
f = CharField(required=False, empty_value=None)
self.assertIsNone(f.clean(' '))
def test_clean_non_string(self):
"""CharField.clean() calls str(value) before stripping it."""
class StringWrapper:
def __init__(self, v):
self.v = v
def __str__(self):
return self.v
value = StringWrapper(' ')
f1 = CharField(required=False, empty_value=None)
self.assertIsNone(f1.clean(value))
f2 = CharField(strip=False)
self.assertEqual(f2.clean(value), ' ')
def test_charfield_disabled(self):
f = CharField(disabled=True)
self.assertWidgetRendersTo(f, '<input type="text" name="f" id="id_f" disabled required>')
def test_null_characters_prohibited(self):
f = CharField()
msg = 'Null characters are not allowed.'
with self.assertRaisesMessage(ValidationError, msg):
f.clean('\x00something')
| bsd-3-clause |
shaggytwodope/qutebrowser | tests/end2end/fixtures/test_webserver.py | 9 | 2499 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test the httpbin webserver used for tests."""
import json
import urllib.request
import urllib.error
import pytest
@pytest.mark.parametrize('path, content, expected', [
('/', '<title>httpbin(1): HTTP Client Testing Service</title>', True),
# https://github.com/Runscope/httpbin/issues/245
('/', 'www.google-analytics.com', False),
('/data/hello.txt', 'Hello World!', True),
])
def test_httpbin(httpbin, qtbot, path, content, expected):
with qtbot.waitSignal(httpbin.new_request, timeout=100):
url = 'http://localhost:{}{}'.format(httpbin.port, path)
try:
response = urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
# "Though being an exception (a subclass of URLError), an HTTPError
# can also function as a non-exceptional file-like return value
# (the same thing that urlopen() returns)."
# ...wat
print(e.read().decode('utf-8'))
raise
data = response.read().decode('utf-8')
assert httpbin.get_requests() == [httpbin.ExpectedRequest('GET', path)]
assert (content in data) == expected
@pytest.mark.parametrize('line, verb, path, equal', [
({'verb': 'GET', 'path': '/', 'status': 200}, 'GET', '/', True),
({'verb': 'GET', 'path': '/foo/', 'status': 200}, 'GET', '/foo', True),
({'verb': 'GET', 'path': '/', 'status': 200}, 'GET', '/foo', False),
({'verb': 'POST', 'path': '/', 'status': 200}, 'GET', '/', False),
])
def test_expected_request(httpbin, line, verb, path, equal):
expected = httpbin.ExpectedRequest(verb, path)
request = httpbin.Request(json.dumps(line))
assert (expected == request) == equal
| gpl-3.0 |
goliveirab/odoo | addons/point_of_sale/wizard/pos_open_statement.py | 387 | 4217 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class pos_open_statement(osv.osv_memory):
_name = 'pos.open.statement'
_description = 'Open Statements'
def open_statement(self, cr, uid, ids, context=None):
"""
Open the statements
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return: an act_window action listing the opened cash statements
"""
data = {}
mod_obj = self.pool.get('ir.model.data')
statement_obj = self.pool.get('account.bank.statement')
sequence_obj = self.pool.get('ir.sequence')
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
st_ids = []
j_ids = journal_obj.search(cr, uid, [('journal_user','=',1)], context=context)
if not j_ids:
raise osv.except_osv(_('No Cash Register Defined!'), _('You have to define which payment method must be available in the point of sale by reusing existing bank and cash through "Accounting / Configuration / Journals / Journals". Select a journal and check the field "PoS Payment Method" from the "Point of Sale" tab. You can also create new payment methods directly from menu "PoS Backend / Configuration / Payment Methods".'))
for journal in journal_obj.browse(cr, uid, j_ids, context=context):
ids = statement_obj.search(cr, uid, [('state', '!=', 'confirm'), ('user_id', '=', uid), ('journal_id', '=', journal.id)], context=context)
if journal.sequence_id:
number = sequence_obj.next_by_id(cr, uid, journal.sequence_id.id, context=context)
else:
number = sequence_obj.next_by_code(cr, uid, 'account.cash.statement', context=context)
data.update({
'journal_id': journal.id,
'user_id': uid,
'state': 'draft',
'name': number
})
statement_id = statement_obj.create(cr, uid, data, context=context)
st_ids.append(int(statement_id))
if journal.cash_control:
statement_obj.button_open(cr, uid, [statement_id], context)
tree_res = mod_obj.get_object_reference(cr, uid, 'point_of_sale', 'view_cash_statement_pos_tree')
tree_id = tree_res and tree_res[1] or False
form_res = mod_obj.get_object_reference(cr, uid, 'account', 'view_bank_statement_form2')
form_id = form_res and form_res[1] or False
search_res = mod_obj.get_object_reference(cr, uid, 'account', 'view_account_bank_statement_filter')
search_id = search_res and search_res[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('List of Cash Registers'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.bank.statement',
'domain': str([('id', 'in', st_ids)]),
'views': [(tree_id, 'tree'), (form_id, 'form')],
'search_view_id': search_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sorenk/ansible | lib/ansible/modules/cloud/amazon/ec2_snapshot.py | 27 | 9687 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
      - whether to create or remove the snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
      - If the volume's most recent snapshot has started less than I(last_snapshot_min_age) minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
"""
Gets the most recently created snapshot and optionally filters the result
if the snapshot is too old
:param snapshots: list of snapshots to search
:param max_snapshot_age_secs: filter the result if its older than this
:param now: simulate time -- used for unit testing
:return:
"""
if len(snapshots) == 0:
return None
if not now:
now = datetime.datetime.utcnow()
youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
    # See if the snapshot is younger than the given max age
snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
snapshot_age = now - snapshot_start
if max_snapshot_age_secs is not None:
if snapshot_age.total_seconds() > max_snapshot_age_secs:
return None
return youngest_snapshot
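# For example (a sketch; assumes `snaps` is a list of boto Snapshot objects):
#   newest = _get_most_recent_snapshot(snaps, max_snapshot_age_secs=3600)
#   # newest is None if no snapshot exists or the latest one is over an hour old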
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot:
:param wait_timeout_secs: fail this step after this many seconds
:param sleep_func:
:return:
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
def create_snapshot(module, ec2, state=None, description=None, wait=None,
wait_timeout=None, volume_id=None, instance_id=None,
snapshot_id=None, device_name=None, snapshot_tags=None,
last_snapshot_min_age=None):
snapshot = None
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
if state == 'absent':
if not snapshot_id:
module.fail_json(msg='snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
if last_snapshot_min_age > 0:
try:
current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
# Create a new snapshot if we didn't find an existing one to use
if snapshot is None:
snapshot = ec2.create_snapshot(volume_id, description=description)
changed = True
if wait:
if not _create_with_wait(snapshot, wait_timeout):
module.fail_json(msg='Timed out while creating snapshot.')
if snapshot_tags:
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=changed,
snapshot_id=snapshot.id,
volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size,
tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id=dict(),
description=dict(),
instance_id=dict(),
snapshot_id=dict(),
device_name=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=0),
last_snapshot_min_age=dict(type='int', default=0),
snapshot_tags=dict(type='dict', default=dict()),
state=dict(choices=['absent', 'present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
return module
def main():
module = create_snapshot_ansible_module()
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
last_snapshot_min_age = module.params.get('last_snapshot_min_age')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
create_snapshot(
module=module,
state=state,
description=description,
wait=wait,
wait_timeout=wait_timeout,
ec2=ec2,
volume_id=volume_id,
instance_id=instance_id,
snapshot_id=snapshot_id,
device_name=device_name,
snapshot_tags=snapshot_tags,
last_snapshot_min_age=last_snapshot_min_age
)
if __name__ == '__main__':
main()
| gpl-3.0 |
bluestemscott/librarygadget | librarygadget/librarybot/migrations/0001_initial.py | 1 | 15532 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('librarybot_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('api_key', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('account_level', self.gf('django.db.models.fields.CharField')(default='free', max_length=10)),
('paid_last_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('paid_first_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
))
db.send_create_signal('librarybot', ['UserProfile'])
# Adding model 'Library'
db.create_table('librarybot_library', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
('catalogurl', self.gf('django.db.models.fields.URLField')(max_length=200)),
('librarysystem', self.gf('django.db.models.fields.CharField')(max_length=20)),
('renew_supported_code', self.gf('django.db.models.fields.CharField')(default='untested', max_length=10)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('lastmodified', self.gf('django.db.models.fields.DateField')(auto_now=True, blank=True)),
))
db.send_create_signal('librarybot', ['Library'])
# Adding model 'Patron'
db.create_table('librarybot_patron', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('library', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Library'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('patronid', self.gf('django.db.models.fields.CharField')(max_length=40)),
('pin', self.gf('django.db.models.fields.CharField')(max_length=75)),
('name', self.gf('django.db.models.fields.CharField')(max_length=150, null=True)),
('save_history', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('lastchecked', self.gf('django.db.models.fields.DateTimeField')()),
('batch_last_run', self.gf('django.db.models.fields.DateField')(null=True)),
))
db.send_create_signal('librarybot', ['Patron'])
# Adding model 'Item'
db.create_table('librarybot_item', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Patron'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('author', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('outDate', self.gf('django.db.models.fields.DateField')(null=True)),
('dueDate', self.gf('django.db.models.fields.DateField')(null=True)),
('timesRenewed', self.gf('django.db.models.fields.SmallIntegerField')(null=True)),
('isbn', self.gf('django.db.models.fields.CharField')(max_length=25, null=True)),
('asof', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal('librarybot', ['Item'])
# Adding model 'AccessLog'
db.create_table('librarybot_accesslog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Patron'])),
('library', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Library'])),
('viewfunc', self.gf('django.db.models.fields.CharField')(max_length=50)),
('error', self.gf('django.db.models.fields.CharField')(max_length=150)),
('error_stacktrace', self.gf('django.db.models.fields.CharField')(max_length=3000)),
('date', self.gf('django.db.models.fields.DateField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('librarybot', ['AccessLog'])
# Adding model 'LibraryRequest'
db.create_table('librarybot_libraryrequest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('libraryname', self.gf('django.db.models.fields.CharField')(max_length=100)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
('catalogurl', self.gf('django.db.models.fields.URLField')(max_length=200)),
('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('patronid', self.gf('django.db.models.fields.CharField')(max_length=40)),
('password', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal('librarybot', ['LibraryRequest'])
# Adding model 'RenewalResponse'
db.create_table('librarybot_renewalresponse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('token', self.gf('django.db.models.fields.CharField')(max_length=36)),
('response', self.gf('django.db.models.fields.TextField')()),
('cachedate', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('librarybot', ['RenewalResponse'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('librarybot_userprofile')
# Deleting model 'Library'
db.delete_table('librarybot_library')
# Deleting model 'Patron'
db.delete_table('librarybot_patron')
# Deleting model 'Item'
db.delete_table('librarybot_item')
# Deleting model 'AccessLog'
db.delete_table('librarybot_accesslog')
# Deleting model 'LibraryRequest'
db.delete_table('librarybot_libraryrequest')
# Deleting model 'RenewalResponse'
db.delete_table('librarybot_renewalresponse')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'librarybot.accesslog': {
'Meta': {'object_name': 'AccessLog'},
'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'error_stacktrace': ('django.db.models.fields.CharField', [], {'max_length': '3000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'library': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Library']"}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Patron']"}),
'viewfunc': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'librarybot.item': {
'Meta': {'object_name': 'Item'},
'asof': ('django.db.models.fields.DateField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'dueDate': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True'}),
'outDate': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Patron']"}),
'timesRenewed': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'librarybot.library': {
'Meta': {'object_name': 'Library'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'catalogurl': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastmodified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'librarysystem': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'renew_supported_code': ('django.db.models.fields.CharField', [], {'default': "'untested'", 'max_length': '10'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'librarybot.libraryrequest': {
'Meta': {'object_name': 'LibraryRequest'},
'catalogurl': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'libraryname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'patronid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'librarybot.patron': {
'Meta': {'object_name': 'Patron'},
'batch_last_run': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastchecked': ('django.db.models.fields.DateTimeField', [], {}),
'library': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Library']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True'}),
'patronid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'save_history': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'librarybot.renewalresponse': {
'Meta': {'object_name': 'RenewalResponse'},
'cachedate': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'librarybot.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'account_level': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '10'}),
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_first_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'paid_last_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['librarybot']
| mit |
jaredjennings/snowy | libs/openshiftlibs.py | 8 | 3967 | #!/usr/bin/env python
__author__ = 'N. Harrison Ripps'
"""
This library was originally written for the django-example project -
https://github.com/openshift/django-example
by @url(https://github.com/nhr). Since it was placed inside
the django project folder, I removed it when I started working
on my fork -
https://github.com/ZackYovel/django-example
Because it is required by the .openshift/action_hooks/secure_db.py
action hook, and because this library is essentially recommended
by the OpenShift providers, I am adding it back, but placing it
in the libs folder as a generic dependency rather than a
project-specific file.
Running 'grep -r openshiftlibs' found a single file that
references this library: .openshift/action_hooks/secure_db.py.
"""
import hashlib, inspect, os, random, sys
# Gets the secret token provided by OpenShift
# or generates one (this is slightly less secure, but good enough for now)
def get_openshift_secret_token():
token = os.getenv('OPENSHIFT_SECRET_TOKEN')
name = os.getenv('OPENSHIFT_APP_NAME')
uuid = os.getenv('OPENSHIFT_APP_UUID')
if token is not None:
return token
elif (name is not None and uuid is not None):
return hashlib.sha256(name + '-' + uuid).hexdigest()
return None
# Loop through all provided variables and generate secure versions
# If not running on OpenShift, returns defaults and logs an error message
#
# This function calls secure_function and passes an array of:
# {
# 'hash': generated sha hash,
# 'variable': name of variable,
# 'original': original value
# }
def openshift_secure(default_keys, secure_function = 'make_secure_key'):
# Attempts to get secret token
my_token = get_openshift_secret_token()
# Only generate random values if on OpenShift
my_list = default_keys
if my_token is not None:
# Loop over each default_key and set the new value
for key, value in default_keys.iteritems():
# Create hash out of token and this key's name
sha = hashlib.sha256(my_token + '-' + key).hexdigest()
# Pass a dictionary so we can add stuff without breaking existing calls
vals = { 'hash': sha, 'variable': key, 'original': value }
# Call user specified function or just return hash
my_list[key] = sha
if secure_function is not None:
# Pick through the global and local scopes to find the function.
possibles = globals().copy()
possibles.update(locals())
supplied_function = possibles.get(secure_function)
if not supplied_function:
raise Exception("Cannot find supplied security function")
else:
my_list[key] = supplied_function(vals)
else:
calling_file = inspect.stack()[1][1]
if os.getenv('OPENSHIFT_REPO_DIR'):
base = os.getenv('OPENSHIFT_REPO_DIR')
            # str.replace returns a new string, so keep the result
            calling_file = calling_file.replace(base, '')
sys.stderr.write("OPENSHIFT WARNING: Using default values for secure variables, please manually modify in " + calling_file + "\n")
return my_list
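# Illustrative call (hypothetical key name; the default value is a placeholder):
#
#   defaults = {'SECRET_KEY': 'replace-me-with-something-long'}
#   secure = openshift_secure(defaults)
#   # On OpenShift, secure['SECRET_KEY'] is a random string of the same
#   # length, derived deterministically from the gear's secret token.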
# This function transforms default keys into per-deployment random keys.
def make_secure_key(key_info):
hashcode = key_info['hash']
key = key_info['variable']
original = key_info['original']
# These are the legal password characters
# as per the Django source code
# (django/contrib/auth/models.py)
chars = 'abcdefghjkmnpqrstuvwxyz'
chars += 'ABCDEFGHJKLMNPQRSTUVWXYZ'
chars += '23456789'
# Use the hash to seed the RNG
random.seed(int("0x" + hashcode[:8], 0))
# Create a random string the same length as the default
rand_key = ''
    for _ in range(len(original)):
        # randint is inclusive at both ends; cap at len(chars) - 1 so we never
        # pick an empty slice and silently shorten the generated key
        rand_pos = random.randint(0, len(chars) - 1)
        rand_key += chars[rand_pos]
# Reset the RNG
random.seed()
# Set the value
return rand_key
| agpl-3.0 |
rcmachado/pysuru | pysuru/tests/test_http.py | 1 | 1256 | # coding: utf-8
try:
from unittest import mock
except ImportError:
import mock
from pysuru.http import HttpClient
def test_headers_attribute_should_always_have_authorization_header_with_token():
client = HttpClient('TARGET', 'TOKEN')
assert 'Authorization' in client.headers
assert client.headers['Authorization'] == 'bearer TOKEN'
def test_urlopen_should_build_full_url_using_target_and_path():
client = HttpClient('example.com/api', 'TOKEN')
client.conn.request = mock.MagicMock()
client.urlopen('GET', '/sample')
expected_url = 'http://example.com/api/sample'
assert client.conn.request.call_args_list == [
mock.call('GET', expected_url, headers=mock.ANY, fields=None)]
def test_urlopen_should_merge_headers_argument_with_headers_attribute():
my_headers = {
'X-Custom-Header': 'custom value'
}
expected_headers = {
'Authorization': 'bearer TOKEN',
'X-Custom-Header': 'custom value'
}
client = HttpClient('TARGET', 'TOKEN')
client.conn.request = mock.MagicMock()
client.urlopen('GET', '/sample', headers=my_headers)
assert client.conn.request.call_args_list == [
mock.call('GET', mock.ANY, headers=expected_headers, fields=None)]
| mit |
ardumont/linux | scripts/checkkconfigsymbols.py | 88 | 15783 | #!/usr/bin/env python2
"""Find Kconfig symbols that are referenced but not defined."""
# (c) 2014-2015 Valentin Rothberg <[email protected]>
# (c) 2014 Stefan Hengelein <[email protected]>
#
# Licensed under the terms of the GNU GPL License version 2
import difflib
import os
import re
import signal
import sys
from multiprocessing import Pool, cpu_count
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)')
REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
def parse_options():
"""The user interface of this module."""
usage = "%prog [options]\n\n" \
"Run this tool to detect Kconfig symbols that are referenced but " \
"not defined in\nKconfig. The output of this tool has the " \
"format \'Undefined symbol\\tFile list\'\n\n" \
"If no option is specified, %prog will default to check your\n" \
"current tree. Please note that specifying commits will " \
"\'git reset --hard\'\nyour current tree! You may save " \
"uncommitted changes to avoid losing data."
parser = OptionParser(usage=usage)
parser.add_option('-c', '--commit', dest='commit', action='store',
default="",
help="Check if the specified commit (hash) introduces "
"undefined Kconfig symbols.")
parser.add_option('-d', '--diff', dest='diff', action='store',
default="",
help="Diff undefined symbols between two commits. The "
"input format bases on Git log's "
"\'commmit1..commit2\'.")
parser.add_option('-f', '--find', dest='find', action='store_true',
default=False,
help="Find and show commits that may cause symbols to be "
"missing. Required to run with --diff.")
parser.add_option('-i', '--ignore', dest='ignore', action='store',
default="",
help="Ignore files matching this pattern. Note that "
"the pattern needs to be a Python regex. To "
"ignore defconfigs, specify -i '.*defconfig'.")
parser.add_option('-s', '--sim', dest='sim', action='store', default="",
help="Print a list of maximum 10 string-similar symbols.")
parser.add_option('', '--force', dest='force', action='store_true',
default=False,
help="Reset current Git tree even when it's dirty.")
(opts, _) = parser.parse_args()
if opts.commit and opts.diff:
sys.exit("Please specify only one option at once.")
if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
sys.exit("Please specify valid input in the following format: "
"\'commmit1..commit2\'")
if opts.commit or opts.diff:
if not opts.force and tree_is_dirty():
sys.exit("The current Git tree is dirty (see 'git status'). "
"Running this script may\ndelete important data since it "
"calls 'git reset --hard' for some performance\nreasons. "
" Please run this script in a clean Git tree or pass "
"'--force' if you\nwant to ignore this warning and "
"continue.")
if opts.commit:
opts.find = False
if opts.ignore:
try:
re.match(opts.ignore, "this/is/just/a/test.c")
except:
sys.exit("Please specify a valid Python regex.")
return opts
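# Illustrative invocations (commit ids and tags are placeholders):
#   ./scripts/checkkconfigsymbols.py                     # check the whole tree
#   ./scripts/checkkconfigsymbols.py -c 0123abc          # check one commit
#   ./scripts/checkkconfigsymbols.py -d v4.0..v4.1 -f    # diff two revisions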
def main():
"""Main function of this module."""
opts = parse_options()
if opts.sim and not opts.commit and not opts.diff:
sims = find_sims(opts.sim, opts.ignore)
if sims:
print "%s: %s" % (yel("Similar symbols"), ', '.join(sims))
else:
print "%s: no similar symbols found" % yel("Similar symbols")
sys.exit(0)
# dictionary of (un)defined symbols
defined = {}
undefined = {}
if opts.commit or opts.diff:
head = get_head()
# get commit range
commit_a = None
commit_b = None
if opts.commit:
commit_a = opts.commit + "~"
commit_b = opts.commit
elif opts.diff:
split = opts.diff.split("..")
commit_a = split[0]
commit_b = split[1]
undefined_a = {}
undefined_b = {}
# get undefined items before the commit
execute("git reset --hard %s" % commit_a)
undefined_a, _ = check_symbols(opts.ignore)
# get undefined items for the commit
execute("git reset --hard %s" % commit_b)
undefined_b, defined = check_symbols(opts.ignore)
# report cases that are present for the commit but not before
for feature in sorted(undefined_b):
# feature has not been undefined before
if not feature in undefined_a:
files = sorted(undefined_b.get(feature))
undefined[feature] = files
# check if there are new files that reference the undefined feature
else:
files = sorted(undefined_b.get(feature) -
undefined_a.get(feature))
if files:
undefined[feature] = files
# reset to head
execute("git reset --hard %s" % head)
# default to check the entire tree
else:
undefined, defined = check_symbols(opts.ignore)
# now print the output
for feature in sorted(undefined):
print red(feature)
files = sorted(undefined.get(feature))
print "%s: %s" % (yel("Referencing files"), ", ".join(files))
sims = find_sims(feature, opts.ignore, defined)
sims_out = yel("Similar symbols")
if sims:
print "%s: %s" % (sims_out, ', '.join(sims))
else:
print "%s: %s" % (sims_out, "no similar symbols found")
if opts.find:
print "%s:" % yel("Commits changing symbol")
commits = find_commits(feature, opts.diff)
if commits:
for commit in commits:
commit = commit.split(" ", 1)
print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1])
else:
print "\t- no commit found"
print # new line
def yel(string):
"""
Color %string yellow.
"""
return "\033[33m%s\033[0m" % string
def red(string):
"""
Color %string red.
"""
return "\033[31m%s\033[0m" % string
def execute(cmd):
"""Execute %cmd and return stdout. Exit in case of error."""
pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
(stdout, _) = pop.communicate() # wait until finished
if pop.returncode != 0:
sys.exit(stdout)
return stdout
def find_commits(symbol, diff):
"""Find commits changing %symbol in the given range of %diff."""
commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s"
% (symbol, diff))
return [x for x in commits.split("\n") if x]
def tree_is_dirty():
"""Return true if the current working tree is dirty (i.e., if any file has
been added, deleted, modified, renamed or copied but not committed)."""
stdout = execute("git status --porcelain")
for line in stdout:
if re.findall(r"[URMADC]{1}", line[:2]):
return True
return False
def get_head():
"""Return commit hash of current HEAD."""
stdout = execute("git rev-parse HEAD")
return stdout.strip('\n')
def partition(lst, size):
"""Partition list @lst into eveni-sized lists of size @size."""
return [lst[i::size] for i in xrange(size)]
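# Note the round-robin split, e.g. partition([0, 1, 2, 3, 4], 2) -> [[0, 2, 4], [1, 3]].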
def init_worker():
"""Set signal handler to ignore SIGINT."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def find_sims(symbol, ignore, defined = []):
"""Return a list of max. ten Kconfig symbols that are string-similar to
@symbol."""
if defined:
return sorted(difflib.get_close_matches(symbol, set(defined), 10))
pool = Pool(cpu_count(), init_worker)
kfiles = []
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kfiles.append(gitfile)
arglist = []
for part in partition(kfiles, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined.extend(res[0])
return sorted(difflib.get_close_matches(symbol, set(defined), 10))
def get_files():
"""Return a list of all files in the current git directory."""
# use 'git ls-files' to get the worklist
stdout = execute("git ls-files")
if len(stdout) > 0 and stdout[-1] == "\n":
stdout = stdout[:-1]
files = []
for gitfile in stdout.rsplit("\n"):
if ".git" in gitfile or "ChangeLog" in gitfile or \
".log" in gitfile or os.path.isdir(gitfile) or \
gitfile.startswith("tools/"):
continue
files.append(gitfile)
return files
def check_symbols(ignore):
"""Find undefined Kconfig symbols and return a dict with the symbol as key
and a list of referencing files as value. Files matching %ignore are not
checked for undefined symbols."""
pool = Pool(cpu_count(), init_worker)
try:
return check_symbols_helper(pool, ignore)
except KeyboardInterrupt:
pool.terminate()
pool.join()
sys.exit(1)
def check_symbols_helper(pool, ignore):
"""Helper method for check_symbols(). Used to catch keyboard interrupts in
check_symbols() in order to properly terminate running worker processes."""
source_files = []
kconfig_files = []
defined_features = []
referenced_features = dict() # {file: [features]}
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kconfig_files.append(gitfile)
else:
if ignore and not re.match(ignore, gitfile):
continue
# add source files that do not match the ignore pattern
source_files.append(gitfile)
# parse source files
arglist = partition(source_files, cpu_count())
for res in pool.map(parse_source_files, arglist):
referenced_features.update(res)
# parse kconfig files
arglist = []
for part in partition(kconfig_files, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined_features.extend(res[0])
referenced_features.update(res[1])
defined_features = set(defined_features)
# inverse mapping of referenced_features to dict(feature: [files])
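    # e.g. {'a.c': ['FOO'], 'b.c': ['FOO']} becomes {'FOO': set(['a.c', 'b.c'])}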
inv_map = dict()
for _file, features in referenced_features.iteritems():
for feature in features:
inv_map[feature] = inv_map.get(feature, set())
inv_map[feature].add(_file)
referenced_features = inv_map
undefined = {} # {feature: [files]}
for feature in sorted(referenced_features):
# filter some false positives
if feature == "FOO" or feature == "BAR" or \
feature == "FOO_BAR" or feature == "XXX":
continue
if feature not in defined_features:
if feature.endswith("_MODULE"):
# avoid false positives for kernel modules
if feature[:-len("_MODULE")] in defined_features:
continue
undefined[feature] = referenced_features.get(feature)
return undefined, defined_features
def parse_source_files(source_files):
"""Parse each source file in @source_files and return dictionary with source
files as keys and lists of references Kconfig symbols as values."""
referenced_features = dict()
for sfile in source_files:
referenced_features[sfile] = parse_source_file(sfile)
return referenced_features
def parse_source_file(sfile):
"""Parse @sfile and return a list of referenced Kconfig features."""
lines = []
references = []
if not os.path.exists(sfile):
return references
with open(sfile, "r") as stream:
lines = stream.readlines()
for line in lines:
if not "CONFIG_" in line:
continue
features = REGEX_SOURCE_FEATURE.findall(line)
for feature in features:
if not REGEX_FILTER_FEATURES.search(feature):
continue
references.append(feature)
return references
def get_features_in_line(line):
"""Return mentioned Kconfig features in @line."""
return REGEX_FEATURE.findall(line)
def parse_kconfig_files(args):
"""Parse kconfig files and return tuple of defined and references Kconfig
symbols. Note, @args is a tuple of a list of files and the @ignore
pattern."""
kconfig_files = args[0]
ignore = args[1]
defined_features = []
referenced_features = dict()
for kfile in kconfig_files:
defined, references = parse_kconfig_file(kfile)
defined_features.extend(defined)
if ignore and re.match(ignore, kfile):
# do not collect references for files that match the ignore pattern
continue
referenced_features[kfile] = references
return (defined_features, referenced_features)
def parse_kconfig_file(kfile):
"""Parse @kfile and update feature definitions and references."""
lines = []
defined = []
references = []
skip = False
if not os.path.exists(kfile):
return defined, references
with open(kfile, "r") as stream:
lines = stream.readlines()
for i in range(len(lines)):
line = lines[i]
line = line.strip('\n')
line = line.split("#")[0] # ignore comments
if REGEX_KCONFIG_DEF.match(line):
feature_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(feature_def[0])
skip = False
elif REGEX_KCONFIG_HELP.match(line):
skip = True
elif skip:
# ignore content of help messages
pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
features = get_features_in_line(line)
# multi-line statements
while line.endswith("\\"):
i += 1
line = lines[i]
line = line.strip('\n')
features.extend(get_features_in_line(line))
for feature in set(features):
if REGEX_NUMERIC.match(feature):
# ignore numeric values
continue
references.append(feature)
return defined, references
if __name__ == "__main__":
main()
| gpl-2.0 |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/django/core/management/commands/inspectdb.py | 203 | 7614 | import keyword
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
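    # Illustrative usage (database alias is a placeholder):
    #   django-admin.py inspectdb --database=default > models.py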
requires_model_validation = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
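        # e.g. table2model("blog_entry-draft") -> "BlogEntryDraft" (hypothetical table name)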
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield ''
yield 'from %s import models' % self.db_module
yield ''
for table_name in connection.introspection.get_table_list(cursor):
yield 'class %s(models.Model):' % table2model(table_name)
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
column_name = row[0]
att_name = column_name.lower()
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
# If the column name can't be used verbatim as a Python
# attribute, set the "db_column" for this Field.
if ' ' in att_name or '-' in att_name or keyword.iskeyword(att_name) or column_name != att_name:
extra_params['db_column'] = column_name
# Modify the field name to make it Python-compatible.
if ' ' in att_name:
att_name = att_name.replace(' ', '_')
comment_notes.append('Field renamed to remove spaces.')
if '-' in att_name:
att_name = att_name.replace('-', '_')
comment_notes.append('Field renamed to remove dashes.')
if column_name != att_name:
comment_notes.append('Field name made lowercase.')
if i in relations:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
field_type = 'ForeignKey(%s' % rel_to
if att_name.endswith('_id'):
att_name = att_name[:-3]
else:
extra_params['db_column'] = column_name
else:
# Calling `get_field_type` to get the field type string and any
                    # additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
field_type += '('
if keyword.iskeyword(att_name):
att_name += '_field'
comment_notes.append('Field renamed because it was a Python reserved word.')
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(['%s=%r' % (k, v) for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [' class Meta:',
' db_table = %r' % table_name,
'']
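    # Example output (sketch) for get_meta('my_table'):
    #     class Meta:
    #         db_table = 'my_table'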
| gpl-3.0 |
JaneliaSciComp/Neuroptikon | Source/lib/CrossPlatform/networkx/generators/small.py | 1 | 12813 | """
Various small and named graphs, together with some compact generators.
"""
__author__ ="""Aric Hagberg ([email protected])\nPieter Swart ([email protected])"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['make_small_graph',
'LCF_graph',
'bull_graph',
'chvatal_graph',
'cubical_graph',
'desargues_graph',
'diamond_graph',
'dodecahedral_graph',
'frucht_graph',
'heawood_graph',
'house_graph',
'house_x_graph',
'icosahedral_graph',
'krackhardt_kite_graph',
'moebius_kantor_graph',
'octahedral_graph',
'pappus_graph',
'petersen_graph',
'sedgewick_maze_graph',
'tetrahedral_graph',
'truncated_cube_graph',
'truncated_tetrahedron_graph',
'tutte_graph']
from networkx.generators.classic import empty_graph, cycle_graph, path_graph, complete_graph
from networkx.exception import NetworkXError
#------------------------------------------------------------------------------
# Tools for creating small graphs
#------------------------------------------------------------------------------
def make_small_undirected_graph(graph_description, create_using=None):
"""
Return a small undirected graph described by graph_description.
See make_small_graph.
"""
if create_using is not None and create_using.is_directed():
raise NetworkXError("Directed Graph not supported")
return make_small_graph(graph_description, create_using)
def make_small_graph(graph_description, create_using=None):
"""
Return the small graph described by graph_description.
graph_description is a list of the form [ltype,name,n,xlist]
Here ltype is one of "adjacencylist" or "edgelist",
name is the name of the graph and n the number of nodes.
This constructs a graph of n nodes with integer labels 0,..,n-1.
If ltype="adjacencylist" then xlist is an adjacency list
    with exactly n entries, in which the j'th entry (which may be empty)
specifies the nodes connected to vertex j.
e.g. the "square" graph C_4 can be obtained by
>>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[1,3],[2,4],[1,3]]])
or, since we do not need to add edges twice,
>>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[3],[4],[]]])
If ltype="edgelist" then xlist is an edge list
    written as [[v1,w1],[v2,w2],...,[vk,wk]],
    where vj and wj are integers in the range 1,..,n
e.g. the "square" graph C_4 can be obtained by
>>> G=nx.make_small_graph(["edgelist","C_4",4,[[1,2],[3,4],[2,3],[4,1]]])
Use the create_using argument to choose the graph class/type.
"""
ltype=graph_description[0]
name=graph_description[1]
n=graph_description[2]
G=empty_graph(n, create_using)
nodes=G.nodes()
if ltype=="adjacencylist":
adjlist=graph_description[3]
if len(adjlist) != n:
            raise NetworkXError("invalid graph_description")
G.add_edges_from([(u-1,v) for v in nodes for u in adjlist[v]])
elif ltype=="edgelist":
edgelist=graph_description[3]
for e in edgelist:
v1=e[0]-1
v2=e[1]-1
if v1<0 or v1>n-1 or v2<0 or v2>n-1:
                raise NetworkXError("invalid graph_description")
else:
G.add_edge(v1,v2)
G.name=name
return G
def LCF_graph(n,shift_list,repeats,create_using=None):
"""
Return the cubic graph specified in LCF notation.
    LCF notation (LCF = Lederberg-Coxeter-Frucht) is a compressed
notation used in the generation of various cubic Hamiltonian
graphs of high symmetry. See, for example, dodecahedral_graph,
desargues_graph, heawood_graph and pappus_graph below.
n (number of nodes)
The starting graph is the n-cycle with nodes 0,...,n-1.
    (The null graph is returned if n <= 0.)
shift_list = [s1,s2,..,sk], a list of integer shifts mod n,
repeats
integer specifying the number of times that shifts in shift_list
are successively applied to each v_current in the n-cycle
to generate an edge between v_current and v_current+shift mod n.
    As v1 cycles through the nodes of the n-cycle (k*repeats steps in total),
    the shift cycles through shift_list repeats times, and each step connects
    v1 with (v1 + shift) mod n.
The utility graph K_{3,3}
>>> G=nx.LCF_graph(6,[3,-3],3)
The Heawood graph
>>> G=nx.LCF_graph(14,[5,-5],7)
See http://mathworld.wolfram.com/LCFNotation.html for a description
and references.
"""
if create_using is not None and create_using.is_directed():
raise NetworkXError("Directed Graph not supported")
if n <= 0:
return empty_graph(0, create_using)
# start with the n-cycle
G=cycle_graph(n, create_using)
G.name="LCF_graph"
nodes=G.nodes()
n_extra_edges=repeats*len(shift_list)
# edges are added n_extra_edges times
# (not all of these need be new)
if n_extra_edges < 1:
return G
for i in range(n_extra_edges):
shift=shift_list[i%len(shift_list)] #cycle through shift_list
v1=nodes[i%n] # cycle repeatedly through nodes
v2=nodes[(i + shift)%n]
G.add_edge(v1, v2)
return G
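# Worked example (sketch): LCF_graph(6, [3, -3], 3) starts from the 6-cycle and,
# cycling through shifts 3, -3, ..., adds the chords 0-3, 1-4 and 2-5 (each
# attempted twice), which yields the utility graph K_{3,3}.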
#-------------------------------------------------------------------------------
# Various small and named graphs
#-------------------------------------------------------------------------------
def bull_graph(create_using=None):
"""Return the Bull graph. """
description=[
"adjacencylist",
"Bull Graph",
5,
[[2,3],[1,3,4],[1,2,5],[2],[3]]
]
G=make_small_undirected_graph(description, create_using)
return G
def chvatal_graph(create_using=None):
"""Return the Chvatal graph."""
description=[
"adjacencylist",
"Chvatal Graph",
12,
[[2,5,7,10],[3,6,8],[4,7,9],[5,8,10],
[6,9],[11,12],[11,12],[9,12],
[11],[11,12],[],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def cubical_graph(create_using=None):
"""Return the 3-regular Platonic Cubical graph."""
description=[
"adjacencylist",
"Platonic Cubical Graph",
8,
[[2,4,5],[1,3,8],[2,4,7],[1,3,6],
[1,6,8],[4,5,7],[3,6,8],[2,5,7]]
]
G=make_small_undirected_graph(description, create_using)
return G
def desargues_graph(create_using=None):
""" Return the Desargues graph."""
G=LCF_graph(20, [5,-5,9,-9], 5, create_using)
G.name="Desargues Graph"
return G
def diamond_graph(create_using=None):
"""Return the Diamond graph. """
description=[
"adjacencylist",
"Diamond Graph",
4,
[[2,3],[1,3,4],[1,2,4],[2,3]]
]
G=make_small_undirected_graph(description, create_using)
return G
def dodecahedral_graph(create_using=None):
""" Return the Platonic Dodecahedral graph. """
G=LCF_graph(20, [10,7,4,-4,-7,10,-4,7,-7,4], 2, create_using)
G.name="Dodecahedral Graph"
return G
def frucht_graph(create_using=None):
"""Return the Frucht Graph.
The Frucht Graph is the smallest cubical graph whose
automorphism group consists only of the identity element.
"""
G=cycle_graph(7, create_using)
G.add_edges_from([[0,7],[1,7],[2,8],[3,9],[4,9],[5,10],[6,10],
[7,11],[8,11],[8,9],[10,11]])
G.name="Frucht Graph"
return G
def heawood_graph(create_using=None):
""" Return the Heawood graph, a (3,6) cage. """
G=LCF_graph(14, [5,-5], 7, create_using)
G.name="Heawood Graph"
return G
def house_graph(create_using=None):
"""Return the House graph (square with triangle on top)."""
description=[
"adjacencylist",
"House Graph",
5,
[[2,3],[1,4],[1,4,5],[2,3,5],[3,4]]
]
G=make_small_undirected_graph(description, create_using)
return G
def house_x_graph(create_using=None):
"""Return the House graph with a cross inside the house square."""
description=[
"adjacencylist",
"House-with-X-inside Graph",
5,
[[2,3,4],[1,3,4],[1,2,4,5],[1,2,3,5],[3,4]]
]
G=make_small_undirected_graph(description, create_using)
return G
def icosahedral_graph(create_using=None):
"""Return the Platonic Icosahedral graph."""
description=[
"adjacencylist",
"Platonic Icosahedral Graph",
12,
[[2,6,8,9,12],[3,6,7,9],[4,7,9,10],[5,7,10,11],
[6,7,11,12],[7,12],[],[9,10,11,12],
[10],[11],[12],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def krackhardt_kite_graph(create_using=None):
"""
Return the Krackhardt Kite Social Network.
A 10 actor social network introduced by David Krackhardt
to illustrate: degree, betweenness, centrality, closeness, etc.
The traditional labeling is:
Andre=1, Beverley=2, Carol=3, Diane=4,
Ed=5, Fernando=6, Garth=7, Heather=8, Ike=9, Jane=10.
"""
description=[
"adjacencylist",
"Krackhardt Kite Social Network",
10,
[[2,3,4,6],[1,4,5,7],[1,4,6],[1,2,3,5,6,7],[2,4,7],
[1,3,4,7,8],[2,4,5,6,8],[6,7,9],[8,10],[9]]
]
G=make_small_undirected_graph(description, create_using)
return G
def moebius_kantor_graph(create_using=None):
"""Return the Moebius-Kantor graph."""
G=LCF_graph(16, [5,-5], 8, create_using)
G.name="Moebius-Kantor Graph"
return G
def octahedral_graph(create_using=None):
"""Return the Platonic Octahedral graph."""
description=[
"adjacencylist",
"Platonic Octahedral Graph",
6,
[[2,3,4,5],[3,4,6],[5,6],[5,6],[6],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def pappus_graph():
""" Return the Pappus graph."""
G=LCF_graph(18,[5,7,-7,7,-7,-5],3)
G.name="Pappus Graph"
return G
def petersen_graph(create_using=None):
"""Return the Petersen graph."""
description=[
"adjacencylist",
"Petersen Graph",
10,
[[2,5,6],[1,3,7],[2,4,8],[3,5,9],[4,1,10],[1,8,9],[2,9,10],
[3,6,10],[4,6,7],[5,7,8]]
]
G=make_small_undirected_graph(description, create_using)
return G
def sedgewick_maze_graph(create_using=None):
"""
Return a small maze with a cycle.
    This is the maze used in Sedgewick, 3rd Edition, Part 5, Graph
Algorithms, Chapter 18, e.g. Figure 18.2 and following.
Nodes are numbered 0,..,7
"""
G=empty_graph(0, create_using)
G.add_nodes_from(range(8))
G.add_edges_from([[0,2],[0,7],[0,5]])
G.add_edges_from([[1,7],[2,6]])
G.add_edges_from([[3,4],[3,5]])
G.add_edges_from([[4,5],[4,7],[4,6]])
G.name="Sedgewick Maze"
return G
def tetrahedral_graph(create_using=None):
""" Return the 3-regular Platonic Tetrahedral graph."""
G=complete_graph(4, create_using)
G.name="Platonic Tetrahedral graph"
return G
def truncated_cube_graph(create_using=None):
"""Return the skeleton of the truncated cube."""
description=[
"adjacencylist",
"Truncated Cube Graph",
24,
[[2,3,5],[12,15],[4,5],[7,9],
[6],[17,19],[8,9],[11,13],
[10],[18,21],[12,13],[15],
[14],[22,23],[16],[20,24],
[18,19],[21],[20],[24],
[22],[23],[24],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def truncated_tetrahedron_graph(create_using=None):
"""Return the skeleton of the truncated Platonic tetrahedron."""
G=path_graph(12, create_using)
# G.add_edges_from([(1,3),(1,10),(2,7),(4,12),(5,12),(6,8),(9,11)])
G.add_edges_from([(0,2),(0,9),(1,6),(3,11),(4,11),(5,7),(8,10)])
G.name="Truncated Tetrahedron Graph"
return G
def tutte_graph(create_using=None):
"""Return the Tutte graph."""
description=[
"adjacencylist",
"Tutte's Graph",
46,
[[2,3,4],[5,27],[11,12],[19,20],[6,34],
[7,30],[8,28],[9,15],[10,39],[11,38],
[40],[13,40],[14,36],[15,16],[35],
[17,23],[18,45],[19,44],[46],[21,46],
[22,42],[23,24],[41],[25,28],[26,33],
[27,32],[34],[29],[30,33],[31],
[32,34],[33],[],[],[36,39],
[37],[38,40],[39],[],[],
[42,45],[43],[44,46],[45],[],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
| bsd-3-clause |
firebase/grpc-SwiftPM | test/http2_test/http2_server_health_check.py | 13 | 1319 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import hyper
import sys
# Utility to healthcheck the http2 server. Used when starting the server to
# verify that the server is live before tests begin.
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--server_host', type=str, default='localhost')
parser.add_argument('--server_port', type=int, default=8080)
args = parser.parse_args()
server_host = args.server_host
server_port = args.server_port
conn = hyper.HTTP20Connection('%s:%d' % (server_host, server_port))
conn.request('POST', '/grpc.testing.TestService/UnaryCall')
resp = conn.get_response()
if resp.headers.get('grpc-encoding') is None:
sys.exit(1)
else:
sys.exit(0)
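# Example invocation (sketch):
#   python http2_server_health_check.py --server_host localhost --server_port 8080
# An exit status of 0 means the server answered with a grpc-encoding header.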
| apache-2.0 |
navrasio/mxnet | example/reinforcement-learning/ddpg/ddpg.py | 42 | 13263 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from replay_mem import ReplayMem
from utils import discount_return, sample_rewards
import rllab.misc.logger as logger
import pyprind
import mxnet as mx
import numpy as np
class DDPG(object):
def __init__(
self,
env,
policy,
qfunc,
strategy,
ctx=mx.gpu(0),
batch_size=32,
n_epochs=1000,
epoch_length=1000,
memory_size=1000000,
memory_start_size=1000,
discount=0.99,
max_path_length=1000,
eval_samples=10000,
qfunc_updater="adam",
qfunc_lr=1e-4,
policy_updater="adam",
policy_lr=1e-4,
soft_target_tau=1e-3,
n_updates_per_sample=1,
include_horizon_terminal=False,
seed=12345):
mx.random.seed(seed)
np.random.seed(seed)
self.env = env
self.ctx = ctx
self.policy = policy
self.qfunc = qfunc
self.strategy = strategy
self.batch_size = batch_size
self.n_epochs = n_epochs
self.epoch_length = epoch_length
self.memory_size = memory_size
self.memory_start_size = memory_start_size
self.discount = discount
self.max_path_length = max_path_length
self.eval_samples = eval_samples
self.qfunc_updater = qfunc_updater
self.qfunc_lr = qfunc_lr
self.policy_updater = policy_updater
self.policy_lr = policy_lr
self.soft_target_tau = soft_target_tau
self.n_updates_per_sample = n_updates_per_sample
self.include_horizon_terminal = include_horizon_terminal
self.init_net()
# logging
self.qfunc_loss_averages = []
self.policy_loss_averages = []
self.q_averages = []
self.y_averages = []
self.strategy_path_returns = []
def init_net(self):
# qfunc init
qfunc_init = mx.initializer.Normal()
loss_symbols = self.qfunc.get_loss_symbols()
qval_sym = loss_symbols["qval"]
yval_sym = loss_symbols["yval"]
# define loss here
loss = 1.0 / self.batch_size * mx.symbol.sum(
mx.symbol.square(qval_sym - yval_sym))
qfunc_loss = loss
qfunc_updater = mx.optimizer.get_updater(
mx.optimizer.create(self.qfunc_updater,
learning_rate=self.qfunc_lr))
self.qfunc_input_shapes = {
"obs": (self.batch_size, self.env.observation_space.flat_dim),
"act": (self.batch_size, self.env.action_space.flat_dim),
"yval": (self.batch_size, 1)}
self.qfunc.define_loss(qfunc_loss)
self.qfunc.define_exe(
ctx=self.ctx,
init=qfunc_init,
updater=qfunc_updater,
input_shapes=self.qfunc_input_shapes)
# qfunc_target init
qfunc_target_shapes = {
"obs": (self.batch_size, self.env.observation_space.flat_dim),
"act": (self.batch_size, self.env.action_space.flat_dim)
}
self.qfunc_target = qval_sym.simple_bind(ctx=self.ctx,
**qfunc_target_shapes)
# parameters are not shared but initialized the same
for name, arr in self.qfunc_target.arg_dict.items():
if name not in self.qfunc_input_shapes:
self.qfunc.arg_dict[name].copyto(arr)
# policy init
policy_init = mx.initializer.Normal()
loss_symbols = self.policy.get_loss_symbols()
act_sym = loss_symbols["act"]
policy_qval = qval_sym
# note the negative one here: the loss maximizes the average return
loss = -1.0 / self.batch_size * mx.symbol.sum(policy_qval)
policy_loss = loss
policy_loss = mx.symbol.MakeLoss(policy_loss, name="policy_loss")
policy_updater = mx.optimizer.get_updater(
mx.optimizer.create(self.policy_updater,
learning_rate=self.policy_lr))
self.policy_input_shapes = {
"obs": (self.batch_size, self.env.observation_space.flat_dim)}
self.policy.define_exe(
ctx=self.ctx,
init=policy_init,
updater=policy_updater,
input_shapes=self.policy_input_shapes)
        # policy network and q-value network are combined to backpropagate
# gradients from the policy loss
# since the loss is different, yval is not needed
args = {}
for name, arr in self.qfunc.arg_dict.items():
if name != "yval":
args[name] = arr
args_grad = {}
policy_grad_dict = dict(zip(self.qfunc.loss.list_arguments(), self.qfunc.exe.grad_arrays))
for name, arr in policy_grad_dict.items():
if name != "yval":
args_grad[name] = arr
self.policy_executor = policy_loss.bind(
ctx=self.ctx,
args=args,
args_grad=args_grad,
grad_req="write")
self.policy_executor_arg_dict = self.policy_executor.arg_dict
self.policy_executor_grad_dict = dict(zip(
policy_loss.list_arguments(),
self.policy_executor.grad_arrays))
# policy_target init
# target policy only needs to produce actions, not loss
# parameters are not shared but initialized the same
self.policy_target = act_sym.simple_bind(ctx=self.ctx,
**self.policy_input_shapes)
for name, arr in self.policy_target.arg_dict.items():
if name not in self.policy_input_shapes:
self.policy.arg_dict[name].copyto(arr)
def train(self):
memory = ReplayMem(
obs_dim=self.env.observation_space.flat_dim,
act_dim=self.env.action_space.flat_dim,
memory_size=self.memory_size)
itr = 0
path_length = 0
path_return = 0
end = False
obs = self.env.reset()
for epoch in xrange(self.n_epochs):
logger.push_prefix("epoch #%d | " % epoch)
logger.log("Training started")
for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
# run the policy
if end:
                    # reset the environment and strategy when an episode ends
obs = self.env.reset()
self.strategy.reset()
# self.policy.reset()
self.strategy_path_returns.append(path_return)
path_length = 0
path_return = 0
# note action is sampled from the policy not the target policy
act = self.strategy.get_action(obs, self.policy)
nxt, rwd, end, _ = self.env.step(act)
path_length += 1
path_return += rwd
if not end and path_length >= self.max_path_length:
end = True
if self.include_horizon_terminal:
memory.add_sample(obs, act, rwd, end)
else:
memory.add_sample(obs, act, rwd, end)
obs = nxt
if memory.size >= self.memory_start_size:
for update_time in xrange(self.n_updates_per_sample):
batch = memory.get_batch(self.batch_size)
self.do_update(itr, batch)
itr += 1
logger.log("Training finished")
if memory.size >= self.memory_start_size:
self.evaluate(epoch, memory)
logger.dump_tabular(with_prefix=False)
logger.pop_prefix()
# self.env.terminate()
# self.policy.terminate()
def do_update(self, itr, batch):
obss, acts, rwds, ends, nxts = batch
self.policy_target.arg_dict["obs"][:] = nxts
self.policy_target.forward(is_train=False)
next_acts = self.policy_target.outputs[0].asnumpy()
policy_acts = self.policy.get_actions(obss)
self.qfunc_target.arg_dict["obs"][:] = nxts
self.qfunc_target.arg_dict["act"][:] = next_acts
self.qfunc_target.forward(is_train=False)
next_qvals = self.qfunc_target.outputs[0].asnumpy()
# executor accepts 2D tensors
rwds = rwds.reshape((-1, 1))
ends = ends.reshape((-1, 1))
ys = rwds + (1.0 - ends) * self.discount * next_qvals
# since policy_executor shares the grad arrays with qfunc
# the update order could not be changed
self.qfunc.update_params(obss, acts, ys)
        # all needed values were already computed inside update_params;
        # no need to recompute qfunc_loss and qvals
qfunc_loss = self.qfunc.exe.outputs[0].asnumpy()
qvals = self.qfunc.exe.outputs[1].asnumpy()
self.policy_executor.arg_dict["obs"][:] = obss
self.policy_executor.arg_dict["act"][:] = policy_acts
self.policy_executor.forward(is_train=True)
policy_loss = self.policy_executor.outputs[0].asnumpy()
self.policy_executor.backward()
self.policy.update_params(self.policy_executor_grad_dict["act"])
# update target networks
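        # Soft update rule (sketch): params_target <- (1 - tau) * params_target
        # + tau * params, so the targets slowly track the trained networks.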
for name, arr in self.policy_target.arg_dict.items():
if name not in self.policy_input_shapes:
arr[:] = (1.0 - self.soft_target_tau) * arr[:] + \
self.soft_target_tau * self.policy.arg_dict[name][:]
for name, arr in self.qfunc_target.arg_dict.items():
if name not in self.qfunc_input_shapes:
arr[:] = (1.0 - self.soft_target_tau) * arr[:] + \
self.soft_target_tau * self.qfunc.arg_dict[name][:]
self.qfunc_loss_averages.append(qfunc_loss)
self.policy_loss_averages.append(policy_loss)
self.q_averages.append(qvals)
self.y_averages.append(ys)
def evaluate(self, epoch, memory):
if epoch == self.n_epochs - 1:
logger.log("Collecting samples for evaluation")
rewards = sample_rewards(env=self.env,
policy=self.policy,
eval_samples=self.eval_samples,
max_path_length=self.max_path_length)
average_discounted_return = np.mean(
[discount_return(reward, self.discount) for reward in rewards])
returns = [sum(reward) for reward in rewards]
all_qs = np.concatenate(self.q_averages)
all_ys = np.concatenate(self.y_averages)
average_qfunc_loss = np.mean(self.qfunc_loss_averages)
average_policy_loss = np.mean(self.policy_loss_averages)
logger.record_tabular('Epoch', epoch)
if epoch == self.n_epochs - 1:
logger.record_tabular('AverageReturn',
np.mean(returns))
logger.record_tabular('StdReturn',
np.std(returns))
logger.record_tabular('MaxReturn',
np.max(returns))
logger.record_tabular('MinReturn',
np.min(returns))
logger.record_tabular('AverageDiscountedReturn',
average_discounted_return)
if len(self.strategy_path_returns) > 0:
logger.record_tabular('AverageEsReturn',
np.mean(self.strategy_path_returns))
logger.record_tabular('StdEsReturn',
np.std(self.strategy_path_returns))
logger.record_tabular('MaxEsReturn',
np.max(self.strategy_path_returns))
logger.record_tabular('MinEsReturn',
np.min(self.strategy_path_returns))
logger.record_tabular('AverageQLoss', average_qfunc_loss)
logger.record_tabular('AveragePolicyLoss', average_policy_loss)
logger.record_tabular('AverageQ', np.mean(all_qs))
logger.record_tabular('AverageAbsQ', np.mean(np.abs(all_qs)))
logger.record_tabular('AverageY', np.mean(all_ys))
logger.record_tabular('AverageAbsY', np.mean(np.abs(all_ys)))
logger.record_tabular('AverageAbsQYDiff',
np.mean(np.abs(all_qs - all_ys)))
self.qfunc_loss_averages = []
self.policy_loss_averages = []
self.q_averages = []
self.y_averages = []
self.strategy_path_returns = []
| apache-2.0 |
facebook/buck | docs/soy2html.py | 3 | 3318 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import subprocess
import sys
import time
URL_ROOT = "http://localhost:9814/"
def main(output_dir):
# Iterate over the files in the docs directory and copy them, as
# appropriate.
for root, dirs, files in os.walk("."):
for file_name in files:
if file_name.endswith(".soy") and not file_name.startswith("__"):
# Strip the './' prefix, if appropriate.
if root.startswith("./"):
root = root[2:]
# Construct the URL where the .soy file is being served.
soy_file = file_name
html_file = root + "/" + soy_file[: -len(".soy")] + ".html"
url = URL_ROOT + html_file
copy_dest = ensure_dir(html_file, output_dir)
subprocess.check_call(["curl", "--fail", "--output", copy_dest, url])
elif (
file_name == ".nojekyll"
or file_name == "CNAME"
or file_name.endswith(".css")
or file_name.endswith(".jpg")
or file_name.endswith(".js")
or file_name.endswith(".png")
or file_name.endswith(".gif")
or file_name.endswith(".html")
or file_name.endswith(".md")
or file_name.endswith(".svg")
or file_name.endswith(".ttf")
or file_name.endswith(".txt")
):
# Copy the static resource to output_dir.
relative_path = os.path.join(root, file_name)
with open(relative_path, "rb") as resource_file:
resource = resource_file.read()
copy_to_output_dir(relative_path, output_dir, resource)
def ensure_dir(path, output_dir):
last_slash = path.rfind("/")
if last_slash != -1:
output_subdir = os.path.join(output_dir, path[:last_slash])
if not os.path.exists(output_subdir):
os.makedirs(output_subdir)
return os.path.join(output_dir, path)
def copy_to_output_dir(path, output_dir, content):
output_file = ensure_dir(path, output_dir)
with open(output_file, "wb") as f:
f.write(content)
def pollForServerReady():
SERVER_START_POLL = 5
print("Waiting for server to start.")
for _ in range(0, SERVER_START_POLL):
result = subprocess.call(["curl", "--fail", "-I", URL_ROOT])
if result == 0:
return
time.sleep(1)
print("Server failed to start after %s seconds." % SERVER_START_POLL)
if __name__ == "__main__":
output_dir = sys.argv[1]
pollForServerReady()
main(output_dir)
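# Usage (sketch): start the local docs server first, then run
#   ./soy2html.py /path/to/output_dir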
| apache-2.0 |
looopTools/sw9-source | .waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/extras/wurf/dependency.py | 1 | 2578 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import hashlib
import json
import collections
import pprint
class Dependency(object):
def __init__(self,**kwargs):
assert"sha1"not in kwargs
if'recurse'not in kwargs:
kwargs['recurse']=True
if'optional'not in kwargs:
kwargs['optional']=False
if'internal'not in kwargs:
kwargs['internal']=False
hash_attributes=kwargs.copy()
hash_attributes.pop('optional',None)
hash_attributes.pop('internal',None)
s=json.dumps(hash_attributes,sort_keys=True)
sha1=hashlib.sha1(s.encode('utf-8')).hexdigest()
object.__setattr__(self,'info',kwargs)
self.info['sha1']=sha1
self.info['hash']=None
object.__setattr__(self,'read_write',dict())
object.__setattr__(self,'audit',list())
self.error_messages=[]
def rewrite(self,attribute,value,reason):
if value==None:
self.__delete(attribute=attribute,reason=reason)
elif attribute not in self.info:
self.__create(attribute=attribute,value=value,reason=reason)
else:
self.__modify(attribute=attribute,value=value,reason=reason)
def __delete(self,attribute,reason):
if attribute not in self.info:
raise AttributeError("Cannot delete non existing attribute {}".format(attribute))
audit='Deleting "{}". Reason: {}'.format(attribute,reason)
del self.info[attribute]
self.audit.append(audit)
def __create(self,attribute,value,reason):
audit='Creating "{}" value "{}". Reason: {}'.format(attribute,value,reason)
self.audit.append(audit)
self.info[attribute]=value
def __modify(self,attribute,value,reason):
audit='Modifying "{}" from "{}" to "{}". Reason: {}'.format(attribute,self.info[attribute],value,reason)
self.audit.append(audit)
self.info[attribute]=value
def __getattr__(self,attribute):
if attribute in self.info:
return self.info[attribute]
elif attribute in self.read_write:
return self.read_write[attribute]
else:
return None
def __setattr__(self,attribute,value):
if attribute in self.info:
raise AttributeError("Attribute {} read-only.".format(attribute))
else:
self.read_write[attribute]=value
def __contains__(self,attribute):
return(attribute in self.info)or(attribute in self.read_write)
def __str__(self):
return"Dependency info:\n{}\nread_write: {}\naudit: {}".format(pprint.pformat(self.info,indent=2),pprint.pformat(self.read_write,indent=2),pprint.pformat(self.audit,indent=2))
def __hash__(self):
if not self.info['hash']:
self.info['hash']=hash(self.info['sha1'])
return self.info['hash']
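# Usage sketch (hypothetical attribute values):
#   dep = Dependency(name='foo', resolver='git', sources=['example.com/foo.git'])
#   dep.rewrite(attribute='recurse', value=False, reason='user override')
#   dep.audit now records the modification, while dep.sha1 stays unchanged.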
| mit |
guijomatos/SickRage | sickbeard/providers/bitsoup.py | 7 | 10171 | # Author: Idan Gutman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
import datetime
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.helpers import sanitizeSceneName
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.exceptions import AuthException
class BitSoupProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "BitSoup")
self.urls = {
'base_url': 'https://www.bitsoup.me',
'login': 'https://www.bitsoup.me/takelogin.php',
'detail': 'https://www.bitsoup.me/details.php?id=%s',
'search': 'https://www.bitsoup.me/browse.php',
'download': 'https://bitsoup.me/%s',
}
self.url = self.urls['base_url']
self.supportsBacklog = True
self.public = False
self.enabled = False
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = BitSoupCache(self)
self.search_params = {
"c42": 1, "c45": 1, "c49": 1, "c7": 1
}
def isEnabled(self):
return self.enabled
def imageName(self):
return 'bitsoup.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {
'username': self.username,
'password': self.password,
'ssl': 'yes'
}
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u'Unable to connect to ' + self.name + ' provider.', logger.ERROR)
return False
if re.search('Username or password incorrect', response):
logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
return False
return True
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
if self.show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_strings.keys():
for search_string in search_strings[mode]:
logger.log(u"Search string: " + search_string, logger.DEBUG)
self.search_params['search'] = search_string
data = self.getURL(self.urls['search'], params=self.search_params)
if not data:
continue
try:
with BS4Parser(data, "html.parser") as html:
torrent_table = html.find('table', attrs={'class': 'koptekst'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
                        # Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            logger.log(u"The data returned from " + self.name + " does not contain any torrents",
logger.DEBUG)
continue
for result in torrent_rows[1:]:
cells = result.find_all('td')
link = cells[1].find('a')
download_url = self.urls['download'] % cells[2].find('a')['href']
id = link['href']
id = id.replace('details.php?id=','')
id = id.replace('&hit=1', '')
try:
title = link.getText()
id = int(id)
seeders = int(cells[10].getText())
leechers = int(cells[11].getText())
except (AttributeError, TypeError):
continue
                            # Filter out torrents with too few seeders or leechers
if seeders < self.minseed or leechers < self.minleech:
continue
if not title or not download_url:
continue
item = title, download_url, id, seeders, leechers
logger.log(u"Found result: " + title.replace(' ','.') + " (" + search_string + ")", logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
            # For each search mode, sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if title:
title = self._clean_title_from_provider(title)
if url:
url = str(url).replace('&', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
for sqlshow in sqlResults or []:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class BitSoupCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
        # only poll BitSoup every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_strings = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_strings)}
provider = BitSoupProvider()
| gpl-3.0 |
Qalthos/ansible | test/integration/targets/want_json_modules_posix/library/helloworld.py | 62 | 1047 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# WANT_JSON
import json
import sys
try:
with open(sys.argv[1], 'r') as f:
data = json.load(f)
except (IOError, OSError, IndexError):
print(json.dumps(dict(msg="No argument file provided", failed=True)))
sys.exit(1)
salutation = data.get('salutation', 'Hello')
name = data.get('name', 'World')
print(json.dumps(dict(msg='%s, %s!' % (salutation, name))))
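# Example invocation (sketch):
#   echo '{"salutation": "Hi", "name": "Ansible"}' > /tmp/args.json
#   python helloworld.py /tmp/args.json   # prints {"msg": "Hi, Ansible!"}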
| gpl-3.0 |
SuperDARNCanada/placeholderOS | experiments/testing_archive/test_scanbound_not_increasing.py | 2 | 1568 | #!/usr/bin/python
# write an experiment that raises an exception
import sys
import os
BOREALISPATH = os.environ['BOREALISPATH']
sys.path.append(BOREALISPATH)
import experiments.superdarn_common_fields as scf
from experiment_prototype.experiment_prototype import ExperimentPrototype
class TestExperiment(ExperimentPrototype):
def __init__(self):
cpid = 1
super(TestExperiment, self).__init__(cpid)
if scf.IS_FORWARD_RADAR:
beams_to_use = scf.STD_16_FORWARD_BEAM_ORDER
else:
beams_to_use = scf.STD_16_REVERSE_BEAM_ORDER
if scf.opts.site_id in ["cly", "rkn", "inv"]:
num_ranges = scf.POLARDARN_NUM_RANGES
if scf.opts.site_id in ["sas", "pgr"]:
num_ranges = scf.STD_NUM_RANGES
slice_1 = { # slice_id = 0, there is only one slice.
"pulse_sequence": scf.SEQUENCE_7P,
"tau_spacing": scf.TAU_SPACING_7P,
"pulse_len": scf.PULSE_LEN_45KM,
"num_ranges": num_ranges,
"first_range": scf.STD_FIRST_RANGE,
"intt": 3500, # duration of an integration, in ms
"beam_angle": scf.STD_16_BEAM_ANGLE,
"beam_order": beams_to_use,
"scanbound": [i * 3.5 for i in range(len(beams_to_use)-1, -1, -1)], # Not increasing, should fail
"txfreq" : scf.COMMON_MODE_FREQ_1, #kHz
"acf": True,
"xcf": True, # cross-correlation processing
"acfint": True, # interferometer acfs
}
self.add_slice(slice_1)
| gpl-3.0 |
JamiiTech/mplh5canvas | examples/multi_plot.py | 4 | 1357 | #!/usr/bin/python
"""Testbed for the animation functionality of the backend, with multiple figures.
It basically produces a long series of frames that get animated on the client
browser side, this time with two figures.
"""
import matplotlib
matplotlib.use('module://mplh5canvas.backend_h5canvas')
from pylab import *
import time
def refresh_data(ax):
t = arange(0.0 + count, 2.0 + count, 0.01)
s = sin(2*pi*t)
ax.lines[0].set_xdata(t)
ax.lines[0].set_ydata(s)
ax.set_xlim(t[0],t[-1])
t = arange(0.0, 2.0, 0.01)
s = sin(2*pi*t)
plot(t, s, linewidth=1.0)
xlabel('time (s)')
ylabel('voltage (mV)')
title('First Post')
f = gcf()
ax = f.gca()
count = 0
f2 = figure()
ax2 = f2.gca()
ax2.set_xlabel('IMDB rating')
ax2.set_ylabel('South African Connections')
ax2.set_title('Luds chart...')
ax2.plot(arange(0.0, 5 + count, 0.01), arange(0.0, 5 + count, 0.01))
show(block=False, layout=2)
# show the figure manager but don't block script execution so animation works..
# layout=2 overrides the default layout manager which only shows a single plot in the browser window
while True:
refresh_data(ax)
d = arange(0.0, 5 + count, 0.01)
ax2.lines[0].set_xdata(d)
ax2.lines[0].set_ydata(d)
ax2.set_xlim(d[0],d[-1])
ax2.set_ylim(d[0],d[-1])
f.canvas.draw()
f2.canvas.draw()
count += 0.01
time.sleep(1)
| bsd-3-clause |
Artanicus/python-cozify | util/device-fade-test.py | 1 | 1301 | #!/usr/bin/env python3
from cozify import hub
import numpy, time
from absl import flags, app
FLAGS = flags.FLAGS
flags.DEFINE_string('device', None, 'Device to operate on.')
flags.DEFINE_float('delay', 0.5, 'Step length in seconds.')
flags.DEFINE_float('steps', 20, 'Amount of steps to divide into.')
flags.DEFINE_bool('verify', False, 'Verify if value went through as-is.')
green = '\u001b[32m'
yellow = '\u001b[33m'
red = '\u001b[31m'
reset = '\u001b[0m'
def main(argv):
del argv
previous = None
for step in numpy.flipud(numpy.linspace(0.0, 1.0, num=FLAGS.steps)):
hub.light_brightness(FLAGS.device, step)
time.sleep(FLAGS.delay)
read = 'N/A'
result = '?'
if FLAGS.verify:
devs = hub.devices()
read = devs[FLAGS.device]['state']['brightness']
if step == read:
result = '✔'
color = green
else:
result = '✖'
if read == previous:
color = yellow
else:
color = red
previous = step
print('{3}[{2}] set: {0} vs. read: {1}{4}'.format(step, read, result, color, reset))
if __name__ == "__main__":
flags.mark_flag_as_required('device')
app.run(main)
| mit |
tdent/pycbc | pycbc/results/table_utils.py | 6 | 4698 | # Copyright (C) 2014 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module provides functions to generate sortable html tables
"""
import mako.template, uuid
google_table_template = mako.template.Template("""
<script type='text/javascript' src='https://www.google.com/jsapi'></script>
<script type='text/javascript'>
google.load('visualization', '1', {packages:['table']});
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable();
% for type, name in column_descriptions:
data.addColumn('${str(type)}', '${str(name)}');
% endfor
data.addRows(${data});
% if format_strings is not None:
% for i, format_string in enumerate(format_strings):
% if format_string is not None:
var formatter = new google.visualization.NumberFormat({pattern:'${format_string}'});
formatter.format(data, ${i});
% endif
% endfor
% endif
var table = new google.visualization.Table(document.getElementById('${div_id}'));
table.draw(data, {showRowNumber: 'true',
page: '${page_enable}',
allowHtml: 'true',
pageSize: ${page_size}});
}
</script>
<div id='${div_id}'></div>
""")
def html_table(columns, names, page_size=None, format_strings=None):
""" Return an html table of this data
Parameters
----------
columns : list of numpy arrays
names : list of strings
The list of columns names
page_size : {int, None}, optional
The number of items to show on each page of the table
format_strings : {lists of strings, None}, optional
        The ICU format string for each column, or None for no formatting. All
columns must have a format string if provided.
Returns
-------
html_table : str
A str containing the html code to display a table of this data
"""
if page_size is None:
page = 'disable'
else:
page = 'enable'
div_id = uuid.uuid4()
column_descriptions = []
for column, name in zip(columns, names):
if column.dtype.kind == 'S':
ctype = 'string'
else:
ctype = 'number'
column_descriptions.append((ctype, name))
data = []
for item in zip(*columns):
data.append(list(item))
return google_table_template.render(div_id=div_id,
page_enable=page,
column_descriptions = column_descriptions,
page_size=page_size,
data=data,
format_strings=format_strings,
)
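# Usage sketch (hypothetical data):
#   import numpy
#   cols = [numpy.array([1.5, 2.5]), numpy.array(['a', 'b'])]
#   html = html_table(cols, ['value', 'label'], page_size=10,
#                     format_strings=['#.##', None])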
static_table_template = mako.template.Template("""
<table class="table">
% if titles is not None:
<tr>
% for i in range(len(titles)):
<th>
${titles[i]}
</th>
% endfor
</tr>
% endif
% for i in range(len(data)):
<tr>
% for j in range(len(data[i])):
<td>
${data[i][j]}
</td>
% endfor
</tr>
% endfor
</table>
""")
def static_table(data, titles=None):
""" Return an html tableo of this data
Parameters
----------
data : two-dimensional numpy string array
Array containing the cell values
titles : numpy array
        Vector of title strings
Returns
-------
html_table : str
A string containing the html table.
"""
return static_table_template.render(data=data, titles=titles)
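# Usage sketch (assuming numpy string arrays as documented):
#   static_table(numpy.array([['1', '2'], ['3', '4']]),
#                titles=numpy.array(['left', 'right']))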
| gpl-3.0 |
BreakawayLabs/mom | test/test_bit_reproducibility.py | 3 | 1736 |
from __future__ import print_function
import os
import sys
import re
from model_test_setup import ModelTestSetup
from test_run import tests as test_specs
class TestBitReproducibility(ModelTestSetup):
def __init__(self):
super(TestBitReproducibility, self).__init__()
def checksums_to_dict(self, filename):
"""
Look at each line and make a dictionary entry.
"""
regex = re.compile(r'\[chksum\]\s+(.*)\s+(-?[0-9]+)$')
        checksums = {}
        with open(filename) as f:
            for line in f:
                m = regex.match(line)
                if m is not None:
                    checksums[m.group(1).rstrip()] = int(m.group(2))
        return checksums
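    # Example input line (hypothetical): "[chksum] ocean_temp   123456789"
    # yields the entry {'ocean_temp': 123456789}.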
def expected_checksums(self, test_name):
filename = os.path.join(self.my_dir, 'checksums',
'{}.txt'.format(test_name))
return self.checksums_to_dict(filename)
def produced_checksums(self, test_name):
"""
Extract checksums from model run output.
"""
filename = os.path.join(self.work_dir, test_name, 'fms.out')
return self.checksums_to_dict(filename)
def check_run(self, key):
# Compare expected to produced.
expected = self.expected_checksums(key)
produced = self.produced_checksums(key)
for k in expected:
assert(k in produced)
if expected[k] != produced[k]:
print('{}: expected {}, produced {}'.format(key, expected[k],
produced[k]))
assert(expected[k] == produced[k])
def test_checksums(self):
for k in test_specs.keys():
yield self.check_run, k
| gpl-2.0 |
vericred/vericred-python | vericred_client/models/network_comparison_response.py | 1 | 13134 | # coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class NetworkComparisonResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, networks=None, network_comparisons=None):
"""
NetworkComparisonResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'networks': 'list[Network]',
'network_comparisons': 'list[NetworkComparison]'
}
self.attribute_map = {
'networks': 'networks',
'network_comparisons': 'network_comparisons'
}
self._networks = networks
self._network_comparisons = network_comparisons
@property
def networks(self):
"""
Gets the networks of this NetworkComparisonResponse.
Networks
:return: The networks of this NetworkComparisonResponse.
:rtype: list[Network]
"""
return self._networks
@networks.setter
def networks(self, networks):
"""
Sets the networks of this NetworkComparisonResponse.
Networks
:param networks: The networks of this NetworkComparisonResponse.
:type: list[Network]
"""
self._networks = networks
@property
def network_comparisons(self):
"""
Gets the network_comparisons of this NetworkComparisonResponse.
NetworkComparisons
:return: The network_comparisons of this NetworkComparisonResponse.
:rtype: list[NetworkComparison]
"""
return self._network_comparisons
@network_comparisons.setter
def network_comparisons(self, network_comparisons):
"""
Sets the network_comparisons of this NetworkComparisonResponse.
NetworkComparisons
:param network_comparisons: The network_comparisons of this NetworkComparisonResponse.
:type: list[NetworkComparison]
"""
self._network_comparisons = network_comparisons
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
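# Illustrative usage sketch (not emitted by swagger-codegen); the generated
# Network / NetworkComparison element classes are assumed to live elsewhere in
# the client package, so plain dicts stand in for them here.
if __name__ == '__main__':
    demo = NetworkComparisonResponse(
        networks=[{'id': 'net-1'}],
        network_comparisons=[{'left': 'net-1', 'right': 'net-2'}])
    # to_dict() calls to_dict() on list elements that define it; plain dicts
    # pass through unchanged.
    print(demo.to_dict())
    print(demo.to_str())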
| apache-2.0 |
emilio/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/opera.py | 15 | 3783 | from .base import Browser, ExecutorBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from ..webdriver_server import OperaDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor, # noqa: F401
SeleniumRefTestExecutor) # noqa: F401
from ..executors.executoropera import OperaDriverWdspecExecutor # noqa: F401
__wptrunner__ = {"product": "opera",
"check_args": "check_args",
"browser": "OperaBrowser",
"executor": {"testharness": "SeleniumTestharnessExecutor",
"reftest": "SeleniumRefTestExecutor",
"wdspec": "OperaDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"timeout_multiplier": "get_timeout_multiplier"}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
from selenium.webdriver import DesiredCapabilities
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, run_info_data, **kwargs)
executor_kwargs["close_after_done"] = True
capabilities = dict(DesiredCapabilities.OPERA.items())
capabilities.setdefault("operaOptions", {})["prefs"] = {
"profile": {
"default_content_setting_values": {
"popups": 1
}
}
}
for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
if kwargs[kwarg] is not None:
capabilities["operaOptions"][capability] = kwargs[kwarg]
if test_type == "testharness":
capabilities["operaOptions"]["useAutomationExtension"] = False
capabilities["operaOptions"]["excludeSwitches"] = ["enable-automation"]
if test_type == "wdspec":
capabilities["operaOptions"]["w3c"] = True
executor_kwargs["capabilities"] = capabilities
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
return {}
class OperaBrowser(Browser):
"""Opera is backed by operadriver, which is supplied through
``wptrunner.webdriver.OperaDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="operadriver",
webdriver_args=None):
"""Creates a new representation of Opera. The `binary` argument gives
the browser binary to use for testing."""
Browser.__init__(self, logger)
self.binary = binary
self.server = OperaDriverServer(self.logger,
binary=webdriver_binary,
args=webdriver_args)
def start(self, **kwargs):
self.server.start(block=False)
def stop(self, force=False):
self.server.stop(force=force)
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the driver is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive()
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.server.url}
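# Illustrative wiring sketch (not part of wptrunner); the binary locations are
# placeholder assumptions and would normally come from wptrunner's CLI options.
if __name__ == '__main__':
    import logging
    demo_logger = logging.getLogger("opera-demo")
    demo_browser = OperaBrowser(demo_logger,
                                binary="/usr/bin/opera",         # assumed path
                                webdriver_binary="operadriver")  # assumed on PATH
    demo_browser.start()
    # executor_browser() returns the class and kwargs used to talk to the driver.
    print(demo_browser.executor_browser())
    demo_browser.stop()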
| mpl-2.0 |
saumishr/django | django/template/context.py | 80 | 6146 | from copy import copy
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Cache of actual callables.
_standard_context_processors = None
# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('django.core.context_processors.csrf',)
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
self.dicts = [value or {}]
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self):
d = {}
self.dicts.append(d)
return d
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True, current_app=None,
use_l10n=None, use_tz=None):
self.autoescape = autoescape
self.current_app = current_app
self.use_l10n = use_l10n
self.use_tz = use_tz
self.render_context = RenderContext()
super(Context, self).__init__(dict_)
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
self.dicts.append(other_dict)
return other_dict
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RequestContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
d = self.dicts[-1]
if key in d:
return d[key]
return otherwise
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors():
from django.conf import settings
global _standard_context_processors
if _standard_context_processors is None:
processors = []
collect = []
collect.extend(_builtin_context_processors)
collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
for path in collect:
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
processors.append(func)
_standard_context_processors = tuple(processors)
return _standard_context_processors
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in TEMPLATE_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None, current_app=None,
use_l10n=None, use_tz=None):
Context.__init__(self, dict_, current_app=current_app,
use_l10n=use_l10n, use_tz=use_tz)
if processors is None:
processors = ()
else:
processors = tuple(processors)
for processor in get_standard_processors() + processors:
self.update(processor(request))
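# Illustrative demo of the stack semantics (not part of Django; requires a
# Django checkout on the path for the imports above): push() opens a new
# scope, lookups walk the stack top-down, and pop() restores the outer value.
if __name__ == '__main__':
    demo = Context({'a': 1})
    demo.push()
    demo['a'] = 2
    assert demo['a'] == 2
    demo.pop()
    assert demo['a'] == 1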
| bsd-3-clause |
rooi/CouchPotatoServer | couchpotato/core/plugins/trailer/main.py | 8 | 1661 | from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import getExt, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
import os
log = CPLog(__name__)
class Trailer(Plugin):
def __init__(self):
addEvent('renamer.after', self.searchSingle)
def searchSingle(self, message = None, group = None):
if not group: group = {}
if self.isDisabled() or len(group['files']['trailer']) > 0: return
trailers = fireEvent('trailer.search', group = group, merge = True)
        if not trailers:
log.info('No trailers found for: %s', getTitle(group['library']))
return False
for trailer in trailers.get(self.conf('quality'), []):
ext = getExt(trailer)
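            # An "extension" longer than 5 chars is almost certainly not a real
            # file extension (e.g. a URL fragment), so fall back to mp4 below.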
filename = self.conf('name').replace('<filename>', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext))
destination = os.path.join(group['destination_dir'], filename)
if not os.path.isfile(destination):
trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True)
if os.path.getsize(trailer_file) < (1024 * 1024): # Don't trust small trailers (1MB), try next one
os.unlink(trailer_file)
continue
else:
log.debug('Trailer already exists: %s', destination)
group['renamed_files'].append(destination)
# Download first and break
break
return True
| gpl-3.0 |
blueburningcoder/pybrain | pybrain/supervised/knn/lsh/nearoptimal.py | 25 | 6466 |
"""This module provides functionality for locality sensitive hashing in high
dimensional euclidean spaces.
It is based on the work of Andoni and Indyk, 'Near-Optimal Hashing Algorithms
for Approximate Nearest Neighbor in High Dimensions'."""
__author__ = 'Justin Bayer, [email protected]'
import logging
from collections import defaultdict
from heapq import nlargest
from math import sqrt, log, ceil
from scipy import array, dot, random, ones
try:
# Python 2
from scipy import weave
except ImportError:
# Python 3
pass
class MultiDimHash(object):
"""Class that represents a datastructure that enables nearest neighbours
search and methods to do so."""
# If the dimension of a dataset is bigger than this bound, the
# dimensionality will be reduced by a random projection into 24dimensional
# space
lowerDimensionBound = 24
def _getRadius(self):
return self._radius
def _setRadius(self, value):
self._radius = abs(value)
self.radiusSquared = value ** 2
radius = property(_getRadius, _setRadius)
def __init__(self, dim, omega=4, prob=0.8):
"""Create a hash for arrays of dimension dim.
The hyperspace will be split into hypercubes with a sidelength of
omega * sqrt(sqrt(dim)), that is omega * radius.
Every point in the dim-dimensional euclidean space will be hashed to
its correct bucket with a probability of prob.
"""
message = ("Creating Hash with %i dimensions, sidelength %.2f and " +
"cNN-probability %.2f") % (dim, omega, prob)
logging.debug(message)
self.dim = dim
self.omega = omega
self.prob = prob
self.radius = sqrt(sqrt(min(dim, self.lowerDimensionBound)))
logging.debug("Radius set to %.2f" % self.radius)
self._initializeGrids()
self._initializeProjection()
self.balls = defaultdict(lambda: [])
def _findAmountOfGrids(self):
w = self.radius
omega = self.omega
d = self.dim
prob = self.prob
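        # Note that w cancels out below, so N == (omega * sqrt(d)) ** d.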
N = ((omega * w) / (w / sqrt(d))) ** d
result = int(ceil(log((1 - prob) / N, 1 - 1 / N)))
logging.debug("Number of grids: %i" % result)
return result
def _initializeGrids(self):
offset = self.omega * self.radius
radius_offset = ones(self.dim) * self.radius
self.gridBalls = random.random((self._findAmountOfGrids(), self.dim))
self.gridBalls *= offset
self.gridBalls += radius_offset
def _initializeProjection(self):
if self.dim <= self.lowerDimensionBound:
# We only need to reduce the dimension if it's bigger than
# lowerDimensionBound; otherwise, chose identity
self.projection = 1
else:
projection_shape = self.dim, self.lowerDimensionBound
self.projection = random.standard_normal(projection_shape)
self.projection /= sqrt(self.lowerDimensionBound)
def _findHypercube(self, point):
"""Return where a point lies in what hypercube.
The result is a pair of two arrays. The first array is an array of
integers that indicate the multidimensional index of the hypercube it
is in. The second array is an array of floats, specifying the
coordinates of the point in that hypercube.
"""
offset = self.omega * self.radius
divmods = (divmod(p, offset) for p in point)
hypercube_indices, relative_point = [], []
for index, rest in divmods:
hypercube_indices.append(index)
relative_point.append(rest)
return array(hypercube_indices, dtype=int), array(relative_point)
def _findLocalBall_noinline(self, point):
"""Return the index of the ball that the point lies in."""
for i, ball in enumerate(self.gridBalls):
distance = point - ball
if dot(distance.T, distance) <= self.radiusSquared:
return i
def _findLocalBall_inline(self, point):
"""Return the index of the ball that the point lies in."""
balls = self.gridBalls
nBalls, dim = balls.shape #@UnusedVariable
radiusSquared = self.radiusSquared #@UnusedVariable
code = """
#line 121 "nearoptimal.py"
return_val = -1;
for (long i = 0; i < nBalls; i++)
{
double distance = 0.0;
for (long j = 0; j < dim; j++)
{
double diff = balls(i, j) - point(j);
distance += diff * diff;
}
if (distance <= radiusSquared) {
return_val = i;
break;
}
}
"""
variables = 'point', 'balls', 'nBalls', 'dim', 'radiusSquared',
result = weave.inline(
code,
variables,
type_converters=weave.converters.blitz,
compiler='gcc')
return result if result != -1 else None
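    # Default to the pure-Python variant: scipy.weave (needed by the inline
    # version) is only importable on Python 2, per the guard at the top.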
_findLocalBall = _findLocalBall_noinline
def findBall(self, point):
hypercube_index, relative_point = self._findHypercube(point)
ball_index = self._findLocalBall(relative_point)
return tuple(hypercube_index), ball_index
def insert(self, point, satellite):
"""Put a point and its satellite information into the hash structure.
"""
point = dot(self.projection, point)
index = self.findBall(point)
self.balls[index].append((point, satellite))
def _findKnnCandidates(self, point):
"""Return a set of candidates that might be nearest neighbours of a
query point."""
index = self.findBall(point)
logging.debug("Found %i candidates for cNN" % len(self.balls[index]))
return self.balls[index]
def knn(self, point, k):
"""Return the k approximate nearest neighbours of the item in the
current hash.
Mind that the probabilistic nature of the data structure might not
return a nearest neighbor at all and not the nearest neighbour."""
candidates = self._findKnnCandidates(point)
def sortKey(xxx_todo_changeme):
(point_, satellite_) = xxx_todo_changeme
distance = point - point_
return - dot(distance.T, distance)
return nlargest(k, candidates, key=sortKey)
| bsd-3-clause |
roadmapper/ansible | lib/ansible/modules/storage/netapp/_na_ontap_gather_facts.py | 21 | 21721 | #!/usr/bin/python
# (c) 2018 Piotr Olczak <[email protected]>
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_gather_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(na_ontap_info) instead.
author: Piotr Olczak (@dprts) <[email protected]>
extends_documentation_fragment:
- netapp.na_ontap
short_description: NetApp information gatherer
description:
- This module allows you to gather various information about ONTAP configuration
version_added: "2.7"
requirements:
- netapp_lib
options:
state:
description:
- Returns "info"
default: "info"
choices: ['info']
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
"aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info",
"net_ifgrp_info",
"net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info",
"nvme_namespace_info", "nvme_subsystem_info", "ontap_version",
"qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info",
"security_login_account_info", "storage_failover_info", "volume_info",
"vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info"
Can specify a list of values to include a larger subset. Values can also be used
      with an initial C(!) to specify that a specific subset should
not be collected.
- nvme is supported with ONTAP 9.4 onwards.
- use "help" to get a list of supported facts for your system.
default: "all"
version_added: 2.8
'''
EXAMPLES = '''
- name: Get NetApp info (Password Authentication)
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
- debug:
var: ontap_facts
- name: Limit Fact Gathering to Aggregate Information
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset: "aggregate_info"
- name: Limit Fact Gathering to Volume and Lun Information
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset:
- volume_info
- lun_info
- name: Gather all facts except for volume and lun information
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset:
- "!volume_info"
- "!lun_info"
'''
RETURN = '''
ontap_facts:
description: Returns various information about NetApp cluster configuration
returned: always
type: dict
sample: '{
"ontap_facts": {
"aggregate_info": {...},
"cluster_node_info": {...},
"net_dns_info": {...},
"net_ifgrp_info": {...},
"net_interface_info": {...},
"net_port_info": {...},
"security_key_manager_key_info": {...},
"security_login_account_info": {...},
"volume_info": {...},
"lun_info": {...},
"storage_failover_info": {...},
"vserver_login_banner_info": {...},
"vserver_motd_info": {...},
"vserver_info": {...},
"vserver_nfs_info": {...},
"ontap_version": {...},
"igroup_info": {...},
"qos_policy_info": {...},
"qos_adaptive_policy_info": {...}
}'
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError:
HAS_XMLTODICT = False
try:
import json
HAS_JSON = True
except ImportError:
HAS_JSON = False
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPGatherFacts(object):
'''Class with gather facts methods'''
def __init__(self, module):
self.module = module
self.netapp_info = dict()
# thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
# for starting this
# min_version identifies the ontapi version which supports this ZAPI
# use 0 if it is supported since 9.1
self.fact_subsets = {
'net_dns_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-dns-get-iter',
'attribute': 'net-dns-info',
'field': 'vserver-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_interface_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-interface-get-iter',
'attribute': 'net-interface-info',
'field': 'interface-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_port_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-port-get-iter',
'attribute': 'net-port-info',
'field': ('node', 'port'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'cluster_node_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'cluster-node-get-iter',
'attribute': 'cluster-node-info',
'field': 'node-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'security_login_account_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'security-login-get-iter',
'attribute': 'security-login-account-info',
'field': ('vserver', 'user-name', 'application', 'authentication-method'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'aggregate_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'aggr-get-iter',
'attribute': 'aggr-attributes',
'field': 'aggregate-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'volume_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'volume-get-iter',
'attribute': 'volume-attributes',
'field': ('name', 'owning-vserver-name'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'lun_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'lun-get-iter',
'attribute': 'lun-info',
'field': 'path',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'storage_failover_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'cf-get-iter',
'attribute': 'storage-failover-info',
'field': 'node',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_motd_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-motd-get-iter',
'attribute': 'vserver-motd-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_login_banner_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-login-banner-get-iter',
'attribute': 'vserver-login-banner-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'security_key_manager_key_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'security-key-manager-key-get-iter',
'attribute': 'security-key-manager-key-info',
'field': ('node', 'key-id'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-get-iter',
'attribute': 'vserver-info',
'field': 'vserver-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_nfs_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nfs-service-get-iter',
'attribute': 'nfs-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_ifgrp_info': {
'method': self.get_ifgrp_info,
'kwargs': {},
'min_version': '0',
},
'ontap_version': {
'method': self.ontapi,
'kwargs': {},
'min_version': '0',
},
'system_node_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'system-node-get-iter',
'attribute': 'node-details-info',
'field': 'node',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'igroup_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'igroup-get-iter',
'attribute': 'initiator-group-info',
'field': ('vserver', 'initiator-group-name'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'qos_policy_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'qos-policy-group-get-iter',
'attribute': 'qos-policy-group-info',
'field': 'policy-group',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
# supported in ONTAP 9.3 and onwards
'qos_adaptive_policy_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'qos-adaptive-policy-group-get-iter',
'attribute': 'qos-adaptive-policy-group-info',
'field': 'policy-group',
'query': {'max-records': '1024'},
},
'min_version': '130',
},
# supported in ONTAP 9.4 and onwards
'nvme_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-get-iter',
'attribute': 'nvme-target-service-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_interface_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-interface-get-iter',
'attribute': 'nvme-interface-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_subsystem_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-subsystem-get-iter',
'attribute': 'nvme-subsystem-info',
'field': 'subsystem',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_namespace_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-namespace-get-iter',
'attribute': 'nvme-namespace-info',
'field': 'path',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
}
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def ontapi(self):
'''Method to get ontapi version'''
api = 'system-get-ontapi-version'
api_call = netapp_utils.zapi.NaElement(api)
try:
results = self.server.invoke_successfully(api_call, enable_tunneling=False)
ontapi_version = results.get_child_content('minor-version')
return ontapi_version if ontapi_version is not None else '0'
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error calling API %s: %s" %
(api, to_native(error)), exception=traceback.format_exc())
def call_api(self, call, query=None):
'''Main method to run an API call'''
api_call = netapp_utils.zapi.NaElement(call)
result = None
if query:
for key, val in query.items():
# Can val be nested?
api_call.add_new_child(key, val)
try:
result = self.server.invoke_successfully(api_call, enable_tunneling=False)
return result
except netapp_utils.zapi.NaApiError as error:
if call in ['security-key-manager-key-get-iter']:
return result
else:
self.module.fail_json(msg="Error calling API %s: %s"
% (call, to_native(error)), exception=traceback.format_exc())
def get_ifgrp_info(self):
'''Method to get network port ifgroups info'''
try:
net_port_info = self.netapp_info['net_port_info']
except KeyError:
net_port_info_calls = self.fact_subsets['net_port_info']
net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
interfaces = net_port_info.keys()
ifgrps = []
for ifn in interfaces:
if net_port_info[ifn]['port_type'] == 'if_group':
ifgrps.append(ifn)
net_ifgrp_info = dict()
for ifgrp in ifgrps:
query = dict()
query['node'], query['ifgrp-name'] = ifgrp.split(':')
tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
attribute='net-ifgrp-info', query=query)
            net_ifgrp_info.update(tmp)
return net_ifgrp_info
def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
'''Method to run a generic get-iter call'''
generic_call = self.call_api(call, query)
if call == 'net-port-ifgrp-get':
children = 'attributes'
else:
children = 'attributes-list'
if generic_call is None:
return None
if field is None:
out = []
else:
out = {}
attributes_list = generic_call.get_child_by_name(children)
if attributes_list is None:
return None
for child in attributes_list.get_children():
dic = xmltodict.parse(child.to_string(), xml_attribs=False)
if attribute is not None:
dic = dic[attribute]
            if isinstance(field, str):
                unique_key = _finditem(dic, field)
                out[unique_key] = convert_keys(json.loads(json.dumps(dic)))
            elif isinstance(field, tuple):
                unique_key = ':'.join([_finditem(dic, el) for el in field])
                out[unique_key] = convert_keys(json.loads(json.dumps(dic)))
            else:
                out.append(convert_keys(json.loads(json.dumps(dic))))
return out
def get_all(self, gather_subset):
'''Method to get all subsets'''
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_gather_facts", cserver)
self.netapp_info['ontap_version'] = self.ontapi()
run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
if 'help' in gather_subset:
self.netapp_info['help'] = sorted(run_subset)
else:
for subset in run_subset:
call = self.fact_subsets[subset]
self.netapp_info[subset] = call['method'](**call['kwargs'])
return self.netapp_info
def get_subset(self, gather_subset, version):
'''Method to get a single subset'''
runable_subsets = set()
exclude_subsets = set()
usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']]
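        # NB: this is a lexicographic string comparison of ONTAPI minor
        # versions; it works for the values used here ('0', '130', '140'),
        # but e.g. a minor version '90' would sort above '130'.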
if 'help' in gather_subset:
return usable_subsets
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(usable_subsets)
return runable_subsets
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
return set()
exclude = True
else:
exclude = False
if subset not in usable_subsets:
if subset not in self.fact_subsets.keys():
self.module.fail_json(msg='Bad subset: %s' % subset)
self.module.fail_json(msg='Remote system at version %s does not support %s' %
(version, subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(usable_subsets)
runable_subsets.difference_update(exclude_subsets)
return runable_subsets
# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
def __finditem(obj, key):
if key in obj:
return obj[key]
for dummy, val in obj.items():
if isinstance(val, dict):
item = __finditem(val, key)
if item is not None:
return item
return None
def _finditem(obj, key):
value = __finditem(obj, key)
if value is not None:
return value
raise KeyError(key)
def convert_keys(d_param):
'''Method to convert hyphen to underscore'''
out = {}
if isinstance(d_param, dict):
for key, val in d_param.items():
val = convert_keys(val)
out[key.replace('-', '_')] = val
else:
return d_param
return out
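# Illustrative (not exercised by the module itself): convert_keys recurses
# through nested dicts, e.g. {'net-port': {'port-type': 'if_group'}} becomes
# {'net_port': {'port_type': 'if_group'}}.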
def main():
'''Execute action'''
argument_spec = netapp_utils.na_ontap_host_argument_spec()
argument_spec.update(dict(
state=dict(default='info', choices=['info']),
gather_subset=dict(default=['all'], type='list'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_XMLTODICT:
module.fail_json(msg="xmltodict missing")
if not HAS_JSON:
module.fail_json(msg="json missing")
state = module.params['state']
gather_subset = module.params['gather_subset']
if gather_subset is None:
gather_subset = ['all']
gf_obj = NetAppONTAPGatherFacts(module)
gf_all = gf_obj.get_all(gather_subset)
result = {'state': state, 'changed': False}
module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result)
if __name__ == '__main__':
main()
| gpl-3.0 |
calamityman/ansible-modules-extras | cloud/amazon/ec2_vpc_vgw.py | 43 | 20238 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: ec2_vpc_vgw
short_description: Create and delete AWS VPN Virtual Gateways.
description:
- Creates AWS VPN Virtual Gateways
- Deletes AWS VPN Virtual Gateways
- Attaches Virtual Gateways to VPCs
- Detaches Virtual Gateways from VPCs
version_added: "2.2"
requirements: [ boto3 ]
options:
state:
description:
- present to ensure resource is created.
- absent to remove resource
required: false
default: present
choices: [ "present", "absent"]
name:
description:
- name of the vgw to be created or deleted
required: false
type:
description:
- type of the virtual gateway to be created
required: false
choices: [ "ipsec.1" ]
vpn_gateway_id:
description:
- vpn gateway id of an existing virtual gateway
required: false
vpc_id:
description:
- the vpc-id of a vpc to attach or detach
required: false
wait_timeout:
description:
- number of seconds to wait for status during vpc attach and detach
required: false
default: 320
tags:
description:
- dictionary of resource tags
required: false
default: null
aliases: [ "resource_tags" ]
author: Nick Aslanidis (@naslanidis)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create a new vgw attached to a specific VPC
ec2_vpc_vgw:
state: present
region: ap-southeast-2
profile: personal
vpc_id: vpc-12345678
name: personal-testing
type: ipsec.1
register: created_vgw
- name: Create a new unattached vgw
ec2_vpc_vgw:
state: present
region: ap-southeast-2
profile: personal
name: personal-testing
type: ipsec.1
tags:
environment: production
owner: ABC
register: created_vgw
- name: Remove a new vgw using the name
ec2_vpc_vgw:
state: absent
region: ap-southeast-2
profile: personal
name: personal-testing
type: ipsec.1
register: deleted_vgw
- name: Remove a new vgw using the vpn_gateway_id
ec2_vpc_vgw:
state: absent
region: ap-southeast-2
profile: personal
vpn_gateway_id: vgw-3a9aa123
register: deleted_vgw
'''
RETURN = '''
result:
description: The result of the create, or delete action.
returned: success
type: dictionary
'''
try:
import json
import time
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def get_vgw_info(vgws):
if not isinstance(vgws, list):
return
for vgw in vgws:
vgw_info = {
'id': vgw['VpnGatewayId'],
'type': vgw['Type'],
'state': vgw['State'],
'vpc_id': None,
'tags': dict()
}
for tag in vgw['Tags']:
vgw_info['tags'][tag['Key']] = tag['Value']
if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
return vgw_info
def wait_for_status(client, module, vpn_gateway_id, status):
polling_increment_secs = 15
    max_retries = int(module.params.get('wait_timeout') / polling_increment_secs)
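    # e.g. the default wait_timeout of 320 gives 320 // 15 == 21 polling
    # attempts, i.e. roughly five minutes of waiting.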
status_achieved = False
for x in range(0, max_retries):
try:
response = find_vgw(client, module, vpn_gateway_id)
if response[0]['VpcAttachments'][0]['State'] == status:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return status_achieved, result
def attach_vgw(client, module, vpn_gateway_id):
params = dict()
params['VpcId'] = module.params.get('vpc_id')
try:
response = client.attach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
if not status_achieved:
module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
result = response
return result
def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
params = dict()
params['VpcId'] = module.params.get('vpc_id')
if vpc_id:
try:
response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
if not status_achieved:
module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
result = response
return result
def create_vgw(client, module):
params = dict()
params['Type'] = module.params.get('type')
try:
response = client.create_vpn_gateway(Type=params['Type'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def delete_vgw(client, module, vpn_gateway_id):
try:
response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
#return the deleted VpnGatewayId as this is not included in the above response
result = vpn_gateway_id
return result
def create_tags(client, module, vpn_gateway_id):
try:
response = client.create_tags(Resources=[vpn_gateway_id],Tags=load_tags(module))
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
if tags_to_delete:
try:
response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.delete_tags(Resources=[vpn_gateway_id])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def load_tags(module):
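    # Translates the module's tags dict into the boto3 list-of-dicts shape,
    # e.g. {'environment': 'production'} becomes
    # [{'Key': 'environment', 'Value': 'production'}, {'Key': 'Name', ...}].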
tags = []
if module.params.get('tags'):
for name, value in module.params.get('tags').iteritems():
tags.append({'Key': name, 'Value': str(value)})
tags.append({'Key': "Name", 'Value': module.params.get('name')})
else:
tags.append({'Key': "Name", 'Value': module.params.get('name')})
return tags
def find_tags(client, module, resource_id=None):
if resource_id:
try:
response = client.describe_tags(Filters=[
{'Name': 'resource-id', 'Values': [resource_id]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def check_tags(client, module, existing_vgw, vpn_gateway_id):
params = dict()
params['Tags'] = module.params.get('tags')
vgw = existing_vgw
changed = False
tags_list = {}
#format tags for comparison
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_list[tags['Key']] = tags['Value']
# if existing tags don't match the tags arg, delete existing and recreate with new list
    if params['Tags'] is not None and tags_list != params['Tags']:
delete_tags(client, module, vpn_gateway_id)
create_tags(client, module, vpn_gateway_id)
vgw = find_vgw(client, module)
changed = True
#if no tag args are supplied, delete any existing tags with the exception of the name tag
    if params['Tags'] is None and tags_list != {}:
tags_to_delete = []
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_to_delete.append(tags)
delete_tags(client, module, vpn_gateway_id, tags_to_delete)
vgw = find_vgw(client, module)
changed = True
return vgw, changed
def find_vpc(client, module):
params = dict()
params['vpc_id'] = module.params.get('vpc_id')
if params['vpc_id']:
try:
response = client.describe_vpcs(VpcIds=[params['vpc_id']])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def find_vgw(client, module, vpn_gateway_id=None):
params = dict()
params['Name'] = module.params.get('name')
params['Type'] = module.params.get('type')
params['State'] = module.params.get('state')
if params['State'] == 'present':
try:
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
if vpn_gateway_id:
try:
response = client.describe_vpn_gateways(VpnGatewayIds=vpn_gateway_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response['VpnGateways']
return result
def ensure_vgw_present(client, module):
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will not create another vgw.
changed = False
params = dict()
result = dict()
params['Name'] = module.params.get('name')
params['VpcId'] = module.params.get('vpc_id')
params['Type'] = module.params.get('type')
params['Tags'] = module.params.get('tags')
params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
# Check that a name argument has been supplied.
if not module.params.get('name'):
        module.fail_json(msg='A name is required when a state of \'present\' is supplied')
# check if a gateway matching our module args already exists
existing_vgw = find_vgw(client, module)
if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)
# if a vpc_id was provided, check if it exists and if it's attached
if params['VpcId']:
# check that the vpc_id exists. If not, an exception is thrown
vpc = find_vpc(client, module)
current_vpc_attachments = existing_vgw[0]['VpcAttachments']
if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
if current_vpc_attachments[0]['VpcId'] == params['VpcId'] and current_vpc_attachments[0]['State'] == 'attached':
changed = False
else:
# detach the existing vpc from the virtual gateway
vpc_to_detach = current_vpc_attachments[0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
time.sleep(5)
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
vgw = find_vgw(client, module, [vpn_gateway_id])
changed = True
else:
# attach the vgw to the supplied vpc
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
vgw = find_vgw(client, module, [vpn_gateway_id])
changed = True
# if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
else:
existing_vgw = find_vgw(client, module, [vpn_gateway_id])
if existing_vgw[0]['VpcAttachments'] != []:
if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
# detach the vpc from the vgw
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
vgw = find_vgw(client, module, [vpn_gateway_id])
else:
# create a new vgw
new_vgw = create_vgw(client, module)
changed = True
vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
# tag the new virtual gateway
create_tags(client, module, vpn_gateway_id)
# return current state of the vgw
vgw = find_vgw(client, module, [vpn_gateway_id])
# if a vpc-id was supplied, attempt to attach it to the vgw
if params['VpcId']:
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
changed = True
vgw = find_vgw(client, module, [vpn_gateway_id])
result = get_vgw_info(vgw)
return changed, result
def ensure_vgw_absent(client, module):
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will take steps to delete it.
changed = False
params = dict()
result = dict()
params['Name'] = module.params.get('name')
params['VpcId'] = module.params.get('vpc_id')
params['Type'] = module.params.get('type')
params['Tags'] = module.params.get('tags')
params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
# check if a gateway matching our module args already exists
if params['VpnGatewayIds']:
existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
existing_vgw = existing_vgw_with_id
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
# no vpc's are attached so attempt to delete the vgw
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
changed = False
deleted_vgw = "Nothing to do"
else:
#Check that a name and type argument has been supplied if no vgw-id
        if not module.params.get('name') or not module.params.get('type'):
            module.fail_json(msg='A name and type are required when no vgw-id and a state of \'absent\' is supplied')
existing_vgw = find_vgw(client, module)
if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
#now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
#now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
else:
# no vpc's are attached so attempt to delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
else:
changed = False
deleted_vgw = None
result = deleted_vgw
return changed, result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent']),
region=dict(required=True),
name=dict(),
vpn_gateway_id=dict(),
vpc_id=dict(),
wait_timeout=dict(type='int', default=320),
type=dict(default='ipsec.1', choices=['ipsec.1']),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
        module.fail_json(msg='json and boto3 are required.')
state = module.params.get('state').lower()
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError:
e = get_exception()
module.fail_json(msg="Can't authorize connection - "+str(e))
if state == 'present':
(changed, results) = ensure_vgw_present(client, module)
else:
(changed, results) = ensure_vgw_absent(client, module)
module.exit_json(changed=changed, vgw=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sgerhart/ansible | lib/ansible/modules/web_infrastructure/django_manage.py | 9 | 11389 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all
management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb,
test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run
with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
    aliases: [virtual_env]
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
clear:
description:
- Clear the existing files before trying to copy or link the original file.
- Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
required: false
default: no
type: bool
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
type: bool
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python",
for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage:
command: cleanup
app_path: "{{ django_dir }}"
# Load the initial_data fixture into the application
- django_manage:
command: loaddata
app_path: "{{ django_dir }}"
fixtures: "{{ initial_data }}"
# Run syncdb on the application
- django_manage:
command: syncdb
app_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
virtualenv: "{{ virtualenv_dir }}"
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage:
command: test
app_path: "{{ django_dir }}"
apps: main.SmokeTest
# Create an initial superuser.
- django_manage:
command: "createsuperuser --noinput --username=admin [email protected]"
app_path: "{{ django_dir }}"
"""
import os
import sys
from ansible.module_utils.basic import AnsibleModule
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
        msg += "\nstderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(venv_param, 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
        vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
return "Already exists" not in line
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def syncdb_filter_output(line):
return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
def migrate_filter_output(line):
return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
def collectstatic_filter_output(line):
return line and "0 static files" not in line
def main():
command_allowed_param_map = dict(
cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
syncdb=('database', ),
test=('failfast', 'testrunner', 'liveserver', 'apps', ),
validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'syncdb',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command=dict(default=None, required=True),
app_path=dict(default=None, required=True, type='path'),
settings=dict(default=None, required=False),
pythonpath=dict(default=None, required=False, aliases=['python_path']),
virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
apps=dict(default=None, required=False),
cache_table=dict(default=None, required=False),
clear=dict(default=None, required=False, type='bool'),
database=dict(default=None, required=False),
failfast=dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures=dict(default=None, required=False),
liveserver=dict(default=None, required=False, aliases=['live_server']),
testrunner=dict(default=None, required=False, aliases=['test_runner']),
skip=dict(default=None, required=False, type='bool'),
merge=dict(default=None, required=False, type='bool'),
link=dict(default=None, required=False, type='bool'),
),
)
command = module.params['command']
app_path = module.params['app_path']
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
if param in specific_boolean_params:
value = module.boolean(value)
if value and param not in command_allowed_param_map[command]:
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
for param in command_required_param_map.get(command, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command))
_ensure_virtualenv(module)
cmd = "./manage.py %s" % (command, )
if command in noinput_commands:
cmd = '%s --noinput' % cmd
for param in general_params:
if module.params[param]:
cmd = '%s --%s=%s' % (cmd, param, module.params[param])
for param in specific_boolean_params:
if module.boolean(module.params[param]):
cmd = '%s --%s' % (cmd, param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
rc, out, err = module.run_command(cmd, cwd=app_path)
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
else:
if "Unknown command:" in err:
_fail(module, cmd, err, "Unknown django command: %s" % command)
_fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command + "_filter_output", None)
if filt:
filtered_output = list(filter(filt, lines))
if len(filtered_output):
changed = True
module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
settings=module.params['settings'], pythonpath=module.params['pythonpath'])
if __name__ == '__main__':
main()
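# Illustrative result of the assembly in main() (hypothetical parameter
# values): for command=migrate with database=default, merge=yes and
# apps=polls, the loops above produce, run with cwd=app_path:
#   ./manage.py migrate --noinput --database=default --merge polls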
| mit |
40223119/2015w13 | static/Brython3.1.0-20150301-090019/Lib/sys.py | 109 | 4959 | # hack to return special attributes
from _sys import *
from javascript import JSObject
has_local_storage=__BRYTHON__.has_local_storage
has_session_storage = __BRYTHON__.has_session_storage
has_json=__BRYTHON__.has_json
argv = ['__main__']
base_exec_prefix = __BRYTHON__.brython_path
base_prefix = __BRYTHON__.brython_path
builtin_module_names=__BRYTHON__.builtin_module_names
byteorder='little'
def exc_info():
exc = __BRYTHON__.exception_stack[-1]
return (exc.__class__,exc,exc.traceback)
exec_prefix = __BRYTHON__.brython_path
executable = __BRYTHON__.brython_path+'/brython.js'
def exit(i=None):
raise SystemExit('')
class flag_class:
def __init__(self):
self.debug=0
self.inspect=0
self.interactive=0
self.optimize=0
self.dont_write_bytecode=0
self.no_user_site=0
self.no_site=0
self.ignore_environment=0
self.verbose=0
self.bytes_warning=0
self.quiet=0
self.hash_randomization=1
flags=flag_class()
def getfilesystemencoding(*args,**kw):
"""getfilesystemencoding() -> string
    Return the encoding used to convert Unicode filenames into
operating system filenames."""
return 'utf-8'
maxsize=2147483647
maxunicode=1114111
path = __BRYTHON__.path
#path_hooks = list(JSObject(__BRYTHON__.path_hooks))
meta_path=__BRYTHON__.meta_path
platform="brython"
prefix = __BRYTHON__.brython_path
version = '.'.join(str(x) for x in __BRYTHON__.version_info[:3])
version += " (default, %s) \n[Javascript 1.5] on Brython" % __BRYTHON__.compiled_date
hexversion = 0x03000000 # python 3.0
class __version_info(object):
def __init__(self, version_info):
self.version_info = version_info
self.major = version_info[0]
self.minor = version_info[1]
self.micro = version_info[2]
self.releaselevel = version_info[3]
self.serial = version_info[4]
def __getitem__(self, index):
if isinstance(self.version_info[index], list):
return tuple(self.version_info[index])
return self.version_info[index]
def hexversion(self):
try:
return '0%d0%d0%d' % (self.major, self.minor, self.micro)
        except TypeError: #probably some invalid char in minor (rc, etc)
return '0%d0000' % (self.major)
def __str__(self):
_s="sys.version(major=%d, minor=%d, micro=%d, releaselevel='%s', serial=%d)"
return _s % (self.major, self.minor, self.micro,
self.releaselevel, self.serial)
#return str(self.version_info)
def __eq__(self,other):
if isinstance(other, tuple):
return (self.major, self.minor, self.micro) == other
        raise TypeError("Error! I don't know how to compare!")
def __ge__(self,other):
if isinstance(other, tuple):
return (self.major, self.minor, self.micro) >= other
        raise TypeError("Error! I don't know how to compare!")
def __gt__(self,other):
if isinstance(other, tuple):
return (self.major, self.minor, self.micro) > other
        raise TypeError("Error! I don't know how to compare!")
def __le__(self,other):
if isinstance(other, tuple):
return (self.major, self.minor, self.micro) <= other
        raise TypeError("Error! I don't know how to compare!")
def __lt__(self,other):
if isinstance(other, tuple):
return (self.major, self.minor, self.micro) < other
        raise TypeError("Error! I don't know how to compare!")
def __ne__(self,other):
if isinstance(other, tuple):
return (self.major, self.minor, self.micro) != other
        raise TypeError("Error! I don't know how to compare!")
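# Illustrative usage sketch (assumed values, not part of the original module):
#   vi = __version_info([3, 2, 1, 'final', 0])
#   vi >= (3, 0, 0)  -> True
#   vi == (3, 2, 1)  -> True
#   vi == "3.2.1"    -> raises TypeError (only plain tuples are comparable)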
#eventually this needs to be the real python version such as 3.0, 3.1, etc
version_info=__version_info(__BRYTHON__.version_info)
class _implementation:
def __init__(self):
self.name='brython'
self.version = __version_info(__BRYTHON__.implementation)
self.hexversion = self.version.hexversion()
self.cache_tag=None
def __repr__(self):
return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
def __str__(self):
return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
implementation=_implementation()
class _hash_info:
def __init__(self):
        self.width=32
self.modulus=2147483647
self.inf=314159
self.nan=0
self.imag=1000003
self.algorithm='siphash24'
self.hash_bits=64
self.seed_bits=128
cutoff=0
    def __repr__(self):
#fix me
return "sys.hash_info(width=32, modulus=2147483647, inf=314159, nan=0, imag=1000003, algorithm='siphash24', hash_bits=64, seed_bits=128, cutoff=0)"
hash_info=_hash_info()
warnoptions=[]
def getfilesystemencoding():
return 'utf-8'
#delete objects not in python sys module namespace
del JSObject
del _implementation
| gpl-3.0 |
sserrot/champion_relationships | venv/Lib/site-packages/pythonwin/pywin/framework/editor/template.py | 7 | 1792 | import string
import win32ui
import win32api
from pywin.mfc import docview
import pywin.framework.window
import os
from . import frame
ParentEditorTemplate=docview.DocTemplate
class EditorTemplateBase(ParentEditorTemplate):
def __init__(self, res=win32ui.IDR_TEXTTYPE, makeDoc=None, makeFrame=None, makeView=None):
if makeFrame is None: makeFrame = frame.EditorFrame
ParentEditorTemplate.__init__(self, res, makeDoc, makeFrame, makeView)
def _CreateDocTemplate(self, resourceId):
assert 0, "You must override this"
def CreateWin32uiDocument(self):
assert 0, "You must override this"
def GetFileExtensions(self):
return ".txt", ".py"
def MatchDocType(self, fileName, fileType):
doc = self.FindOpenDocument(fileName)
if doc: return doc
ext = os.path.splitext(fileName)[1].lower()
if ext in self.GetFileExtensions():
return win32ui.CDocTemplate_Confidence_yesAttemptNative
return win32ui.CDocTemplate_Confidence_maybeAttemptForeign
def InitialUpdateFrame(self, frame, doc, makeVisible=1):
self._obj_.InitialUpdateFrame(frame, doc, makeVisible) # call default handler.
doc._UpdateUIForState()
def GetPythonPropertyPages(self):
"""Returns a list of property pages
"""
from . import configui
return [configui.EditorPropertyPage(), configui.EditorWhitespacePropertyPage()]
def OpenDocumentFile(self, filename, bMakeVisible = 1):
if filename is not None:
try:
path = os.path.split(filename)[0]
# print "The editor is translating", `filename`,"to",
filename = win32api.FindFiles(filename)[0][8]
filename = os.path.join(path, filename)
# print `filename`
except (win32api.error, IndexError) as details:
pass
# print "Couldnt get the full filename!", details
return self._obj_.OpenDocumentFile(filename, bMakeVisible)
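# Illustrative note (not part of the original module): with the default
# GetFileExtensions() above (".txt", ".py"), MatchDocType() reports
#   template.MatchDocType("script.py", None)  -> yesAttemptNative
#   template.MatchDocType("notes.rst", None)  -> maybeAttemptForeign
# so other registered doc templates get a chance to claim unknown files.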
| mit |
msrb/samba | python/examples/samr.py | 66 | 3870 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Unix SMB/CIFS implementation.
# Copyright © Jelmer Vernooij <[email protected]> 2008
#
# Based on samr.js © Andrew Tridgell <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
sys.path.insert(0, "bin/python")
from samba.dcerpc import samr, security
def display_lsa_string(str):
return str.string
def FillUserInfo(samr, dom_handle, users, level):
"""fill a user array with user information from samrQueryUserInfo"""
for i in range(len(users)):
user_handle = samr.OpenUser(handle, security.SEC_FLAG_MAXIMUM_ALLOWED, users[i].idx)
info = samr.QueryUserInfo(user_handle, level)
info.name = users[i].name
info.idx = users[i].idx
users[i] = info
samr.Close(user_handle)
def toArray((handle, array, num_entries)):
ret = []
for x in range(num_entries):
ret.append((array.entries[x].idx, array.entries[x].name))
return ret
def test_Connect(samr):
"""test the samr_Connect interface"""
print "Testing samr_Connect"
return samr.Connect2(None, security.SEC_FLAG_MAXIMUM_ALLOWED)
def test_LookupDomain(samr, handle, domain):
"""test the samr_LookupDomain interface"""
print "Testing samr_LookupDomain"
return samr.LookupDomain(handle, domain)
def test_OpenDomain(samr, handle, sid):
"""test the samr_OpenDomain interface"""
print "Testing samr_OpenDomain"
return samr.OpenDomain(handle, security.SEC_FLAG_MAXIMUM_ALLOWED, sid)
def test_EnumDomainUsers(samr, dom_handle):
"""test the samr_EnumDomainUsers interface"""
print "Testing samr_EnumDomainUsers"
users = toArray(samr.EnumDomainUsers(dom_handle, 0, 0, -1))
print "Found %d users" % len(users)
for idx, user in users:
print "\t%s\t(%d)" % (user.string, idx)
def test_EnumDomainGroups(samr, dom_handle):
"""test the samr_EnumDomainGroups interface"""
print "Testing samr_EnumDomainGroups"
groups = toArray(samr.EnumDomainGroups(dom_handle, 0, 0))
print "Found %d groups" % len(groups)
for idx, group in groups:
print "\t%s\t(%d)" % (group.string, idx)
def test_domain_ops(samr, dom_handle):
"""test domain specific ops"""
test_EnumDomainUsers(samr, dom_handle)
test_EnumDomainGroups(samr, dom_handle)
def test_EnumDomains(samr, handle):
"""test the samr_EnumDomains interface"""
print "Testing samr_EnumDomains"
domains = toArray(samr.EnumDomains(handle, 0, -1))
print "Found %d domains" % len(domains)
for idx, domain in domains:
print "\t%s (%d)" % (display_lsa_string(domain), idx)
for idx, domain in domains:
print "Testing domain %s" % display_lsa_string(domain)
sid = samr.LookupDomain(handle, domain)
dom_handle = test_OpenDomain(samr, handle, sid)
test_domain_ops(samr, dom_handle)
samr.Close(dom_handle)
if len(sys.argv) != 2:
print "Usage: samr.js <BINDING>"
sys.exit(1)
binding = sys.argv[1]
print "Connecting to %s" % binding
try:
samr = samr.samr(binding)
except Exception, e:
print "Failed to connect to %s: %s" % (binding, e.message)
sys.exit(1)
handle = test_Connect(samr)
test_EnumDomains(samr, handle)
samr.Close(handle)
print "All OK"
| gpl-3.0 |
cloudbase/neutron-virtualbox | neutron/plugins/embrane/common/constants.py | 47 | 2749 | # Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heleosapi import exceptions as h_exc
from neutron.plugins.common import constants
# Router specific constants
UTIF_LIMIT = 7
QUEUE_TIMEOUT = 300
class Status(object):
# Transient
CREATING = constants.PENDING_CREATE
UPDATING = constants.PENDING_UPDATE
DELETING = constants.PENDING_DELETE
# Final
ACTIVE = constants.ACTIVE
ERROR = constants.ERROR
READY = constants.INACTIVE
DELETED = "DELETED" # not visible
class Events(object):
CREATE_ROUTER = "create_router"
UPDATE_ROUTER = "update_router"
DELETE_ROUTER = "delete_router"
GROW_ROUTER_IF = "grow_router_if"
SHRINK_ROUTER_IF = "shrink_router_if"
SET_NAT_RULE = "set_nat_rule"
RESET_NAT_RULE = "reset_nat_rule"
_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s")
_DVA_NOT_FOUND_ERROR_MSG = _("Dva can't be found to execute the operation, "
"probably was cancelled through the heleos UI")
_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s")
_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken "
"for reason %s")
_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s")
_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state "
"for reason %s")
_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s")
_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend "
"router for reason %s. Please remove "
"it manually through the heleos UI")
error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG,
             h_exc.DvaNotFound: _DVA_NOT_FOUND_ERROR_MSG,
h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG,
h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG,
h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG,
h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG,
h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG,
h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG}
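# Illustrative note (not part of the original module): plugin code maps a
# heleosapi exception class to its message template via error_map and
# interpolates the exception into it, e.g.
#   error_map[h_exc.PendingDva] % "booting"
#   -> "Dva is pending for the following reason: booting"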
| apache-2.0 |
ged-lab/khmer | tests/test_graph.py | 2 | 10940 | # This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2010-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: [email protected]
# pylint: disable=missing-docstring,no-member,invalid-name,no-self-use
# pylint: disable=protected-access
import khmer
import screed
from . import khmer_tst_utils as utils
def teardown():
utils.cleanup()
class Test_ExactGraphFu(object):
def setup(self):
self.ht = khmer.Nodegraph(12, 1e4, 2)
def test_counts(self):
ht = self.ht
ht.consume_seqfile(utils.get_test_data('test-graph.fa'))
kmer = "TTAGGACTGCAC"
x = ht.calc_connected_graph_size(kmer)
assert x == 69, x
kmer = "TGCGTTTCAATC"
x = ht.calc_connected_graph_size(kmer)
assert x == 68, x
kmer = "ATACTGTAAATA"
x = ht.calc_connected_graph_size(kmer)
assert x == 36, x
def test_graph_links_next_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "A")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "C")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "G")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "T")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("A" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("C" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("G" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("T" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
class Test_InexactGraphFu(object):
def setup(self):
self.ht = khmer.Nodegraph(12, 4 ** 3 + 1, 2)
def test_graph_links_next_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "A")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "C")
x = ht.calc_connected_graph_size(word)
assert x == 2, x
def test_graph_links_next_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "G")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "T")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("A" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("C" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("G" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("T" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
#
class Test_Partitioning(object):
def test_output_unassigned(self):
filename = utils.get_test_data('random-20-a.fa')
ht = khmer.Nodegraph(21, 1, 1, primes=[5, 7, 11, 13])
ht.consume_seqfile_and_tag(filename)
output_file = utils.get_temp_filename('part0test')
ht.output_partitions(filename, output_file, True)
len1 = len(list(screed.open(filename)))
len2 = len(list(screed.open(output_file)))
assert len1 > 0
assert len1 == len2, (len1, len2)
def test_not_output_unassigned(self):
filename = utils.get_test_data('random-20-a.fa')
ht = khmer.Nodegraph(21, 1, 1, primes=[5, 7, 11, 13])
ht.consume_seqfile_and_tag(filename)
output_file = utils.get_temp_filename('parttest')
ht.output_partitions(filename, output_file, False)
len1 = len(list(screed.open(filename)))
len2 = len(list(screed.open(output_file)))
assert len1 > 0
assert len2 == 0, len2
def test_output_fq(self):
filename = utils.get_test_data('random-20-a.fq')
ht = khmer.Nodegraph(20, 1e4, 4)
ht.consume_seqfile_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
output_file = utils.get_temp_filename('parttest')
ht.output_partitions(filename, output_file, False)
print(open(output_file).read())
x = set([r.quality for r in screed.open(output_file)])
assert x, x
def test_disconnected_20_a(self):
filename = utils.get_test_data('random-20-a.fa')
ht = khmer.Nodegraph(21, 1e5, 4)
ht.consume_seqfile_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = subset.count_partitions()
assert x == (99, 0), x # disconnected @ 21
def test_connected_20_a(self):
filename = utils.get_test_data('random-20-a.fa')
ht = khmer.Nodegraph(20, 1e4, 4)
ht.consume_seqfile_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = subset.count_partitions()
assert x == (1, 0) # connected @ 20
def test_disconnected_20_b(self):
filename = utils.get_test_data('random-20-b.fa')
ht = khmer.Nodegraph(21, 1e4, 4)
ht.consume_seqfile_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = subset.count_partitions()
assert x == (99, 0), x # disconnected @ 21
def test_connected_20_b(self):
filename = utils.get_test_data('random-20-b.fa')
ht = khmer.Nodegraph(20, 1e4, 4)
ht.consume_seqfile_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = subset.count_partitions()
assert x == (1, 0) # connected @ 20
def test_disconnected_31_c(self):
filename = utils.get_test_data('random-31-c.fa')
ht = khmer.Nodegraph(32, 1e6, 4)
ht.consume_seqfile_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = subset.count_partitions()
assert x == (999, 0), x # disconnected @ K = 32
def test_connected_31_c(self):
filename = utils.get_test_data('random-31-c.fa')
ht = khmer.Nodegraph(31, 1e5, 4)
ht.consume_seqfile_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = subset.count_partitions()
assert x == (1, 0) # connected @ K = 31
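# Illustrative sketch of the pattern exercised above (assumed input file):
#   ht = khmer.Nodegraph(K, 1e5, 4)
#   ht.consume_seqfile_and_tag('reads.fa')
#   subset = ht.do_subset_partition(0, 0)
#   n_partitions, n_unassigned = subset.count_partitions()
# choosing K one above the read overlap length disconnects the graph, which
# is why each dataset is checked at two neighbouring K values.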
#
class Test_PythonAPI(object):
def test_find_all_tags_kmersize(self):
ht = khmer.Nodegraph(20, 4 ** 4 + 1, 2)
a = "ATTGGGACTCTGGGAGCACTTATCATGGAGAT"
c = "GGAGCACTTATCATGGAGATATATCCCGTGCTTAAACATCGCACTTTAACCCTGCAGAGT"
print(ht.consume(a))
try:
ht.find_all_tags(c[:19])
assert False, "should raise a ValueError for wrong k-mer size"
except ValueError:
pass
try:
ht.find_all_tags(c[:21])
assert False, "should raise a ValueError for wrong k-mer size"
except ValueError:
pass
def test_ordered_connect(self):
ht = khmer.Nodegraph(20, 4 ** 4 + 1, 2)
a = "ATTGGGACTCTGGGAGCACTTATCATGGAGAT"
b = "GAGCACTTTAACCCTGCAGAGTGGCCAAGGCT"
c = "GGAGCACTTATCATGGAGATATATCCCGTGCTTAAACATCGCACTTTAACCCTGCAGAGT"
print(ht.consume(a))
ppi = ht.find_all_tags(a[:20])
pid = ht.assign_partition_id(ppi)
assert pid == 0, pid
print(ht.consume(b))
ppi = ht.find_all_tags(b[:20])
pid = ht.assign_partition_id(ppi)
assert pid == 0, pid
print(ht.consume(c))
ppi = ht.find_all_tags(c[:20])
pid = ht.assign_partition_id(ppi)
assert pid == 2, pid
#
| bsd-3-clause |
Peddle/hue | desktop/core/ext-py/boto-2.38.0/boto/kinesis/__init__.py | 145 | 1652 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the Amazon Kinesis service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.kinesis.layer1 import KinesisConnection
return get_regions('kinesis', connection_cls=KinesisConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
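# Illustrative usage sketch (assumed region name):
#   import boto.kinesis
#   conn = boto.kinesis.connect_to_region('us-east-1')
#   # conn is None when the name matches no advertised Kinesis region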
| apache-2.0 |
pcamp/google-appengine-wx-launcher | launcher/app_unittest.py | 28 | 2973 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for app.py."""
import os
import unittest
import launcher
class NoShowApp(launcher.App):
def __init__(self):
super(NoShowApp, self).__init__()
self.displayed = False
def _DisplayMainFrame(self):
"""Override so we don't actually display UI.
Can't override by setting app._DisplayMainFrame to a new value
since this gets hit before we have a chance to override.
"""
self.displayed = True
def _InitializeLogging(self):
"""Override so logs don't throw up modal dialogs."""
pass
class NoShowNoVersionCheckApp(NoShowApp):
def _VersionCheck(self, url=None):
pass
class AppTest(unittest.TestCase):
def testOnInit(self):
app = NoShowNoVersionCheckApp()
self.assertTrue(app.Initialized())
def testVersionCheck(self):
app = NoShowApp()
warned = [False]
def fakeNewVersionNeeded(a, b, c):
warned[0] = True
app._NewVersionNeeded = fakeNewVersionNeeded
badurl = 'file://' + os.path.join(os.getcwd(),
launcher.__path__[0],
'app_unittest.py')
# silent unhappy on purpose
app._VersionCheck(badurl)
self.assertEqual(False, warned[0])
def DumpAndVersionCheck(data, app):
filename = os.tempnam()
f = open(filename, 'w')
f.write(data)
f.close()
app._VersionCheck('file:///' + filename)
return filename
# try hard to look like we're out of date
new_version_file = ('release: "9999.9999.9999"\n' +
'timestamp: 9999999999\n' +
'api_versions: [\'1\']\n')
self.assertEqual(False, warned[0])
filename = DumpAndVersionCheck(new_version_file, app)
os.unlink(filename)
self.assertEqual(True, warned[0])
warned[0] = False
# Make sure we are NOT out of date
old_version_file = ('release: "0.0.0"\n' +
'timestamp: 7\n' +
'api_versions: [\'1\']\n')
self.assertEqual(False, warned[0])
filename = DumpAndVersionCheck(old_version_file, app)
os.unlink(filename)
self.assertEqual(False, warned[0])
# VERSION file or well-defined failure string
# (depends on prefs setting...)
current = app._CurrentVersionData()
self.assertTrue('api_version' in current)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
teosz/servo | tests/wpt/css-tests/tools/manifest/item.py | 84 | 5794 | import os
import urlparse
from abc import ABCMeta, abstractmethod, abstractproperty
from utils import from_os_path, to_os_path
item_types = ["testharness", "reftest", "manual", "stub", "wdspec"]
def get_source_file(source_files, tests_root, manifest, path):
def make_new():
from sourcefile import SourceFile
return SourceFile(tests_root, path, manifest.url_base)
if source_files is None:
return make_new()
if path not in source_files:
source_files[path] = make_new()
return source_files[path]
class ManifestItem(object):
__metaclass__ = ABCMeta
item_type = None
def __init__(self, source_file, manifest=None):
self.manifest = manifest
self.source_file = source_file
@abstractproperty
def id(self):
"""The test's id (usually its url)"""
pass
@property
def path(self):
"""The test path relative to the test_root"""
return self.source_file.rel_path
@property
def https(self):
return "https" in self.source_file.meta_flags
def key(self):
"""A unique identifier for the test"""
return (self.item_type, self.id)
def meta_key(self):
"""Extra metadata that doesn't form part of the test identity, but for
        which changes mean regenerating the manifest (e.g. the test timeout)."""
return ()
def __eq__(self, other):
if not hasattr(other, "key"):
return False
return self.key() == other.key()
def __hash__(self):
return hash(self.key() + self.meta_key())
def to_json(self):
return {"path": from_os_path(self.path)}
@classmethod
def from_json(self, manifest, tests_root, obj, source_files=None):
raise NotImplementedError
class URLManifestItem(ManifestItem):
def __init__(self, source_file, url, url_base="/", manifest=None):
ManifestItem.__init__(self, source_file, manifest=manifest)
self._url = url
self.url_base = url_base
@property
def id(self):
return self.url
@property
def url(self):
return urlparse.urljoin(self.url_base, self._url)
def to_json(self):
rv = ManifestItem.to_json(self)
rv["url"] = self._url
return rv
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file,
obj["url"],
url_base=manifest.url_base,
manifest=manifest)
class TestharnessTest(URLManifestItem):
item_type = "testharness"
def __init__(self, source_file, url, url_base="/", timeout=None, manifest=None):
URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
self.timeout = timeout
def meta_key(self):
return (self.timeout,)
def to_json(self):
rv = URLManifestItem.to_json(self)
if self.timeout is not None:
rv["timeout"] = self.timeout
return rv
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file,
obj["url"],
url_base=manifest.url_base,
timeout=obj.get("timeout"),
manifest=manifest)
class RefTest(URLManifestItem):
item_type = "reftest"
def __init__(self, source_file, url, references, url_base="/", timeout=None,
viewport_size=None, dpi=None, manifest=None):
URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
for _, ref_type in references:
if ref_type not in ["==", "!="]:
raise ValueError, "Unrecognised ref_type %s" % ref_type
self.references = tuple(references)
self.timeout = timeout
self.viewport_size = viewport_size
self.dpi = dpi
@property
def is_reference(self):
return self.source_file.name_is_reference
def meta_key(self):
return (self.timeout, self.viewport_size, self.dpi)
def to_json(self):
rv = URLManifestItem.to_json(self)
rv["references"] = self.references
if self.timeout is not None:
rv["timeout"] = self.timeout
if self.viewport_size is not None:
rv["viewport_size"] = self.viewport_size
if self.dpi is not None:
rv["dpi"] = self.dpi
return rv
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file,
obj["url"],
obj["references"],
url_base=manifest.url_base,
timeout=obj.get("timeout"),
viewport_size=obj.get("viewport_size"),
dpi=obj.get("dpi"),
manifest=manifest)
class ManualTest(URLManifestItem):
item_type = "manual"
class Stub(URLManifestItem):
item_type = "stub"
class WebdriverSpecTest(ManifestItem):
item_type = "wdspec"
@property
def id(self):
return self.path
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file, manifest=manifest)
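# Illustrative note (not part of the original module): every concrete item
# round-trips through plain JSON (hypothetical manifest/tests_root values):
#   data = test.to_json()   # {"path": ..., "url": ..., ...}
#   test = TestharnessTest.from_json(manifest, tests_root, data)
# so a stored manifest only needs the JSON dict plus the item_type string
# used to pick the class.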
| mpl-2.0 |
sahitya-pavurala/luigi | test/test_ssh.py | 95 | 1796 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
from helpers import unittest
from luigi.contrib.ssh import RemoteContext
class TestMockedRemoteContext(unittest.TestCase):
def test_subprocess_delegation(self):
""" Test subprocess call structure using mock module """
orig_Popen = subprocess.Popen
self.last_test = None
def Popen(cmd, **kwargs):
self.last_test = cmd
subprocess.Popen = Popen
context = RemoteContext(
"some_host",
username="luigi",
key_file="/some/key.pub"
)
context.Popen(["ls"])
self.assertTrue("ssh" in self.last_test)
self.assertTrue("-i" in self.last_test)
self.assertTrue("/some/key.pub" in self.last_test)
self.assertTrue("luigi@some_host" in self.last_test)
self.assertTrue("ls" in self.last_test)
subprocess.Popen = orig_Popen
def test_check_output_fail_connect(self):
""" Test check_output to a non-existing host """
context = RemoteContext("__NO_HOST_LIKE_THIS__", connect_timeout=1)
self.assertRaises(
subprocess.CalledProcessError,
context.check_output, ["ls"]
)
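# Illustrative sketch of the API under test (assumed host and key path):
#   context = RemoteContext('example.com', username='luigi',
#                           key_file='/some/key.pub')
#   listing = context.check_output(['ls', '-1'])
# which roughly shells out to: ssh -i /some/key.pub [email protected] ls -1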
| apache-2.0 |
v1k45/django-notify-x | notify/models.py | 1 | 14033 | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.conf import settings
from django.db.models import QuerySet
from jsonfield.fields import JSONField
from six import python_2_unicode_compatible
from django.utils.html import escape
from django.utils.timesince import timesince
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from .utils import prefetch_relations
class NotificationQueryset(QuerySet):
"""
    Chain-able QuerySets using ``.as_manager()``.
"""
def prefetch(self):
"""
Marks the current queryset to prefetch all generic relations.
"""
qs = self.select_related()
qs._prefetch_relations = True
return qs
def _fetch_all(self):
if self._result_cache is None:
if hasattr(self, '_prefetch_relations'):
# removes the flag since prefetch_relations is recursive
del self._prefetch_relations
prefetch_relations(self)
self._prefetch_relations = True
return super(NotificationQueryset, self)._fetch_all()
def _clone(self, **kwargs):
clone = super(NotificationQueryset, self)._clone(**kwargs)
if hasattr(self, '_prefetch_relations'):
clone._prefetch_relations = True
return clone
def active(self):
"""
QuerySet filter() for retrieving both read and unread notifications
which are not soft-deleted.
:return: Non soft-deleted notifications.
"""
return self.filter(deleted=False)
def read(self):
"""
QuerySet filter() for retrieving read notifications.
:return: Read and active Notifications filter().
"""
return self.filter(deleted=False, read=True)
def unread(self):
"""
QuerySet filter() for retrieving unread notifications.
:return: Unread and active Notifications filter().
"""
return self.filter(deleted=False, read=False)
def unread_all(self, user=None):
"""
Marks all notifications as unread for a user (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as unread.
"""
qs = self.read()
if user:
qs = qs.filter(recipient=user)
qs.update(read=False)
def read_all(self, user=None):
"""
Marks all notifications as read for a user (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as read.
"""
qs = self.unread()
if user:
qs = qs.filter(recipient=user)
qs.update(read=True)
def delete_all(self, user=None):
"""
Method to soft-delete all notifications of a User (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as soft-deleted.
"""
qs = self.active()
if user:
qs = qs.filter(recipient=user)
soft_delete = getattr(settings, 'NOTIFY_SOFT_DELETE', True)
if soft_delete:
qs.update(deleted=True)
else:
qs.delete()
def active_all(self, user=None):
"""
Method to soft-delete all notifications of a User (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as soft-deleted.
"""
qs = self.deleted()
if user:
qs = qs.filter(recipient=user)
qs.update(deleted=False)
def deleted(self):
"""
QuerySet ``filter()`` for retrieving soft-deleted notifications.
:return: Soft deleted notification filter()
"""
return self.filter(deleted=True)
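# Illustrative usage sketch (assumed user object, not part of the original):
#   unread = Notification.objects.prefetch().unread().filter(recipient=user)
#   Notification.objects.read_all(user=user)    # mark everything as read
#   Notification.objects.delete_all(user=user)  # soft-delete by default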
@python_2_unicode_compatible
class Notification(models.Model):
"""
**Notification Model for storing notifications. (Yeah, too obvious)**
This model is pretty-much a replica of ``django-notifications``'s
model. The newly added fields just adds a feature to allow anonymous
``actors``, ``targets`` and ``object``.
**Attributes**:
:recipient: The user who receives notification.
:verb: Action performed by actor (not necessarily).
:description: Option description for your notification.
:actor_text: Anonymous actor who is not in content-type.
:actor_url: Since the actor is not in content-type,
a custom URL for it.
*...Same for target and obj*.
:nf_type: | Each notification is different, they must be formatted
| differently during HTML rendering. For this, each
| notification gets to carry it own *notification type*.
|
| This notification type will be used to search
| the special template for the notification located at
| ``notifications/includes/NF_TYPE.html`` of your
| template directory.
|
| The main reason to add this field is to save you
| from the pain of writing ``if...elif...else`` blocks
| in your template file just for handling how
| notifications will get rendered.
|
| With this, you can just save template for an individual
| notification type and call the *template-tag* to render
| all notifications for you without writing a single
| ``if...elif...else block``.
|
| You'll just need to do a
| ``{% render_notifications using NOTIFICATION_OBJ %}``
| and you'll get your notifications rendered.
|
| By default, every ``nf_type`` is set to ``default``.
:extra: **JSONField**, holds other optional data you want the
notification to carry in JSON format.
:deleted: Useful when you want to *soft delete* your notifications.
"""
recipient = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='notifications',
on_delete=models.CASCADE,
verbose_name=_('Notification receiver'))
# actor attributes.
actor_content_type = models.ForeignKey(
ContentType, null=True, blank=True,
related_name='notify_actor', on_delete=models.CASCADE,
verbose_name=_('Content type of actor object'))
actor_object_id = models.PositiveIntegerField(
null=True, blank=True,
verbose_name=_('ID of the actor object'))
actor_content_object = GenericForeignKey('actor_content_type',
'actor_object_id')
actor_text = models.CharField(
max_length=50, blank=True, null=True,
verbose_name=_('Anonymous text for actor'))
actor_url_text = models.CharField(
blank=True, null=True, max_length=200,
verbose_name=_('Anonymous URL for actor'))
# basic details.
verb = models.CharField(max_length=100,
verbose_name=_('Verb of the action'))
description = models.CharField(
max_length=255, blank=True, null=True,
verbose_name=_('Description of the notification'))
nf_type = models.CharField(max_length=20, default='default',
verbose_name=_('Type of notification'))
# TODO: Add a field to store notification cover images.
# target attributes.
target_content_type = models.ForeignKey(
ContentType, null=True, blank=True,
related_name='notify_target', on_delete=models.CASCADE,
verbose_name=_('Content type of target object'))
target_object_id = models.PositiveIntegerField(
null=True, blank=True,
verbose_name=_('ID of the target object'))
target_content_object = GenericForeignKey('target_content_type',
'target_object_id')
target_text = models.CharField(
max_length=50, blank=True, null=True,
verbose_name=_('Anonymous text for target'))
target_url_text = models.CharField(
blank=True, null=True, max_length=200,
verbose_name=_('Anonymous URL for target'))
# obj attributes.
obj_content_type = models.ForeignKey(
ContentType, null=True, blank=True,
related_name='notify_object', on_delete=models.CASCADE,
verbose_name=_('Content type of action object'))
obj_object_id = models.PositiveIntegerField(
null=True, blank=True,
verbose_name=_('ID of the target object'))
obj_content_object = GenericForeignKey('obj_content_type', 'obj_object_id')
obj_text = models.CharField(
max_length=50, blank=True, null=True,
verbose_name=_('Anonymous text for action object'))
obj_url_text = models.CharField(
blank=True, null=True, max_length=200,
verbose_name=_('Anonymous URL for action object'))
extra = JSONField(null=True, blank=True,
                      verbose_name=_('JSONField to store additional data'))
# Advanced details.
created = models.DateTimeField(auto_now=False, auto_now_add=True)
read = models.BooleanField(default=False,
verbose_name=_('Read status'))
deleted = models.BooleanField(default=False,
verbose_name=_('Soft delete status'))
objects = NotificationQueryset.as_manager()
class Meta(object):
ordering = ('-created', )
def __str__(self):
ctx = {
'actor': self.actor or self.actor_text,
'verb': self.verb,
'description': self.description,
'target': self.target or self.target_text,
'obj': self.obj or self.obj_text,
'at': timesince(self.created),
}
if ctx['actor']:
if not ctx['target']:
return _("{actor} {verb} {at} ago").format(**ctx)
elif not ctx['obj']:
return _("{actor} {verb} on {target} {at} ago").format(**ctx)
elif ctx['obj']:
return _(
"{actor} {verb} {obj} on {target} {at} ago").format(**ctx)
return _("{description} -- {at} ago").format(**ctx)
def mark_as_read(self):
"""
Marks notification as read
"""
self.read = True
self.save()
def mark_as_unread(self):
"""
Marks notification as unread.
"""
self.read = False
self.save()
@cached_property
def actor(self):
"""
Property to return actor object/text to keep things DRY.
:return: Actor object or Text or None.
"""
return self.actor_content_object or self.actor_text
@cached_property
def actor_url(self):
"""
Property to return permalink of the actor.
Uses ``get_absolute_url()``.
If ``get_absolute_url()`` method fails, it tries to grab URL
from ``actor_url_text``, if it fails again, returns a "#".
:return: URL for the actor.
"""
try:
url = self.actor_content_object.get_absolute_url()
except AttributeError:
url = self.actor_url_text or "#"
return url
@cached_property
def target(self):
"""
See ``actor`` property
:return: Target object or Text or None
"""
return self.target_content_object or self.target_text
@cached_property
def target_url(self):
"""
See ``actor_url`` property.
:return: URL for the target.
"""
try:
url = self.target_content_object.get_absolute_url()
except AttributeError:
url = self.target_url_text or "#"
return url
@cached_property
def obj(self):
"""
See ``actor`` property.
:return: Action Object or Text or None.
"""
return self.obj_content_object or self.obj_text
@cached_property
def obj_url(self):
"""
See ``actor_url`` property.
:return: URL for Action Object.
"""
try:
url = self.obj_content_object.get_absolute_url()
except AttributeError:
url = self.obj_url_text or "#"
return url
@staticmethod
def do_escape(obj):
"""
Method to HTML escape an object or set it to None conditionally.
        Performs ``force_text()`` on the argument so that a foreign key gets
        serialized and spits out its ``__str__`` output instead of an Object.
:param obj: Object to escape.
:return: HTML escaped and JSON-friendly data.
"""
return escape(force_text(obj)) if obj else None
def as_json(self):
"""
        Notification data as a Python dictionary, which later gets
        supplied to JsonResponse so that it gets JSON serialized
        the *django-way*.
:return: Dictionary format of the QuerySet object.
"""
data = {
"id": self.id,
"actor": self.do_escape(self.actor),
"actor_url": self.do_escape(self.actor_url),
"verb": self.do_escape(self.verb),
"description": self.do_escape(self.description),
"read": self.read,
"nf_type": self.do_escape(self.nf_type),
"target": self.do_escape(self.target),
"target_url": self.do_escape(self.target_url),
"obj": self.do_escape(self.obj),
"obj_url": self.do_escape(self.obj_url),
"created": self.created,
"data": self.extra,
}
return data
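# Illustrative sketch (hypothetical view, not part of the original module):
#   from django.http import JsonResponse
#
#   def notifications_api(request):
#       qs = request.user.notifications.active()[:20]
#       return JsonResponse({'notifications': [nf.as_json() for nf in qs]})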
| mit |
angstwad/ansible | hacking/module_formatter.py | 13 | 18768 | #!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <[email protected]> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import os
import glob
import sys
import yaml
import re
import optparse
import datetime
import cgi
import warnings
from collections import defaultdict
from jinja2 import Environment, FileSystemLoader
from six import iteritems
from ansible.utils import module_docs
from ansible.utils.vars import merge_hash
from ansible.utils.unicode import to_bytes
from ansible.errors import AnsibleError
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
TOO_OLD_TO_BE_NOTABLE = 1.3
# Get parent directory of the directory this script lives in
MODULEDIR=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))
# The name of the DOCUMENTATION template
EXAMPLE_YAML=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = " (D)"
NOTCORE = " (E)"
#####################################################################################
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
try:
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
except Exception as e:
raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
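# Illustrative behaviour of the conversion above (not part of the original):
#   rst_ify("I(war) and B(peace)")  -> '*war* and **peace**'
#   rst_ify("see M(copy), C(dd)")   -> 'see :ref:`copy <copy>`, ``dd``'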
#####################################################################################
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = cgi.escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
#####################################################################################
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
#####################################################################################
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
#####################################################################################
def write_data(text, options, outputname, module):
''' dumps module output to a file or the screen, as requested '''
if options.output_dir is not None:
fname = os.path.join(options.output_dir, outputname % module)
fname = fname.replace(".py","")
f = open(fname, 'w')
f.write(text.encode('utf-8'))
f.close()
else:
print(text)
#####################################################################################
def list_modules(module_dir, depth=0):
''' returns a hash of categories, each category being a hash of module names to file paths '''
categories = dict()
module_info = dict()
aliases = defaultdict(set)
# * windows powershell modules have documentation stubs in python docstring
# format (they are not executed) so skip the ps1 format files
# * One glob level for every module level that we're going to traverse
files = glob.glob("%s/*.py" % module_dir) + glob.glob("%s/*/*.py" % module_dir) + glob.glob("%s/*/*/*.py" % module_dir) + glob.glob("%s/*/*/*/*.py" % module_dir)
for module_path in files:
if module_path.endswith('__init__.py'):
continue
category = categories
mod_path_only = os.path.dirname(module_path[len(module_dir) + 1:])
# Start at the second directory because we don't want the "vendor"
# directories (core, extras)
for new_cat in mod_path_only.split('/')[1:]:
if new_cat not in category:
category[new_cat] = dict()
category = category[new_cat]
module = os.path.splitext(os.path.basename(module_path))[0]
if module in module_docs.BLACKLIST_MODULES:
# Do not list blacklisted modules
continue
if module.startswith("_") and os.path.islink(module_path):
source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
module = module.replace("_","",1)
aliases[source].add(module)
continue
category[module] = module_path
module_info[module] = module_path
# keep module tests out of becoming module docs
if 'test' in categories:
del categories['test']
return module_info, categories, aliases
#####################################################################################
def generate_parser():
''' generate an optparse parser '''
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options] arg1 arg2',
description='Generate module documentation from metadata',
)
p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
p.add_option('-V', action='version', help='Show version number and exit')
return p
#####################################################################################
def jinja2_environment(template_dir, typ):
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True,
)
env.globals['xline'] = rst_xline
if typ == 'rst':
env.filters['convert_symbols_to_format'] = rst_ify
env.filters['html_ify'] = html_ify
env.filters['fmt'] = rst_fmt
env.filters['xline'] = rst_xline
template = env.get_template('rst.j2')
outputname = "%s_module.rst"
else:
raise Exception("unknown module format type: %s" % typ)
return env, template, outputname
#####################################################################################
def too_old(added):
if not added:
return False
try:
added_tokens = str(added).split(".")
readded = added_tokens[0] + "." + added_tokens[1]
added_float = float(readded)
except ValueError as e:
warnings.warn("Could not parse %s: %s" % (added, str(e)))
return False
    return (added_float < TOO_OLD_TO_BE_NOTABLE)
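# Illustrative behaviour (not part of the original):
#   too_old("1.2")    -> True   (below TOO_OLD_TO_BE_NOTABLE == 1.3)
#   too_old("2.0.1")  -> False
#   too_old(None)     -> False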
def process_module(module, options, env, template, outputname, module_map, aliases):
fname = module_map[module]
if isinstance(fname, dict):
return "SKIPPED"
basename = os.path.basename(fname)
deprecated = False
# ignore files with extensions
if not basename.endswith(".py"):
return
elif module.startswith("_"):
if os.path.islink(fname):
return # ignore, its an alias
deprecated = True
module = module.replace("_","",1)
print("rendering: %s" % module)
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
# crash if module is missing documentation and not explicitly hidden from docs index
if doc is None:
sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if deprecated and 'deprecated' not in doc:
sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if "/core/" in fname:
doc['core'] = True
else:
doc['core'] = False
if module in aliases:
doc['aliases'] = aliases[module]
all_keys = []
if not 'version_added' in doc:
sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
sys.exit(1)
added = 0
if doc['version_added'] == 'historical':
del doc['version_added']
else:
added = doc['version_added']
# don't show version added information if it's too old to be called out
if too_old(added):
del doc['version_added']
if 'options' in doc and doc['options']:
for (k,v) in iteritems(doc['options']):
# don't show version added information if it's too old to be called out
if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
del doc['options'][k]['version_added']
if not 'description' in doc['options'][k]:
raise AnsibleError("Missing required description for option %s in %s " % (k, module))
required_value = doc['options'][k].get('required', False)
if not isinstance(required_value, bool):
raise AnsibleError("Invalid required value '%s' for option '%s' in '%s' (must be truthy)" % (required_value, k, module))
if not isinstance(doc['options'][k]['description'],list):
doc['options'][k]['description'] = [doc['options'][k]['description']]
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = fname
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
if returndocs:
try:
doc['returndocs'] = yaml.safe_load(returndocs)
except:
print("could not load yaml: %s" % returndocs)
raise
else:
doc['returndocs'] = None
# here is where we build the table of contents...
try:
text = template.render(doc)
except Exception as e:
raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e)))
write_data(text, options, outputname, module)
return doc['short_description']
#####################################################################################
def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
modstring = module
if modstring.startswith('_'):
modstring = module[1:]
modname = modstring
if module in deprecated:
modstring = modstring + DEPRECATED
elif module not in core:
modstring = modstring + NOTCORE
category_file.write(" %s - %s <%s_module>\n" % (to_bytes(modstring), to_bytes(rst_ify(module_map[module][1])), to_bytes(modname)))
def process_category(category, categories, options, env, template, outputname):
### FIXME:
# We no longer conceptually deal with a mapping of category names to
# modules to file paths. Instead we want several different records:
# (1) Mapping of module names to file paths (what's presently used
# as categories['all']
# (2) Mapping of category names to lists of module names (what you'd
# presently get from categories[category_name][subcategory_name].keys()
# (3) aliases (what's presently in categories['_aliases']
#
# list_modules() now returns those. Need to refactor this function and
# main to work with them.
module_map = categories[category]
module_info = categories['all']
aliases = {}
if '_aliases' in categories:
aliases = categories['_aliases']
category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
category_file = open(category_file_path, "w")
print("*** recording category %s in %s ***" % (category, category_file_path))
# start a new category file
category = category.replace("_"," ")
category = category.title()
modules = []
deprecated = []
core = []
for module in module_map.keys():
if isinstance(module_map[module], dict):
for mod in (m for m in module_map[module].keys() if m in module_info):
if mod.startswith("_"):
deprecated.append(mod)
elif '/core/' in module_info[mod][0]:
core.append(mod)
else:
if module not in module_info:
continue
if module.startswith("_"):
deprecated.append(module)
elif '/core/' in module_info[module][0]:
core.append(module)
modules.append(module)
modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
category_header = "%s Modules" % (category.title())
underscores = "`" * len(category_header)
category_file.write("""\
%s
%s
.. toctree:: :maxdepth: 1
""" % (category_header, underscores))
sections = []
for module in modules:
if module in module_map and isinstance(module_map[module], dict):
sections.append(module)
continue
else:
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases)
sections.sort()
for section in sections:
category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
category_file.write(".. toctree:: :maxdepth: 1\n\n")
section_modules = module_map[section].keys()
section_modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
#for module in module_map[section]:
for module in (m for m in section_modules if m in module_info):
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases)
category_file.write("""\n\n
.. note::
- %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale.
- %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules.
- Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, extras tickets to `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_
""" % (DEPRECATED, NOTCORE))
category_file.close()
# TODO: end a new category file
#####################################################################################
def validate_options(options):
''' validate option parser options '''
if not options.module_dir:
print("--module-dir is required", file=sys.stderr)
sys.exit(1)
if not os.path.exists(options.module_dir):
print("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr)
sys.exit(1)
if not options.template_dir:
print("--template-dir must be specified")
sys.exit(1)
#####################################################################################
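# Typical invocation (a sketch: --module-dir and --template-dir are the flags
# checked by validate_options above; the remaining flag names are assumptions
# about the option parser defined earlier in this script):
#   hacking/module_formatter.py --module-dir=lib/ansible/modules \
#       --template-dir=hacking/templates --output-dir=docsite/rst --type=rst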
def main():
p = generate_parser()
(options, args) = p.parse_args()
validate_options(options)
env, template, outputname = jinja2_environment(options.template_dir, options.type)
mod_info, categories, aliases = list_modules(options.module_dir)
categories['all'] = mod_info
categories['_aliases'] = aliases
category_names = [c for c in categories.keys() if not c.startswith('_')]
category_names.sort()
# Write master category list
category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
with open(category_list_path, "w") as category_list_file:
category_list_file.write("Module Index\n")
category_list_file.write("============\n")
category_list_file.write("\n\n")
category_list_file.write(".. toctree::\n")
category_list_file.write(" :maxdepth: 1\n\n")
for category in category_names:
category_list_file.write(" list_of_%s_modules\n" % category)
#
# Import all the docs into memory
#
module_map = mod_info.copy()
for modname in module_map:
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result == 'SKIPPED':
del categories['all'][modname]
else:
categories['all'][modname] = (categories['all'][modname], result)
#
# Render all the docs to rst via category pages
#
for category in category_names:
process_category(category, categories, options, env, template, outputname)
if __name__ == '__main__':
main()
| gpl-3.0 |
jayceyxc/hue | desktop/libs/hadoop/src/hadoop/mini_cluster.py | 10 | 18183 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################
## WARNING!!! ##
## This file is stale. Hadoop 0.23 and CDH4 ##
## do not support minicluster. This is replaced ##
## by webhdfs.py, to set up a running cluster. ##
#######################################################
# A Python-side driver for MiniHadoopClusterManager
#
# See README.testing for hints on how to use this,
# and also look for other examples.
#
# If you have one of these running and want to figure out what ports
# are open, one way to do so is something like:
# for p in $(lsof -p 63564 | grep LISTEN | sed -e 's/.*:\([0-9][0-9]*\).*/\1/')
# do
# echo $p
# echo "GET /" | nc -w 1 localhost $p
# done
import atexit
import subprocess
import os
import pwd
import logging
import sys
import signal
import shutil
import socket
import time
import tempfile
import json
import lxml.etree
import urllib2
from desktop.lib import python_util
from desktop.lib.test_utils import clear_sys_caches, restore_sys_caches
from hadoop.fs.hadoopfs import HadoopFileSystem
from hadoop.job_tracker import LiveJobTracker
import hadoop.cluster
# Starts mini cluster suspended until a debugger attaches to it.
DEBUG_HADOOP=False
# Redirects mini cluster stderr to stderr. (Default is to put it in a file.)
USE_STDERR=os.environ.get("MINI_CLUSTER_USE_STDERR", False)
# Whether to clean up temp dir at exit
CLEANUP_TMP_DIR=os.environ.get("MINI_CLUSTER_CLEANUP", True)
# How long to wait for cluster to start up. (seconds)
MAX_CLUSTER_STARTUP_TIME = 120.0
# List of classes to be used as plugins for the JT of the cluster.
CLUSTER_JT_PLUGINS = 'org.apache.hadoop.thriftfs.ThriftJobTrackerPlugin'
# MR Task Scheduler. By default use the FIFO scheduler
CLUSTER_TASK_SCHEDULER='org.apache.hadoop.mapred.JobQueueTaskScheduler'
# MR queue names
CLUSTER_QUEUE_NAMES='default'
STARTUP_CONFIGS={}
# users and their groups which are used in Hue tests.
TEST_USER_GROUP_MAPPING = {
'test': ['test','users','supergroup'], 'chown_test': ['chown_test'],
'notsuperuser': ['notsuperuser'], 'gamma': ['gamma'],
'webui': ['webui'], 'hue': ['supergroup']
}
LOGGER=logging.getLogger(__name__)
class MiniHadoopCluster(object):
"""
Manages the invocation of a MiniHadoopClusterManager from Python.
"""
def __init__(self, num_datanodes=1, num_tasktrackers=1):
# These are cached
self._jt, self._fs = None, None
self.num_datanodes = num_datanodes
self.num_tasktrackers = num_tasktrackers
def start(self, extra_configs=None):
"""
Start a cluster as a subprocess.
"""
self.tmpdir = tempfile.mkdtemp()
if not extra_configs:
extra_configs = {}
def tmppath(filename):
"""Creates paths in tmpdir."""
return os.path.join(self.tmpdir, filename)
LOGGER.info("Using temporary directory: %s" % self.tmpdir)
in_conf_dir = tmppath("in-conf")
os.mkdir(in_conf_dir)
self.log_dir = tmppath("logs")
os.mkdir(self.log_dir)
f = file(os.path.join(in_conf_dir, "hadoop-metrics.properties"), "w")
try:
f.write("""
dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
mapred.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
jvm.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
""")
finally:
f.close()
if self.superuser not in TEST_USER_GROUP_MAPPING:
TEST_USER_GROUP_MAPPING[self.superuser] = [self.superuser]
_write_static_group_mapping(TEST_USER_GROUP_MAPPING,
tmppath('ugm.properties'))
core_configs = {
'hadoop.proxyuser.%s.groups' % (self.superuser,): 'users,supergroup',
'hadoop.proxyuser.%s.hosts' % (self.superuser,): 'localhost',
'mapred.jobtracker.plugins': CLUSTER_JT_PLUGINS}
extra_configs.update(STARTUP_CONFIGS)
write_config(core_configs, tmppath('in-conf/core-site.xml'))
write_config({'mapred.jobtracker.taskScheduler': CLUSTER_TASK_SCHEDULER,
'mapred.queue.names': CLUSTER_QUEUE_NAMES},
tmppath('in-conf/mapred-site.xml'))
hadoop_policy_keys = ['client', 'client.datanode', 'datanode', 'inter.datanode', 'namenode', 'inter.tracker', 'job.submission', 'task.umbilical', 'refresh.policy', 'admin.operations']
hadoop_policy_config = {}
for policy in hadoop_policy_keys:
hadoop_policy_config['security.' + policy + '.protocol.acl'] = '*'
write_config(hadoop_policy_config, tmppath('in-conf/hadoop-policy.xml'))
details_file = file(tmppath("details.json"), "w+")
try:
args = [ os.path.join(hadoop.conf.HADOOP_MR1_HOME.get(), 'bin', 'hadoop'),
"jar",
hadoop.conf.HADOOP_TEST_JAR.get(),
"minicluster",
"-writeConfig", tmppath("config.xml"),
"-writeDetails", tmppath("details.json"),
"-datanodes", str(self.num_datanodes),
"-tasktrackers", str(self.num_tasktrackers),
"-useloopbackhosts",
"-D", "hadoop.tmp.dir=%s" % self.tmpdir,
"-D", "mapred.local.dir=%s/mapred/local" % self.tmpdir,
"-D", "mapred.system.dir=/mapred/system",
"-D", "mapred.temp.dir=/mapred/temp",
"-D", "jobclient.completion.poll.interval=100",
"-D", "jobclient.progress.monitor.poll.interval=100",
"-D", "fs.checkpoint.period=1",
# For a reason I don't fully understand, this must be 0.0.0.0 and not 'localhost'
"-D", "dfs.secondary.http.address=0.0.0.0:%d" % python_util.find_unused_port(),
# We bind the NN's thrift interface to a port we find here.
# This is suboptimal, since there's a race. Alas, if we don't
# do this here, the datanodes fail to discover the namenode's thrift
# address, and there's a race there
"-D", "dfs.thrift.address=localhost:%d" % python_util.find_unused_port(),
"-D", "jobtracker.thrift.address=localhost:%d" % python_util.find_unused_port(),
# Jobs realize they have finished faster with this timeout.
"-D", "jobclient.completion.poll.interval=50",
"-D", "hadoop.security.authorization=true",
"-D", "hadoop.policy.file=%s/hadoop-policy.xml" % in_conf_dir,
]
for key,value in extra_configs.iteritems():
args.append("-D")
args.append(key + "=" + value)
env = {}
env["HADOOP_CONF_DIR"] = in_conf_dir
env["HADOOP_OPTS"] = "-Dtest.build.data=%s" % (self.tmpdir, )
env["HADOOP_CLASSPATH"] = ':'.join([
# -- BEGIN JAVA TRIVIA --
# Add the -test- jar to the classpath to work around a subtle issue
# involving Java classloaders. In brief, hadoop's RunJar class creates
# a child classloader with the test jar on it, but the core classes
# are loaded by the system classloader. This is fine except that
# some classes in the test jar extend package-protected classes in the
# core jar. Even though the classes are in the same package name, they
# are thus loaded by different classloaders and therefore an IllegalAccessError
# prevents the MiniMRCluster from starting. Adding the test jar to the system
# classpath prevents this error since then both the MiniMRCluster and the
# core classes are loaded by the system classloader.
hadoop.conf.HADOOP_TEST_JAR.get(),
# -- END JAVA TRIVIA --
hadoop.conf.HADOOP_PLUGIN_CLASSPATH.get(),
# Due to CDH-4537, we need to add test dependencies to run minicluster
os.path.join(os.path.dirname(__file__), 'test_jars', '*'),
])
env["HADOOP_HEAPSIZE"] = "128"
env["HADOOP_HOME"] = hadoop.conf.HADOOP_MR1_HOME.get()
env["HADOOP_LOG_DIR"] = self.log_dir
env["USER"] = self.superuser
if "JAVA_HOME" in os.environ:
env["JAVA_HOME"] = os.environ["JAVA_HOME"]
# Wait for the debugger to attach
if DEBUG_HADOOP:
env["HADOOP_OPTS"] = env.get("HADOOP_OPTS", "") + " -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=9999"
if USE_STDERR:
stderr=sys.stderr
else:
stderr=file(tmppath("stderr"), "w")
LOGGER.debug("Starting minicluster: %s env: %s" % (repr(args), repr(env)))
self.clusterproc = subprocess.Popen(
args=args,
stdout=file(tmppath("stdout"), "w"),
stderr=stderr,
env=env)
details = {}
start = time.time()
# We consider the cluster started when the details file parses correct JSON.
# MiniHadoopCluster currently writes the details file last, and this depends
# on that.
while not details:
try:
details_file.seek(0)
details = json.load(details_file)
except ValueError:
pass
if self.clusterproc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
if not USE_STDERR:
LOGGER.debug("stderr:" + file(tmppath("stderr")).read())
self.stop()
raise Exception("Cluster process quit or is taking too long to start. Aborting.")
finally:
details_file.close()
LOGGER.debug("Successfully started minicluster")
# Place all the details as attributes on self.
for k, v in details.iteritems():
setattr(self, k, v)
# Parse the configuration using XPath and place into self.config.
config = lxml.etree.parse(tmppath("config.xml"))
self.config = dict( (property.find("./name").text, property.find("./value").text)
for property in config.xpath("/configuration/property"))
# Write out Hadoop-style configuration directory,
# which can, in turn, be used for /bin/hadoop.
self.config_dir = tmppath("conf")
os.mkdir(self.config_dir)
hadoop.conf.HADOOP_CONF_DIR.set_for_testing(self.config_dir)
write_config(self.config, tmppath("conf/core-site.xml"),
["fs.defaultFS", "jobclient.completion.poll.interval",
"dfs.namenode.checkpoint.period", "dfs.namenode.checkpoint.dir",
'hadoop.proxyuser.'+self.superuser+'.groups', 'hadoop.proxyuser.'+self.superuser+'.hosts'])
write_config(self.config, tmppath("conf/hdfs-site.xml"), ["fs.defaultFS", "dfs.namenode.http-address", "dfs.namenode.secondary.http-address"])
# mapred.job.tracker isn't written out into self.config, so we fill
# that one out more manually.
write_config({ 'mapred.job.tracker': 'localhost:%d' % self.jobtracker_port },
tmppath("conf/mapred-site.xml"))
write_config(hadoop_policy_config, tmppath('conf/hadoop-policy.xml'))
# Once the config is written out, we can start the 2NN.
args = [hadoop.conf.HADOOP_BIN.get(),
'--config', self.config_dir,
'secondarynamenode']
LOGGER.debug("Starting 2NN at: " +
self.config['dfs.secondary.http.address'])
LOGGER.debug("2NN command: %s env: %s" % (repr(args), repr(env)))
self.secondary_proc = subprocess.Popen(
args=args,
stdout=file(tmppath("stdout.2nn"), "w"),
stderr=file(tmppath("stderr.2nn"), "w"),
env=env)
while True:
try:
response = urllib2.urlopen(urllib2.Request('http://' +
self.config['dfs.secondary.http.address']))
except urllib2.URLError:
# If we should abort startup.
if self.secondary_proc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
if not USE_STDERR:
LOGGER.debug("stderr:" + file(tmppath("stderr")).read())
self.stop()
raise Exception("2nn process quit or is taking too long to start. Aborting.")
break
else:
time.sleep(1)
continue
# We didn't get a URLError. 2NN started successfully.
response.close()
break
LOGGER.debug("Successfully started 2NN")
def stop(self):
"""
Kills the cluster ungracefully.
"""
if self.clusterproc and self.clusterproc.poll() is None:
os.kill(self.clusterproc.pid, signal.SIGKILL)
self.clusterproc.wait()
if self.secondary_proc and self.secondary_proc.poll() is None:
os.kill(self.secondary_proc.pid, signal.SIGKILL)
self.secondary_proc.wait()
if CLEANUP_TMP_DIR != 'false':
logging.info("Cleaning up self.tmpdir. Use $MINI_CLUSTER_CLEANUP to avoid.")
shutil.rmtree(self.tmpdir)
@property
def fs(self):
"""Creates a HadoopFileSystem object configured for this cluster."""
if self._fs is None:
self._fs = HadoopFileSystem("localhost",
thrift_port=self.namenode_thrift_port,
hdfs_port=self.namenode_port,
hadoop_bin_path=hadoop.conf.HADOOP_BIN.get())
return self._fs
@property
def jt(self):
"""Creates a LiveJobTracker object configured for this cluster."""
if self._jt is None:
self._jt = LiveJobTracker("localhost", self.jobtracker_thrift_port)
return self._jt
@property
def superuser(self):
"""
Returns the "superuser" of this cluster.
This is essentially the user that the cluster was started
with.
"""
return pwd.getpwuid(os.getuid()).pw_name
@property
def namenode_thrift_port(self):
"""
Return the namenode thrift port.
"""
_, port = self.config["dfs.thrift.address"].split(":")
return int(port)
@property
def jobtracker_thrift_port(self):
"""
Return the jobtracker thrift port.
"""
_, port = self.config["jobtracker.thrift.address"].split(":")
return int(port)
def dump_ini(self, fd=sys.stdout):
"""
Dumps an ini-style configuration suitable for configuring desktop
to talk to this cluster.
TODO(todd) eventually this should use config framework 'writeback'
support
@param fd: a file-like writable object
"""
print >>fd, "[hadoop]"
print >>fd, "[[hdfs_clusters]]"
print >>fd, "[[[default]]]"
print >>fd, "thrift_port=%d" % self.namenode_thrift_port
print >>fd, "[[mapred_clusters]]"
print >>fd, "[[[default]]]"
print >>fd, "thrift_port=%d" % self.jobtracker_thrift_port
# Shared global cluster returned by shared_cluster context manager.
_shared_cluster = None
def shared_cluster(conf=False):
"""
Use a shared cluster that is initialized on demand,
and that is torn down at process exit.
If conf is True, then configuration is updated to
reference the cluster, and relevant caches are cleared.
  Returns the cluster object; call its ``shutdown`` attribute (a
  function) when you are done with the shared cluster.
"""
cluster = shared_cluster_internal()
closers = [ ]
if conf:
closers.extend([
hadoop.conf.HDFS_CLUSTERS["default"].NN_HOST.set_for_testing("localhost"),
hadoop.conf.HDFS_CLUSTERS["default"].NN_HDFS_PORT.set_for_testing(cluster.namenode_port),
hadoop.conf.MR_CLUSTERS["default"].HOST.set_for_testing("localhost"),
hadoop.conf.MR_CLUSTERS["default"].JT_THRIFT_PORT.set_for_testing(cluster.jt.thrift_port),
])
# Clear the caches
# This is djanky (that's django for "janky").
    # Caches are tricky w.r.t. testing;
# perhaps there are better patterns?
old_caches = clear_sys_caches()
def finish():
if conf:
restore_sys_caches(old_caches)
for x in closers:
x()
# We don't run the cluster's real stop method,
# because a shared cluster should be shutdown at
# exit.
cluster.shutdown = finish
return cluster
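# Typical test usage (a sketch; the mkdir call is only illustrative of the
# HadoopFileSystem object exposed as cluster.fs, not a call verified here):
#   cluster = shared_cluster(conf=True)
#   try:
#     cluster.fs.mkdir('/tmp/foo')
#   finally:
#     cluster.shutdown()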
def write_config(config, path, variables=None):
"""
Minimal utility to write Hadoop-style configuration
from a configuration map (config), into a new file
called path.
"""
f = file(path, "w")
try:
f.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
""")
    keys = variables if variables else config.keys()
for name in keys:
value = config[name]
f.write(" <property>\n")
f.write(" <name>%s</name>\n" % name)
f.write(" <value>%s</value>\n" % value)
f.write(" </property>\n")
f.write("</configuration>\n")
finally:
f.close()
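# Example (a sketch): write_config({'fs.defaultFS': 'hdfs://localhost:8020'},
# '/tmp/core-site.xml') writes:
#   <?xml version="1.0"?>
#   <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
#   <configuration>
#     <property>
#       <name>fs.defaultFS</name>
#       <value>hdfs://localhost:8020</value>
#     </property>
#   </configuration>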
def _write_static_group_mapping(user_group_mapping, path):
"""
Create a Java-style .properties file to contain the static user -> group
mapping used by tests.
"""
f = file(path, 'w')
try:
for user, groups in user_group_mapping.iteritems():
f.write('%s = %s\n' % (user, ','.join(groups)))
finally:
f.close()
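# For example (a sketch), the TEST_USER_GROUP_MAPPING defined above would
# produce lines such as:
#   test = test,users,supergroup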
def shared_cluster_internal():
"""
Manages _shared_cluster.
"""
global _shared_cluster
if _shared_cluster is None:
_shared_cluster = MiniHadoopCluster()
_shared_cluster.start()
atexit.register(_shared_cluster.stop)
return _shared_cluster
if __name__ == '__main__':
"""
It's poor form to write tests for tests (the world-wide stack
overflow exception), so this merely tries the code.
"""
logging.basicConfig(level=logging.DEBUG)
import desktop
desktop.lib.conf.initialize([hadoop.conf])
  cluster = MiniHadoopCluster(num_datanodes=5, num_tasktrackers=5)
  cluster.start()
  print cluster.namenode_port
  print cluster.jobtracker_port
  print cluster.config.get("dfs.thrift.address")
  cluster.dump_ini(sys.stdout)
  from IPython.Shell import IPShellEmbed
  IPShellEmbed()()
  cluster.stop()
| apache-2.0 |
Sarsate/compute-image-packages | google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py | 1 | 4640 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for clock_skew_daemon.py module."""
import subprocess
from google_compute_engine.clock_skew import clock_skew_daemon
from google_compute_engine.test_compat import mock
from google_compute_engine.test_compat import unittest
class ClockSkewDaemonTest(unittest.TestCase):
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.metadata_watcher')
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.logger.Logger')
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.file_utils.LockFile')
def testClockSkewDaemon(self, mock_lock, mock_logger, mock_watcher):
mocks = mock.Mock()
mocks.attach_mock(mock_lock, 'lock')
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
metadata_key = clock_skew_daemon.ClockSkewDaemon.drift_token
mock_logger.return_value = mock_logger
mock_watcher.MetadataWatcher.return_value = mock_watcher
with mock.patch.object(
clock_skew_daemon.ClockSkewDaemon, 'HandleClockSync') as mock_handle:
clock_skew_daemon.ClockSkewDaemon()
expected_calls = [
mock.call.logger(name=mock.ANY, debug=False, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger),
mock.call.lock(clock_skew_daemon.LOCKFILE),
mock.call.lock().__enter__(),
mock.call.logger.info(mock.ANY),
mock.call.watcher.WatchMetadata(
mock_handle, metadata_key=metadata_key, recursive=False),
mock.call.lock().__exit__(None, None, None),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.metadata_watcher')
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.logger.Logger')
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.file_utils.LockFile')
def testClockSkewDaemonError(self, mock_lock, mock_logger, mock_watcher):
mocks = mock.Mock()
mocks.attach_mock(mock_lock, 'lock')
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mock_lock.side_effect = IOError('Test Error')
mock_logger.return_value = mock_logger
with mock.patch.object(
clock_skew_daemon.ClockSkewDaemon, 'HandleClockSync'):
clock_skew_daemon.ClockSkewDaemon(debug=True)
expected_calls = [
mock.call.logger(name=mock.ANY, debug=True, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger),
mock.call.lock(clock_skew_daemon.LOCKFILE),
mock.call.logger.warning('Test Error'),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.subprocess.check_call')
def testHandleClockSync(self, mock_call):
command = ['/sbin/hwclock', '--hctosys']
mock_sync = mock.create_autospec(clock_skew_daemon.ClockSkewDaemon)
mock_logger = mock.Mock()
mock_sync.logger = mock_logger
clock_skew_daemon.ClockSkewDaemon.HandleClockSync(mock_sync, 'Response')
mock_call.assert_called_once_with(command)
expected_calls = [
mock.call.info(mock.ANY, 'Response'),
mock.call.info(mock.ANY),
]
self.assertEqual(mock_logger.mock_calls, expected_calls)
@mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.subprocess.check_call')
def testHandleClockSyncError(self, mock_call):
command = ['/sbin/hwclock', '--hctosys']
mock_sync = mock.create_autospec(clock_skew_daemon.ClockSkewDaemon)
mock_logger = mock.Mock()
mock_sync.logger = mock_logger
mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
clock_skew_daemon.ClockSkewDaemon.HandleClockSync(mock_sync, 'Response')
mock_call.assert_called_once_with(command)
expected_calls = [
mock.call.info(mock.ANY, 'Response'),
mock.call.warning(mock.ANY),
]
self.assertEqual(mock_logger.mock_calls, expected_calls)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
blindFS/powerline | tests/lib/vterm.py | 23 | 4580 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import ctypes
from powerline.lib.unicode import unicode, unichr, tointiter
class CTypesFunction(object):
def __init__(self, library, name, rettype, args):
self.name = name
self.prototype = ctypes.CFUNCTYPE(rettype, *[
arg[1] for arg in args
])
self.args = args
self.func = self.prototype((name, library), tuple((
(1, arg[0]) for arg in args
)))
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __repr__(self):
return '{cls}(<library>, {name!r}, {rettype!r}, {args!r})'.format(
cls=self.__class__.__name__,
**self.__dict__
)
class CTypesLibraryFuncsCollection(object):
def __init__(self, lib, **kwargs):
self.lib = lib
library_loader = ctypes.LibraryLoader(ctypes.CDLL)
library = library_loader.LoadLibrary(lib)
self.library = library
for name, args in kwargs.items():
self.__dict__[name] = CTypesFunction(library, name, *args)
class VTermPos_s(ctypes.Structure):
_fields_ = (
('row', ctypes.c_int),
('col', ctypes.c_int),
)
class VTermColor_s(ctypes.Structure):
_fields_ = (
('red', ctypes.c_uint8),
('green', ctypes.c_uint8),
('blue', ctypes.c_uint8),
)
class VTermScreenCellAttrs_s(ctypes.Structure):
_fields_ = (
('bold', ctypes.c_uint, 1),
('underline', ctypes.c_uint, 2),
('italic', ctypes.c_uint, 1),
('blink', ctypes.c_uint, 1),
('reverse', ctypes.c_uint, 1),
('strike', ctypes.c_uint, 1),
('font', ctypes.c_uint, 4),
('dwl', ctypes.c_uint, 1),
('dhl', ctypes.c_uint, 2),
)
VTERM_MAX_CHARS_PER_CELL = 6
class VTermScreenCell_s(ctypes.Structure):
_fields_ = (
('chars', ctypes.ARRAY(ctypes.c_uint32, VTERM_MAX_CHARS_PER_CELL)),
('width', ctypes.c_char),
('attrs', VTermScreenCellAttrs_s),
('fg', VTermColor_s),
('bg', VTermColor_s),
)
VTerm_p = ctypes.c_void_p
VTermScreen_p = ctypes.c_void_p
def get_functions(lib):
return CTypesLibraryFuncsCollection(
lib,
vterm_new=(VTerm_p, (
('rows', ctypes.c_int),
('cols', ctypes.c_int)
)),
vterm_obtain_screen=(VTermScreen_p, (('vt', VTerm_p),)),
vterm_set_size=(None, (
('vt', VTerm_p),
('rows', ctypes.c_int),
('cols', ctypes.c_int)
)),
vterm_screen_reset=(None, (
('screen', VTermScreen_p),
('hard', ctypes.c_int)
)),
vterm_input_write=(ctypes.c_size_t, (
('vt', VTerm_p),
('bytes', ctypes.POINTER(ctypes.c_char)),
('size', ctypes.c_size_t),
)),
vterm_screen_get_cell=(ctypes.c_int, (
('screen', VTermScreen_p),
('pos', VTermPos_s),
('cell', ctypes.POINTER(VTermScreenCell_s))
)),
vterm_free=(None, (('vt', VTerm_p),)),
)
class VTermColor(object):
__slots__ = ('red', 'green', 'blue')
def __init__(self, color):
self.red = color.red
self.green = color.green
self.blue = color.blue
@property
def color_key(self):
return (self.red, self.green, self.blue)
class VTermScreenCell(object):
def __init__(self, vtsc):
for field in VTermScreenCellAttrs_s._fields_:
field_name = field[0]
setattr(self, field_name, getattr(vtsc.attrs, field_name))
self.text = ''.join((
unichr(vtsc.chars[i]) for i in range(VTERM_MAX_CHARS_PER_CELL)
)).rstrip('\x00')
self.width = next(tointiter(vtsc.width))
self.fg = VTermColor(vtsc.fg)
self.bg = VTermColor(vtsc.bg)
self.cell_properties_key = (
self.fg.color_key,
self.bg.color_key,
self.bold,
self.underline,
self.italic,
)
class VTermScreen(object):
def __init__(self, functions, screen):
self.functions = functions
self.screen = screen
def __getitem__(self, position):
pos = VTermPos_s(*position)
cell = VTermScreenCell_s()
ret = self.functions.vterm_screen_get_cell(self.screen, pos, cell)
if ret != 1:
raise ValueError('vterm_screen_get_cell returned {0}'.format(ret))
return VTermScreenCell(cell)
def reset(self, hard):
self.functions.vterm_screen_reset(self.screen, int(bool(hard)))
class VTerm(object):
def __init__(self, lib, rows, cols):
self.functions = get_functions(lib)
self.vt = self.functions.vterm_new(rows, cols)
self.vtscreen = VTermScreen(self.functions, self.functions.vterm_obtain_screen(self.vt))
self.vtscreen.reset(True)
def push(self, data):
if isinstance(data, unicode):
data = data.encode('utf-8')
return self.functions.vterm_input_write(self.vt, data, len(data))
def resize(self, rows, cols):
self.functions.vterm_set_size(self.vt, rows, cols)
def __del__(self):
try:
self.functions.vterm_free(self.vt)
except AttributeError:
pass
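# Illustrative usage (a sketch; the libvterm soname is an assumption and varies
# by host):
#   vt = VTerm('libvterm.so.0', 24, 80)
#   vt.push('\x1b[1mhello')
#   cell = vt.vtscreen[(0, 0)]  # VTermScreenCell for row 0, column 0
#   print(cell.text, cell.bold)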
| mit |
pbrady/sympy | sympy/galgebra/stringarrays.py | 50 | 3306 | # sympy/galgebra/stringarrays.py
"""
stringarrays.py is a group of helper functions that convert the string
input of vector and multivector class functions to arrays of SymPy
symbols.
"""
import operator
from sympy.core.compatibility import reduce
from itertools import combinations
from sympy import S, Symbol, Function
from sympy.core.compatibility import range
def str_array(base, n=None):
"""
Generate one dimensional (list of strings) or two dimensional (list
of list of strings) string array.
For one dimensional arrays: -
        base is a string of variable names separated by blanks, such as
        base = 'a b c', which produces the string list ['a','b','c']; or
        it is a string with no blanks, which in conjunction with the
        integer n generates -
            str_array('v',n=-3) = ['v_1','v_2','v_3']
            str_array('v',n=3) = ['v__1','v__2','v__3'].
        In the case of LaTeX printing the '_' gives a subscript and
        the '__' a superscript.
For two dimensional arrays: -
        base is a string where elements are separated by spaces and rows by
commas so that -
str_array('a b,c d') = [['a','b'],['c','d']]
"""
if n is None:
if ',' in base:
base_array = []
base_split = base.split(',')
for base_arg in base_split:
base_array.append(list(filter(lambda x: x != '', base_arg.split(' '))))
return base_array
else:
return base.split(' ')
result = []
if isinstance(n, str):
if n[0] == '-':
for index in n[1:].split(' '):
result.append(base + '_' + index)
if n[0] == '+':
for index in n[1:].split(' '):
                result.append(base + '__' + index)
        # n was a string of index names; the numeric branches below do not
        # apply, so return here rather than fall through and raise a TypeError.
        return result
if n > 0:
for i in range(1, n + 1):
result.append(base + '__' + str(i))
if n < 0:
for i in range(1, -n + 1):
result.append(base + '_' + str(i))
return result
def symbol_array(base, n=None):
"""
    Generates a string array with str_array and replaces each string in
    the array with a Symbol of the same name.
"""
symbol_str_lst = str_array(base, n)
result = []
for symbol_str in symbol_str_lst:
result.append(S(symbol_str))
return tuple(result)
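# For example (a sketch): symbol_array('a b c') returns the Symbols (a, b, c),
# and symbol_array('v', n=-2) returns (v_1, v_2).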
def fct_sym_array(str_lst, coords=None):
"""
    Construct a list of symbols or functions with names in 'str_lst'. If
    'coords' is given (a tuple of symbols), a function list is constructed;
    otherwise a symbol list is constructed.
"""
if coords is None:
fs_lst = []
for sym_str in str_lst:
fs_lst.append(Symbol(sym_str))
else:
fs_lst = []
for fct_str in str_lst:
fs_lst.append(Function(fct_str)(*coords))
return fs_lst
def str_combinations(base, lst, rank=1, mode='_'):
"""
Construct a list of strings of the form 'base+mode+indexes' where the
indexes are formed by converting 'lst' to a list of strings and then
forming the 'indexes' by concatenating combinations of elements from
'lst' taken 'rank' at a time.
"""
a1 = combinations([str(x) for x in lst], rank)
a2 = [reduce(operator.add, x) for x in a1]
str_lst = [base + mode + x for x in a2]
return str_lst
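# For example (a sketch): str_combinations('A', [1, 2, 3], rank=2) forms the
# index pairs '12', '13', '23' and returns ['A_12', 'A_13', 'A_23'].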
| bsd-3-clause |
hlieberman/ansible-modules-core | cloud/openstack/os_server_volume.py | 77 | 4651 | #!/usr/bin/python
#coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_volume
short_description: Attach/Detach Volumes from OpenStack VM's
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Attach or Detach volumes from OpenStack VM's
options:
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
required: false
server:
description:
- Name or ID of server you want to attach a volume to
required: true
volume:
description:
- Name or id of volume you want to attach to a server
required: true
device:
description:
- Device you want to attach. Defaults to auto finding a device name.
required: false
default: None
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Attaches a volume to a compute host
- name: attach a volume
hosts: localhost
tasks:
- name: attach volume to host
os_server_volume:
state: present
cloud: mordred
server: Mysql-server
volume: mysql-data
device: /dev/vdb
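# Detaching a volume from a compute host (a sketch reusing the documented
# options and the names from the example above)
- name: detach a volume
  hosts: localhost
  tasks:
  - name: detach volume from host
    os_server_volume:
      state: absent
      cloud: mordred
      server: Mysql-server
      volume: mysql-data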
'''
def _system_state_change(state, device):
"""Check if system state would change."""
if state == 'present':
if device:
return False
return True
if state == 'absent':
if device:
return True
return False
return False
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
volume=dict(required=True),
device=dict(default=None), # None == auto choose device name
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
wait = module.params['wait']
timeout = module.params['timeout']
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
volume = cloud.get_volume(module.params['volume'])
dev = cloud.get_volume_attach_device(volume, server.id)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, dev))
if state == 'present':
if dev:
# Volume is already attached to this server
module.exit_json(changed=False)
cloud.attach_volume(server, volume, module.params['device'],
wait=wait, timeout=timeout)
server = cloud.get_server(module.params['server']) # refresh
volume = cloud.get_volume(module.params['volume']) # refresh
hostvars = meta.get_hostvars_from_server(cloud, server)
module.exit_json(
changed=True,
id=volume['id'],
attachments=volume['attachments'],
openstack=hostvars
)
elif state == 'absent':
if not dev:
# Volume is not attached to this server
module.exit_json(changed=False)
cloud.detach_volume(server, volume, wait=wait, timeout=timeout)
module.exit_json(
changed=True,
result='Detached volume from server'
)
except (shade.OpenStackCloudException, shade.OpenStackCloudTimeout) as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_utils/common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
dan1/horizon-proto | horizon/tabs/base.py | 36 | 17272 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
from django.template.loader import render_to_string
from django.template import TemplateSyntaxError # noqa
from django.utils.datastructures import SortedDict
from horizon import exceptions
from horizon.utils import html
SEPARATOR = "__"
CSS_TAB_GROUP_CLASSES = ["nav", "nav-tabs", "ajax-tabs"]
CSS_ACTIVE_TAB_CLASSES = ["active"]
CSS_DISABLED_TAB_CLASSES = ["disabled"]
class TabGroup(html.HTMLElement):
"""A container class which knows how to manage and render
:class:`~horizon.tabs.Tab` objects.
.. attribute:: slug
The URL slug and pseudo-unique identifier for this tab group.
.. attribute:: template_name
The name of the template which will be used to render this tab group.
Default: ``"horizon/common/_tab_group.html"``
.. attribute:: sticky
Boolean to control whether the active tab state should be stored
across requests for a given user. (State storage is all done
client-side.)
.. attribute:: show_single_tab
Boolean to control whether the tab bar is shown when the tab group
has only one tab. Default: ``False``
.. attribute:: param_name
The name of the GET request parameter which will be used when
requesting specific tab data. Default: ``tab``.
.. attribute:: classes
A list of CSS classes which should be displayed on this tab group.
.. attribute:: attrs
A dictionary of HTML attributes which should be rendered into the
markup for this tab group.
.. attribute:: selected
Read-only property which is set to the instance of the
currently-selected tab if there is one, otherwise ``None``.
.. attribute:: active
Read-only property which is set to the value of the current active tab.
This may not be the same as the value of ``selected`` if no
specific tab was requested via the ``GET`` parameter.
"""
slug = None
template_name = "horizon/common/_tab_group.html"
param_name = 'tab'
sticky = False
show_single_tab = False
_selected = None
_active = None
@property
def selected(self):
return self._selected
@property
def active(self):
return self._active
def __init__(self, request, **kwargs):
super(TabGroup, self).__init__()
if not hasattr(self, "tabs"):
raise NotImplementedError('%s must declare a "tabs" attribute.'
% self.__class__)
if not self.slug:
raise NotImplementedError('%s must declare a "slug" attribute.'
% self.__class__)
self.request = request
self.kwargs = kwargs
self._data = None
tab_instances = []
for tab in self.tabs:
tab_instances.append((tab.slug, tab(self, request)))
self._tabs = SortedDict(tab_instances)
if self.sticky:
self.attrs['data-sticky-tabs'] = 'sticky'
if not self._set_active_tab():
self.tabs_not_available()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def load_tab_data(self):
"""Preload all data that for the tabs that will be displayed."""
for tab in self._tabs.values():
if tab.load and not tab.data_loaded:
try:
tab._data = tab.get_context_data(self.request)
except Exception:
tab._data = False
exceptions.handle(self.request)
def get_id(self):
"""Returns the id for this tab group. Defaults to the value of the tab
        group's :attr:`horizon.tabs.TabGroup.slug`.
"""
return self.slug
def get_default_classes(self):
"""Returns a list of the default classes for the tab group. Defaults to
``["nav", "nav-tabs", "ajax-tabs"]``.
"""
default_classes = super(TabGroup, self).get_default_classes()
default_classes.extend(CSS_TAB_GROUP_CLASSES)
return default_classes
def tabs_not_available(self):
"""In the event that no tabs are either allowed or enabled, this method
is the fallback handler. By default it's a no-op, but it exists
to make redirecting or raising exceptions possible for subclasses.
"""
pass
def _set_active_tab(self):
marked_active = None
# See if we have a selected tab via the GET parameter.
tab = self.get_selected_tab()
if tab:
tab._active = True
self._active = tab
marked_active = tab
# Iterate through to mark them all accordingly.
for tab in self._tabs.values():
if tab._allowed and tab._enabled and not marked_active:
tab._active = True
self._active = tab
marked_active = True
elif tab == marked_active:
continue
else:
tab._active = False
return marked_active
def render(self):
"""Renders the HTML output for this tab group."""
return render_to_string(self.template_name, {"tab_group": self})
def get_tabs(self):
"""Returns a list of the allowed tabs for this tab group."""
return filter(lambda tab: tab._allowed, self._tabs.values())
def get_tab(self, tab_name, allow_disabled=False):
"""Returns a specific tab from this tab group.
If the tab is not allowed or not enabled this method returns ``None``.
If the tab is disabled but you wish to return it anyway, you can pass
``True`` to the allow_disabled argument.
"""
tab = self._tabs.get(tab_name, None)
if tab and tab._allowed and (tab._enabled or allow_disabled):
return tab
return None
def get_loaded_tabs(self):
return filter(lambda t: self.get_tab(t.slug), self._tabs.values())
def get_selected_tab(self):
"""Returns the tab specific by the GET request parameter.
In the event that there is no GET request parameter, the value
of the query parameter is invalid, or the tab is not allowed/enabled,
the return value of this function is None.
"""
selected = self.request.GET.get(self.param_name, None)
if selected:
try:
tab_group, tab_name = selected.split(SEPARATOR)
except ValueError:
return None
if tab_group == self.get_id():
self._selected = self.get_tab(tab_name)
return self._selected
class Tab(html.HTMLElement):
"""A reusable interface for constructing a tab within a
:class:`~horizon.tabs.TabGroup`.
.. attribute:: name
The display name for the tab which will be rendered as the text for
the tab element in the HTML. Required.
.. attribute:: slug
The URL slug and id attribute for the tab. This should be unique for
a given tab group. Required.
.. attribute:: preload
Determines whether the contents of the tab should be rendered into
the page's HTML when the tab group is rendered, or whether it should
be loaded dynamically when the tab is selected. Default: ``True``.
.. attribute:: classes
A list of CSS classes which should be displayed on this tab.
.. attribute:: attrs
A dictionary of HTML attributes which should be rendered into the
markup for this tab.
.. attribute:: load
Read-only access to determine whether or not this tab's data should
be loaded immediately.
.. attribute:: permissions
A list of permission names which this tab requires in order to be
displayed. Defaults to an empty list (``[]``).
"""
name = None
slug = None
preload = True
_active = None
permissions = []
def __init__(self, tab_group, request=None):
super(Tab, self).__init__()
# Priority: constructor, class-defined, fallback
if not self.name:
raise ValueError("%s must have a name." % self.__class__.__name__)
self.name = unicode(self.name) # Force unicode.
if not self.slug:
raise ValueError("%s must have a slug." % self.__class__.__name__)
self.tab_group = tab_group
self.request = request
if request:
self._allowed = self.allowed(request) and (
self._has_permissions(request))
self._enabled = self.enabled(request)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def _has_permissions(self, request):
return request.user.has_perms(self.permissions)
def is_active(self):
"""Method to access whether or not this tab is the active tab."""
if self._active is None:
self.tab_group._set_active_tab()
return self._active
@property
def load(self):
load_preloaded = self.preload or self.is_active()
return load_preloaded and self._allowed and self._enabled
@property
def data(self):
if getattr(self, "_data", None) is None:
self._data = self.get_context_data(self.request)
return self._data
@property
def data_loaded(self):
return getattr(self, "_data", None) is not None
def render(self):
"""Renders the tab to HTML using the
:meth:`~horizon.tabs.Tab.get_context_data` method and
the :meth:`~horizon.tabs.Tab.get_template_name` method.
If :attr:`~horizon.tabs.Tab.preload` is ``False`` and ``force_load``
is not ``True``, or
either :meth:`~horizon.tabs.Tab.allowed` or
:meth:`~horizon.tabs.Tab.enabled` returns ``False`` this method will
return an empty string.
"""
if not self.load:
return ''
try:
context = self.data
except exceptions.Http302:
raise
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
            six.reraise(TemplateSyntaxError, exc_value, exc_traceback)
return render_to_string(self.get_template_name(self.request), context)
def get_id(self):
"""Returns the id for this tab. Defaults to
``"{{ tab_group.slug }}__{{ tab.slug }}"``.
"""
return SEPARATOR.join([self.tab_group.slug, self.slug])
def get_query_string(self):
return "=".join((self.tab_group.param_name, self.get_id()))
def get_default_classes(self):
"""Returns a list of the default classes for the tab. Defaults to
and empty list (``[]``), however additional classes may be added
depending on the state of the tab as follows:
If the tab is the active tab for the tab group, in which
the class ``"active"`` will be added.
If the tab is not enabled, the classes the class ``"disabled"``
will be added.
"""
default_classes = super(Tab, self).get_default_classes()
if self.is_active():
default_classes.extend(CSS_ACTIVE_TAB_CLASSES)
if not self._enabled:
default_classes.extend(CSS_DISABLED_TAB_CLASSES)
return default_classes
def get_template_name(self, request):
"""Returns the name of the template to be used for rendering this tab.
By default it returns the value of the ``template_name`` attribute
on the ``Tab`` class.
"""
if not hasattr(self, "template_name"):
raise AttributeError("%s must have a template_name attribute or "
"override the get_template_name method."
% self.__class__.__name__)
return self.template_name
def get_context_data(self, request, **kwargs):
"""This method should return a dictionary of context data used to
render the tab. Required.
"""
return kwargs
def enabled(self, request):
"""Determines whether or not the tab should be accessible
(e.g. be rendered into the HTML on load and respond to a click event).
If a tab returns ``False`` from ``enabled`` it will ignore the value
of ``preload`` and only render the HTML of the tab after being clicked.
The default behavior is to return ``True`` for all cases.
"""
return True
def allowed(self, request):
"""Determines whether or not the tab is displayed.
Tab instances can override this method to specify conditions under
which this tab should not be shown at all by returning ``False``.
The default behavior is to return ``True`` for all cases.
"""
return True
def post(self, request, *args, **kwargs):
"""Handles POST data sent to a tab.
Tab instances can override this method to have tab-specific POST logic
without polluting the TabView code.
The default behavior is to ignore POST data.
"""
pass
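# Illustrative sketch (hypothetical names, not part of horizon itself): a
# minimal tab and tab group wired together.
#
#     class OverviewTab(Tab):
#         name = "Overview"
#         slug = "overview"
#         template_name = "project/_overview.html"
#
#         def get_context_data(self, request):
#             return {"items": []}
#
#     class MyTabGroup(TabGroup):
#         slug = "my_group"
#         tabs = (OverviewTab,)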
class TableTab(Tab):
"""A :class:`~horizon.tabs.Tab` class which knows how to deal with
:class:`~horizon.tables.DataTable` classes rendered inside of it.
This distinct class is required due to the complexity involved in handling
both dynamic tab loading, dynamic table updating and table actions all
within one view.
.. attribute:: table_classes
An iterable containing the :class:`~horizon.tables.DataTable` classes
which this tab will contain. Equivalent to the
:attr:`~horizon.tables.MultiTableView.table_classes` attribute on
:class:`~horizon.tables.MultiTableView`. For each table class you
need to define a corresponding ``get_{{ table_name }}_data`` method
as with :class:`~horizon.tables.MultiTableView`.
"""
table_classes = None
def __init__(self, tab_group, request):
super(TableTab, self).__init__(tab_group, request)
if not self.table_classes:
class_name = self.__class__.__name__
raise NotImplementedError("You must define a table_class "
"attribute on %s" % class_name)
# Instantiate our table classes but don't assign data yet
table_instances = [(table._meta.name,
table(request, **tab_group.kwargs))
for table in self.table_classes]
self._tables = SortedDict(table_instances)
self._table_data_loaded = False
def load_table_data(self):
"""Calls the ``get_{{ table_name }}_data`` methods for each table class
and sets the data on the tables.
"""
# We only want the data to be loaded once, so we track if we have...
if not self._table_data_loaded:
for table_name, table in self._tables.items():
# Fetch the data function.
func_name = "get_%s_data" % table_name
data_func = getattr(self, func_name, None)
if data_func is None:
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"on %s." % (func_name, cls_name))
# Load the data.
table.data = data_func()
table._meta.has_prev_data = self.has_prev_data(table)
table._meta.has_more_data = self.has_more_data(table)
# Mark our data as loaded so we don't run the loaders again.
self._table_data_loaded = True
def get_context_data(self, request, **kwargs):
"""Adds a ``{{ table_name }}_table`` item to the context for each table
in the :attr:`~horizon.tabs.TableTab.table_classes` attribute.
If only one table class is provided, a shortcut ``table`` context
variable is also added containing the single table.
"""
context = super(TableTab, self).get_context_data(request, **kwargs)
# If the data hasn't been manually loaded before now,
# make certain it's loaded before setting the context.
self.load_table_data()
for table_name, table in self._tables.items():
# If there's only one table class, add a shortcut name as well.
if len(self.table_classes) == 1:
context["table"] = table
context["%s_table" % table_name] = table
return context
def has_prev_data(self, table):
return False
def has_more_data(self, table):
return False
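# Illustrative sketch (hypothetical names): a TableTab pairing one DataTable
# with the get_{{ table_name }}_data loader it requires.
#
#     class InstancesTab(TableTab):
#         name = "Instances"
#         slug = "instances"
#         table_classes = (InstancesTable,)  # DataTable whose _meta.name is "instances"
#         template_name = "horizon/common/_detail_table.html"
#
#         def get_instances_data(self):
#             return fetch_instances(self.request)  # hypothetical helper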
| apache-2.0 |
Azulinho/ansible | lib/ansible/modules/clustering/znode.py | 46 | 7699 | #!/usr/bin/python
# Copyright 2015 WP Engine, Inc. All rights reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: znode
version_added: "2.0"
short_description: Create, delete, retrieve, and update znodes using ZooKeeper
description:
- Create, delete, retrieve, and update znodes using ZooKeeper.
options:
hosts:
description:
- A list of ZooKeeper servers (format '[server]:[port]').
required: true
name:
description:
- The path of the znode.
required: true
value:
description:
- The value assigned to the znode.
default: None
required: false
op:
description:
- An operation to perform. Mutually exclusive with state.
default: None
required: false
state:
description:
- The state to enforce. Mutually exclusive with op.
default: None
required: false
timeout:
description:
- The amount of time to wait for a node to appear.
default: 300
required: false
recursive:
description:
- Recursively delete node and all its children.
default: False
required: false
version_added: "2.1"
requirements:
- kazoo >= 2.1
- python >= 2.6
author: "Trey Perry (@treyperry)"
"""
EXAMPLES = """
# Creating or updating a znode with a given value
- znode:
hosts: 'localhost:2181'
name: /mypath
value: myvalue
state: present
# Getting the value and stat structure for a znode
- znode:
hosts: 'localhost:2181'
name: /mypath
op: get
# Listing a particular znode's children
- znode:
hosts: 'localhost:2181'
name: /zookeeper
op: list
# Waiting 20 seconds for a znode to appear at path /mypath
- znode:
hosts: 'localhost:2181'
name: /mypath
op: wait
timeout: 20
# Deleting a znode at path /mypath
- znode:
hosts: 'localhost:2181'
name: /mypath
state: absent
# Creating or updating a znode with a given value on a remote Zookeeper
- znode:
hosts: 'my-zookeeper-node:2181'
name: /mypath
value: myvalue
state: present
delegate_to: 127.0.0.1
"""
import time
try:
from kazoo.client import KazooClient
from kazoo.handlers.threading import KazooTimeoutError
KAZOO_INSTALLED = True
except ImportError:
KAZOO_INSTALLED = False
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
hosts=dict(required=True, type='str'),
name=dict(required=True, type='str'),
value=dict(required=False, default=None, type='str'),
op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
state=dict(choices=['present', 'absent']),
timeout=dict(required=False, default=300, type='int'),
recursive=dict(required=False, default=False, type='bool')
),
supports_check_mode=False
)
if not KAZOO_INSTALLED:
module.fail_json(msg='kazoo >= 2.1 is required to use this module. Use pip to install it.')
check = check_params(module.params)
if not check['success']:
module.fail_json(msg=check['msg'])
zoo = KazooCommandProxy(module)
try:
zoo.start()
except KazooTimeoutError:
module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
command_dict = {
'op': {
'get': zoo.get,
'list': zoo.list,
'wait': zoo.wait
},
'state': {
'present': zoo.present,
'absent': zoo.absent
}
}
command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
method = module.params[command_type]
result, result_dict = command_dict[command_type][method]()
zoo.shutdown()
if result:
module.exit_json(**result_dict)
else:
module.fail_json(**result_dict)
def check_params(params):
if not params['state'] and not params['op']:
return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
if params['state'] and params['op']:
return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
return {'success': True}
class KazooCommandProxy():
def __init__(self, module):
self.module = module
self.zk = KazooClient(module.params['hosts'])
def absent(self):
return self._absent(self.module.params['name'])
def exists(self, znode):
return self.zk.exists(znode)
def list(self):
children = self.zk.get_children(self.module.params['name'])
return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
'znode': self.module.params['name']}
def present(self):
return self._present(self.module.params['name'], self.module.params['value'])
def get(self):
return self._get(self.module.params['name'])
def shutdown(self):
self.zk.stop()
self.zk.close()
def start(self):
self.zk.start()
def wait(self):
return self._wait(self.module.params['name'], self.module.params['timeout'])
def _absent(self, znode):
if self.exists(znode):
self.zk.delete(znode, recursive=self.module.params['recursive'])
return True, {'changed': True, 'msg': 'The znode was deleted.'}
else:
return True, {'changed': False, 'msg': 'The znode does not exist.'}
def _get(self, path):
if self.exists(path):
value, zstat = self.zk.get(path)
stat_dict = {}
for i in dir(zstat):
if not i.startswith('_'):
attr = getattr(zstat, i)
if isinstance(attr, (int, str)):
stat_dict[i] = attr
result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
'stat': stat_dict}
else:
result = False, {'msg': 'The requested node does not exist.'}
return result
def _present(self, path, value):
if self.exists(path):
(current_value, zstat) = self.zk.get(path)
if value != current_value:
self.zk.set(path, value)
return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
'value': value}
else:
return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
else:
self.zk.create(path, value, makepath=True)
return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
def _wait(self, path, timeout, interval=5):
lim = time.time() + timeout
while time.time() < lim:
if self.exists(path):
return True, {'msg': 'The node appeared before the configured timeout.',
'znode': path, 'timeout': timeout}
else:
time.sleep(interval)
return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
'znode': path}
if __name__ == '__main__':
main()
| gpl-3.0 |
teamfx/openjfx-10-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/tool/bot/irc_command.py | 2 | 12985 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import random
import re
from webkitpy.common.config import irc as config_irc
from webkitpy.common.config import urls
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.net.web import Web
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.grammar import join_with_separators
from webkitpy.tool.grammar import pluralize
def _post_error_and_check_for_bug_url(tool, nicks_string, exception):
tool.irc().post("%s" % exception)
bug_id = urls.parse_bug_id(exception.output)
if bug_id:
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Ugg... Might have created %s" % (nicks_string, bug_url))
# FIXME: Merge with Command?
class IRCCommand(object):
usage_string = None
help_string = None
def execute(self, nick, args, tool, sheriff):
raise NotImplementedError("subclasses must implement")
@classmethod
def usage(cls, nick):
return "%s: Usage: %s" % (nick, cls.usage_string)
@classmethod
def help(cls, nick):
return "%s: %s" % (nick, cls.help_string)
class CreateBug(IRCCommand):
usage_string = "create-bug BUG_TITLE"
help_string = "Creates a Bugzilla bug with the given title."
def execute(self, nick, args, tool, sheriff):
if not args:
return self.usage(nick)
bug_title = " ".join(args)
bug_description = "%s\nRequested by %s on %s." % (bug_title, nick, config_irc.channel)
# There happens to be a committers list hung off of Bugzilla, so
# re-using that one makes things easiest for now.
requester = tool.bugs.committers.contributor_by_irc_nickname(nick)
requester_email = requester.bugzilla_email() if requester else None
try:
bug_id = tool.bugs.create_bug(bug_title, bug_description, cc=requester_email, assignee=requester_email)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
return "%s: Created bug: %s" % (nick, bug_url)
except Exception, e:
return "%s: Failed to create bug:\n%s" % (nick, e)
class Help(IRCCommand):
usage_string = "help [COMMAND]"
help_string = "Provides help on my individual commands."
def execute(self, nick, args, tool, sheriff):
if args:
for command_name in args:
if command_name in commands:
self._post_command_help(nick, tool, commands[command_name])
else:
tool.irc().post("%s: Available commands: %s" % (nick, ", ".join(sorted(visible_commands.keys()))))
tool.irc().post('%s: Type "%s: help COMMAND" for help on my individual commands.' % (nick, sheriff.name()))
def _post_command_help(self, nick, tool, command):
tool.irc().post(command.usage(nick))
tool.irc().post(command.help(nick))
aliases = " ".join(sorted(filter(lambda alias: commands[alias] == command and alias not in visible_commands, commands)))
if aliases:
tool.irc().post("%s: Aliases: %s" % (nick, aliases))
class Hi(IRCCommand):
usage_string = "hi"
help_string = "Responds with hi."
def execute(self, nick, args, tool, sheriff):
if len(args) and re.match(sheriff.name() + r'_*\s*!\s*', ' '.join(args)):
return "%s: hi %s!" % (nick, nick)
if sheriff.name() == 'WKR': # For some unknown reason, WKR can't use tool.bugs.quips().
return "You're doing it wrong"
quips = tool.bugs.quips()
quips.append('"Only you can prevent forest fires." -- Smokey the Bear')
return random.choice(quips)
class PingPong(IRCCommand):
usage_string = "ping"
help_string = "Responds with pong."
def execute(self, nick, args, tool, sheriff):
return nick + ": pong"
class YouThere(IRCCommand):
usage_string = "yt?"
help_string = "Responds with yes."
def execute(self, nick, args, tool, sheriff):
return "%s: yes" % nick
class Restart(IRCCommand):
usage_string = "restart"
help_string = "Restarts sherrifbot. Will update its WebKit checkout, and re-join the channel momentarily."
def execute(self, nick, args, tool, sheriff):
tool.irc().post("Restarting...")
raise TerminateQueue()
class Rollout(IRCCommand):
usage_string = "rollout SVN_REVISION [SVN_REVISIONS] REASON"
help_string = "Opens a rollout bug, CCing author + reviewer, and attaching the reverse-diff of the given revisions marked as commit-queue=?."
def _extract_revisions(self, arg):
revision_list = []
possible_revisions = arg.split(",")
for revision in possible_revisions:
revision = revision.strip()
if not revision:
continue
revision = revision.lstrip("r")
# If one part of the arg isn't in the correct format,
# then none of the arg should be considered a revision.
if not revision.isdigit():
return None
revision_list.append(int(revision))
return revision_list
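    # Illustrative behaviour (hypothetical inputs): "r1234,r5678" yields
    # [1234, 5678], while a mixed token such as "r1234,abc" yields None,
    # since one malformed part invalidates the whole argument.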
def _parse_args(self, args):
if not args:
return (None, None)
svn_revision_list = []
remaining_args = args[:]
# First process all revisions.
while remaining_args:
new_revisions = self._extract_revisions(remaining_args[0])
if not new_revisions:
break
svn_revision_list += new_revisions
remaining_args = remaining_args[1:]
# Was there a revision number?
if not len(svn_revision_list):
return (None, None)
# Everything left is the reason.
rollout_reason = " ".join(remaining_args)
return svn_revision_list, rollout_reason
def _responsible_nicknames_from_revisions(self, tool, sheriff, svn_revision_list):
commit_infos = map(tool.checkout().commit_info_for_revision, svn_revision_list)
nickname_lists = map(sheriff.responsible_nicknames_from_commit_info, commit_infos)
return sorted(set(itertools.chain(*nickname_lists)))
def _nicks_string(self, tool, sheriff, requester_nick, svn_revision_list):
        # FIXME: _parse_args guarantees that our svn_revision_list is all numbers.
# However, it's possible our checkout will not include one of the revisions,
# so we may need to catch exceptions from commit_info_for_revision here.
target_nicks = [requester_nick] + self._responsible_nicknames_from_revisions(tool, sheriff, svn_revision_list)
return ", ".join(target_nicks)
def _update_working_copy(self, tool):
tool.scm().discard_local_changes()
tool.executive.run_and_throw_if_fail(tool.deprecated_port().update_webkit_command(), quiet=True, cwd=tool.scm().checkout_root)
def _check_diff_failure(self, error_log, tool):
if not error_log:
return None
revert_failure_message_start = error_log.find("Failed to apply reverse diff for revision")
if revert_failure_message_start == -1:
return None
lines = error_log[revert_failure_message_start:].split('\n')[1:]
files = list(itertools.takewhile(lambda line: tool.filesystem.exists(tool.scm().absolute_path(line)), lines))
if files:
return "Failed to apply reverse diff for %s: %s" % (pluralize(len(files), "file", showCount=False), ", ".join(files))
return None
def execute(self, nick, args, tool, sheriff):
svn_revision_list, rollout_reason = self._parse_args(args)
if (not svn_revision_list or not rollout_reason):
return self.usage(nick)
revision_urls_string = join_with_separators([urls.view_revision_url(revision) for revision in svn_revision_list])
tool.irc().post("%s: Preparing rollout for %s ..." % (nick, revision_urls_string))
self._update_working_copy(tool)
# FIXME: IRCCommand should bind to a tool and have a self._tool like Command objects do.
# Likewise we should probably have a self._sheriff.
nicks_string = self._nicks_string(tool, sheriff, nick, svn_revision_list)
try:
complete_reason = "%s (Requested by %s on %s)." % (
rollout_reason, nick, config_irc.channel)
bug_id = sheriff.post_rollout_patch(svn_revision_list, complete_reason)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Created rollout: %s" % (nicks_string, bug_url))
except ScriptError, e:
tool.irc().post("%s: Failed to create rollout patch:" % nicks_string)
diff_failure = self._check_diff_failure(e.output, tool)
if diff_failure:
return "%s: %s" % (nicks_string, diff_failure)
_post_error_and_check_for_bug_url(tool, nicks_string, e)
class Whois(IRCCommand):
usage_string = "whois SEARCH_STRING"
help_string = "Searches known contributors and returns any matches with irc, email and full name. Wild card * permitted."
def _full_record_and_nick(self, contributor):
result = ''
if contributor.irc_nicknames:
result += ' (:%s)' % ', :'.join(contributor.irc_nicknames)
if contributor.can_review:
result += ' (r)'
elif contributor.can_commit:
result += ' (c)'
return unicode(contributor) + result
def execute(self, nick, args, tool, sheriff):
if not args:
return self.usage(nick)
search_string = unicode(" ".join(args))
# FIXME: We should get the ContributorList off the tool somewhere.
contributors = CommitterList().contributors_by_search_string(search_string)
if not contributors:
return unicode("%s: Sorry, I don't know any contributors matching '%s'.") % (nick, search_string)
if len(contributors) > 5:
return unicode("%s: More than 5 contributors match '%s', could you be more specific?") % (nick, search_string)
if len(contributors) == 1:
contributor = contributors[0]
if not contributor.irc_nicknames:
return unicode("%s: %s hasn't told me their nick. Boo hoo :-(") % (nick, contributor)
return unicode("%s: %s is %s. Why do you ask?") % (nick, search_string, self._full_record_and_nick(contributor))
contributor_nicks = map(self._full_record_and_nick, contributors)
contributors_string = join_with_separators(contributor_nicks, only_two_separator=" or ", last_separator=', or ')
return unicode("%s: I'm not sure who you mean? %s could be '%s'.") % (nick, contributors_string, search_string)
# FIXME: Lame. We should have an auto-registering CommandCenter.
visible_commands = {
"create-bug": CreateBug,
"help": Help,
"hi": Hi,
"ping": PingPong,
"restart": Restart,
"rollout": Rollout,
"whois": Whois,
"yt?": YouThere,
}
# Add revert as an "easter egg" command. Why?
# revert is the same as rollout and it would be confusing to list both when
# they do the same thing. However, this command is a very natural thing for
# people to use and it seems silly to have them hunt around for "rollout" instead.
commands = visible_commands.copy()
commands["revert"] = Rollout
# "hello" Alias for "hi" command for the purposes of testing aliases
commands["hello"] = Hi
| gpl-2.0 |
gameduell/duell | bin/win/python2.7.9/Lib/nturl2path.py | 228 | 2371 | """Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
# e.g.
# ///C|/foo/bar/spam.foo
# becomes
# C:\foo\bar\spam.foo
import string, urllib
# Windows itself uses ":" even in URLs.
url = url.replace(':', '|')
if not '|' in url:
# No drive specifier, just convert slashes
if url[:4] == '////':
# path is something like ////host/path/on/remote/host
# convert this to \\host\path\on\remote\host
# (notice halving of slashes at the start of the path)
url = url[2:]
components = url.split('/')
# make sure not to convert quoted slashes :-)
return urllib.unquote('\\'.join(components))
comp = url.split('|')
if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
error = 'Bad URL: ' + url
raise IOError, error
drive = comp[0][-1].upper()
path = drive + ':'
components = comp[1].split('/')
for comp in components:
if comp:
path = path + '\\' + urllib.unquote(comp)
# Issue #11474: url like '/C|/' should convert into 'C:\\'
if path.endswith(':') and url.endswith('/'):
path += '\\'
return path
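# A minimal sketch of the conversion (hypothetical input):
#   url2pathname('///C|/foo/bar/spam.foo')  ->  'C:\\foo\\bar\\spam.foo'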
def pathname2url(p):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
# e.g.
# C:\foo\bar\spam.foo
# becomes
# ///C|/foo/bar/spam.foo
import urllib
if not ':' in p:
# No drive specifier, just convert slashes and quote the name
if p[:2] == '\\\\':
# path is something like \\host\path\on\remote\host
# convert this to ////host/path/on/remote/host
# (notice doubling of slashes at the start of the path)
p = '\\\\' + p
components = p.split('\\')
return urllib.quote('/'.join(components))
comp = p.split(':')
if len(comp) != 2 or len(comp[0]) > 1:
error = 'Bad path: ' + p
raise IOError, error
drive = urllib.quote(comp[0].upper())
components = comp[1].split('\\')
path = '///' + drive + ':'
for comp in components:
if comp:
path = path + '/' + urllib.quote(comp)
return path
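# And the reverse direction (hypothetical input):
#   pathname2url('C:\\foo\\bar\\spam.foo')  ->  '///C:/foo/bar/spam.foo'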
| bsd-2-clause |
Umang88/Radon-Kenzo | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some event type classes, which can
# be used by other scripts for analyzing perf samples.
#
# Currently there are just a few classes defined as examples;
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
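# Hypothetical usage sketch: the factory dispatches purely on buffer size.
#   ev = create_event("cycles", "bash", "/bin/bash", "main", raw_buf)
#   ev.show()  # PerfEvent, PebsEvent or PebsNHM depending on len(raw_buf)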
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
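# Note: the 80-byte slice unpacked above is ten native-order unsigned 64-bit
# fields ('Q' x 10 = 80 bytes), matching the EFLAGS/IP/register layout
# described in the comment block preceding the class.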
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
| gpl-2.0 |
astaninger/speakout | venv/lib/python3.6/site-packages/setuptools/wheel.py | 19 | 8102 | """Wheels support."""
from distutils.util import get_platform
import email
import itertools
import os
import posixpath
import re
import zipfile
from pkg_resources import Distribution, PathMetadata, parse_version
from setuptools.extern.packaging.utils import canonicalize_name
from setuptools.extern.six import PY3
from setuptools import Distribution as SetuptoolsDistribution
from setuptools import pep425tags
from setuptools.command.egg_info import write_requirements
__metaclass__ = type
WHEEL_NAME = re.compile(
r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
)\.whl$""",
re.VERBOSE).match
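# A quick sketch of what the pattern captures (hypothetical filename):
#   m = WHEEL_NAME('pip-18.0-py2.py3-none-any.whl')
#   m.group('project_name'), m.group('version')  ->  ('pip', '18.0')
#   m.group('py_version'), m.group('abi'), m.group('platform')
#       ->  ('py2.py3', 'none', 'any')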
NAMESPACE_PACKAGE_INIT = '''\
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
'''
def unpack(src_dir, dst_dir):
'''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
for dirpath, dirnames, filenames in os.walk(src_dir):
subdir = os.path.relpath(dirpath, src_dir)
for f in filenames:
src = os.path.join(dirpath, f)
dst = os.path.join(dst_dir, subdir, f)
os.renames(src, dst)
for n, d in reversed(list(enumerate(dirnames))):
src = os.path.join(dirpath, d)
dst = os.path.join(dst_dir, subdir, d)
if not os.path.exists(dst):
# Directory does not exist in destination,
# rename it and prune it from os.walk list.
os.renames(src, dst)
del dirnames[n]
# Cleanup.
for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
assert not filenames
os.rmdir(dirpath)
class Wheel:
def __init__(self, filename):
match = WHEEL_NAME(os.path.basename(filename))
if match is None:
raise ValueError('invalid wheel name: %r' % filename)
self.filename = filename
for k, v in match.groupdict().items():
setattr(self, k, v)
def tags(self):
'''List tags (py_version, abi, platform) supported by this wheel.'''
return itertools.product(
self.py_version.split('.'),
self.abi.split('.'),
self.platform.split('.'),
)
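    # e.g. (hypothetical wheel) tags 'py2.py3', 'none', 'any' expand to the
    # product ('py2', 'none', 'any') and ('py3', 'none', 'any').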
def is_compatible(self):
        '''Is the wheel compatible with the current platform?'''
supported_tags = pep425tags.get_supported()
return next((True for t in self.tags() if t in supported_tags), False)
def egg_name(self):
return Distribution(
project_name=self.project_name, version=self.version,
platform=(None if self.platform == 'any' else get_platform()),
).egg_name() + '.egg'
def get_dist_info(self, zf):
# find the correct name of the .dist-info dir in the wheel file
for member in zf.namelist():
dirname = posixpath.dirname(member)
if (dirname.endswith('.dist-info') and
canonicalize_name(dirname).startswith(
canonicalize_name(self.project_name))):
return dirname
raise ValueError("unsupported wheel format. .dist-info not found")
def install_as_egg(self, destination_eggdir):
'''Install wheel as an egg directory.'''
with zipfile.ZipFile(self.filename) as zf:
self._install_as_egg(destination_eggdir, zf)
def _install_as_egg(self, destination_eggdir, zf):
dist_basename = '%s-%s' % (self.project_name, self.version)
dist_info = self.get_dist_info(zf)
dist_data = '%s.data' % dist_basename
egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
self._move_data_entries(destination_eggdir, dist_data)
self._fix_namespace_packages(egg_info, destination_eggdir)
@staticmethod
def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
def get_metadata(name):
with zf.open(posixpath.join(dist_info, name)) as fp:
value = fp.read().decode('utf-8') if PY3 else fp.read()
return email.parser.Parser().parsestr(value)
wheel_metadata = get_metadata('WHEEL')
# Check wheel format version is supported.
wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
wheel_v1 = (
parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
)
if not wheel_v1:
raise ValueError(
'unsupported wheel format version: %s' % wheel_version)
# Extract to target directory.
os.mkdir(destination_eggdir)
zf.extractall(destination_eggdir)
# Convert metadata.
dist_info = os.path.join(destination_eggdir, dist_info)
dist = Distribution.from_location(
destination_eggdir, dist_info,
metadata=PathMetadata(destination_eggdir, dist_info),
)
# Note: Evaluate and strip markers now,
# as it's difficult to convert back from the syntax:
# foobar; "linux" in sys_platform and extra == 'test'
def raw_req(req):
req.marker = None
return str(req)
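        # e.g. (hypothetical requirement) 'foobar; extra == "test"' comes back
        # as the plain string 'foobar' once its marker is dropped.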
install_requires = list(sorted(map(raw_req, dist.requires())))
extras_require = {
extra: sorted(
req
for req in map(raw_req, dist.requires((extra,)))
if req not in install_requires
)
for extra in dist.extras
}
os.rename(dist_info, egg_info)
os.rename(
os.path.join(egg_info, 'METADATA'),
os.path.join(egg_info, 'PKG-INFO'),
)
setup_dist = SetuptoolsDistribution(
attrs=dict(
install_requires=install_requires,
extras_require=extras_require,
),
)
write_requirements(
setup_dist.get_command_obj('egg_info'),
None,
os.path.join(egg_info, 'requires.txt'),
)
@staticmethod
def _move_data_entries(destination_eggdir, dist_data):
"""Move data entries to their correct location."""
dist_data = os.path.join(destination_eggdir, dist_data)
dist_data_scripts = os.path.join(dist_data, 'scripts')
if os.path.exists(dist_data_scripts):
egg_info_scripts = os.path.join(
destination_eggdir, 'EGG-INFO', 'scripts')
os.mkdir(egg_info_scripts)
for entry in os.listdir(dist_data_scripts):
# Remove bytecode, as it's not properly handled
# during easy_install scripts install phase.
if entry.endswith('.pyc'):
os.unlink(os.path.join(dist_data_scripts, entry))
else:
os.rename(
os.path.join(dist_data_scripts, entry),
os.path.join(egg_info_scripts, entry),
)
os.rmdir(dist_data_scripts)
for subdir in filter(os.path.exists, (
os.path.join(dist_data, d)
for d in ('data', 'headers', 'purelib', 'platlib')
)):
unpack(subdir, destination_eggdir)
if os.path.exists(dist_data):
os.rmdir(dist_data)
@staticmethod
def _fix_namespace_packages(egg_info, destination_eggdir):
namespace_packages = os.path.join(
egg_info, 'namespace_packages.txt')
if os.path.exists(namespace_packages):
with open(namespace_packages) as fp:
namespace_packages = fp.read().split()
for mod in namespace_packages:
mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
mod_init = os.path.join(mod_dir, '__init__.py')
if os.path.exists(mod_dir) and not os.path.exists(mod_init):
with open(mod_init, 'w') as fp:
fp.write(NAMESPACE_PACKAGE_INIT)
| mit |
GoogleCloudDataproc/cloud-dataproc | codelabs/spark-nlp/topic_model.py | 1 | 8715 | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code accompanies this codelab: https://codelabs.developers.google.com/codelabs/spark-nlp.
# In this example, we will build a topic model using spark-nlp and Spark ML.
# In order for this code to work properly, a bucket name must be provided.
# Python imports
import sys
# spark-nlp components. Each one is incorporated into our pipeline.
from sparknlp.annotator import Lemmatizer, Stemmer, Tokenizer, Normalizer
from sparknlp.base import DocumentAssembler, Finisher
# A Spark Session is how we interact with Spark SQL to create Dataframes
from pyspark.sql import SparkSession
# These allow us to create a schema for our data
from pyspark.sql.types import StructField, StructType, StringType, LongType
# Spark Pipelines allow us to sequentially add components such as transformers
from pyspark.ml import Pipeline
# These are components we will incorporate into our pipeline.
from pyspark.ml.feature import StopWordsRemover, CountVectorizer, IDF
# LDA is our model of choice for topic modeling
from pyspark.ml.clustering import LDA
# Some transformers require the usage of other Spark ML functions. We import them here
from pyspark.sql.functions import col, lit, concat, regexp_replace
# This will help catch some PySpark errors
from pyspark.sql.utils import AnalysisException
# Assign bucket where the data lives
try:
bucket = sys.argv[1]
except IndexError:
print("Please provide a bucket name")
sys.exit(1)
# Create a SparkSession under the name "reddit". Viewable via the Spark UI
spark = SparkSession.builder.appName("reddit topic model").getOrCreate()
# Create a three column schema consisting of two strings and a long integer
fields = [StructField("title", StringType(), True),
StructField("body", StringType(), True),
StructField("created_at", LongType(), True)]
schema = StructType(fields)
# We'll attempt to process every year / month combination below.
years = ['2016', '2017', '2018', '2019']
months = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12']
# This is the subreddit we're working with.
subreddit = "food"
# Create a base dataframe.
reddit_data = spark.createDataFrame([], schema)
# Keep a running list of all files that will be processed
files_read = []
for year in years:
for month in months:
        # Build the GCS URI of the monthly CSV dump for this subreddit.
gs_uri = f"gs://{bucket}/reddit_posts/{year}/{month}/{subreddit}.csv.gz"
        # If the file doesn't exist we will simply continue and not
        # log it into our "files_read" list
try:
reddit_data = (
spark.read.format('csv')
.options(codec="org.apache.hadoop.io.compress.GzipCodec")
.load(gs_uri, schema=schema)
.union(reddit_data)
)
files_read.append(gs_uri)
except AnalysisException:
continue
if len(files_read) == 0:
print('No files read')
sys.exit(1)
# Null values are usually easier to work with when replaced by their
# typed equivalents. In this case, we'll replace nulls with empty strings.
# Since some of our data doesn't have a body, we can combine all of the text
# for the titles and bodies so that every row has useful data.
df_train = (
reddit_data
# Replace null values with an empty string
.fillna("")
.select(
# Combine columns
concat(
# First column to concatenate. col() is used to specify that we're referencing a column
col("title"),
# Literal character that will be between the concatenated columns.
lit(" "),
# Second column to concatenate.
col("body")
# Change the name of the new column
).alias("text")
)
# The text has several tags including [REMOVED] or [DELETED] for redacted content.
# We'll replace these with empty strings.
.select(
regexp_replace(col("text"), "\[.*?\]", "")
.alias("text")
)
)
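# As a toy illustration (hypothetical row): title="Best pizza dough" with a
# null body becomes text="Best pizza dough ", and any bracketed tag such as
# "[deleted]" in the combined text is stripped by the regexp_replace above.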
# Now, we begin assembling our pipeline. Each component applies some transformation to the data.
# The Document Assembler takes the raw text data and converts it into a format that
# can be tokenized. It becomes one of spark-nlp's native object types, the "Document".
document_assembler = DocumentAssembler().setInputCol("text").setOutputCol("document")
# The Tokenizer takes data that is of the "Document" type and tokenizes it.
# While slightly more involved than this, it is effectively taking a string and splitting
# it along the spaces, so each word is its own string. The data then becomes the
# spark-nlp native type "Token".
tokenizer = Tokenizer().setInputCols(["document"]).setOutputCol("token")
# The Normalizer cleans the tokens, e.g. stripping punctuation and standardizing
# characters, so that variants of the same word are treated consistently.
normalizer = Normalizer().setInputCols(["token"]).setOutputCol("normalizer")
# The Stemmer takes objects of class "Token" and converts the words into their
# root meaning. For instance, the words "cars", "cars'" and "car's" would all be replaced
# with the word "car".
stemmer = Stemmer().setInputCols(["normalizer"]).setOutputCol("stem")
# The Finisher signals to spark-nlp that annotation is complete and converts the
# output back into plain Spark types, allowing us to access the data outside of
# spark-nlp components. For instance, we can now feed the data into components
# from Spark MLlib.
finisher = Finisher().setInputCols(["stem"]).setOutputCols(["to_spark"]).setValueSplitSymbol(" ")
# Stopwords are common words that generally don't add much detail to the meaning
# of a body of text. In English, these are mostly "articles" such as the words "the"
# and "of".
stopword_remover = StopWordsRemover(inputCol="to_spark", outputCol="filtered")
# Here we implement TF-IDF as an input to our LDA model. CountVectorizer (TF) keeps track
# of the vocabulary that's being created so we can map our topics back to their
# corresponding words.
# TF (term frequency) creates a matrix that counts how many times each word in the
# vocabulary appears in each body of text. This then gives each word a weight based
# on its frequency.
tf = CountVectorizer(inputCol="filtered", outputCol="raw_features")
# Here we implement the IDF portion. IDF (Inverse document frequency) reduces
# the weights of commonly-appearing words.
idf = IDF(inputCol="raw_features", outputCol="features")
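# As a toy illustration (hypothetical corpus): a word like "recipe" that shows
# up in nearly every post gets a large raw TF count but a small IDF factor, so
# its final weight shrinks relative to rarer, more topic-specific words.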
# LDA creates a statistical representation of how frequently words appear
# together in order to create "topics" or groups of commonly appearing words.
# In this case, we'll create 5 topics.
lda = LDA(k=5)
# We add all of the transformers into a Pipeline object. Each transformer
# will execute in the order provided to the "stages" parameter
pipeline = Pipeline(
stages = [
document_assembler,
tokenizer,
normalizer,
stemmer,
finisher,
stopword_remover,
tf,
idf,
lda
]
)
# We fit the data to the model.
model = pipeline.fit(df_train)
# Now that we have a fitted pipeline, we want to output the topics in human-readable form.
# To do this, we need to grab the vocabulary generated from our pipeline, grab the topic
# model and do the appropriate mapping. The output from each individual component lives
# in the model object. We can access them by referring to them by their position in
# the pipeline via model.stages[<ind>]
# Let's create a reference to our vocabulary.
vocab = model.stages[-3].vocabulary
# Next, let's grab the topics generated by our LDA model via describeTopics(). Using collect(),
# we load the output into a Python array.
raw_topics = model.stages[-1].describeTopics(maxTermsPerTopic=5).collect()
# Lastly, let's get the indices of the vocabulary terms from our topics
topic_inds = [ind.termIndices for ind in raw_topics]
# The indices we just grabbed map directly to the terms at those positions in our vocabulary.
# Using the code below, we can generate the mappings from our topic indices to our vocabulary.
topics = []
for topic in topic_inds:
_topic = []
for ind in topic:
_topic.append(vocab[ind])
topics.append(_topic)
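# topics is now a list of k=5 word lists, one per topic, each holding the
# 5 highest-weighted vocabulary terms for that topic (maxTermsPerTopic=5).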
# Let's see our topics!
for i, topic in enumerate(topics, start=1):
print(f"topic {i}: {topic}")
| apache-2.0 |
bretlowery/snakr | lib/django/contrib/gis/gdal/feature.py | 439 | 4153 | from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
    This class wraps an OGR Feature and needs to be instantiated
    from a Layer object.
"""
def __init__(self, feat, layer):
"""
Initializes Feature from a pointer and its Layer object.
"""
if not feat:
raise GDALException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._layer = layer
def __del__(self):
"Releases a reference to this object."
if self._ptr and capi:
capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, six.string_types):
i = self.index(index)
else:
            if index < 0 or index >= self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self, i)
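    # For example (hypothetical layer): feat['NAME'] returns a Field instance,
    # whereas feat.get('NAME') returns the field's underlying value.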
def __iter__(self):
"Iterates over each field in the Feature."
for i in range(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
# #### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_text(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
for i in range(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
# #### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
| bsd-3-clause |
Spiderlover/Toontown | toontown/suit/SuitBase.py | 1 | 3300 | import SuitDNA
from SuitLegList import *
import SuitTimings
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from pandac.PandaModules import *
from pandac.PandaModules import Point3
from toontown.battle import SuitBattleGlobals
from toontown.toonbase import TTLocalizer
TIME_BUFFER_PER_WPT = 0.25
TIME_DIVISOR = 100
DISTRIBUTE_TASK_CREATION = 0
class SuitBase:
notify = DirectNotifyGlobal.directNotify.newCategory('SuitBase')
def __init__(self):
self.dna = None
self.level = 0
self.maxHP = 10
self.currHP = 10
self.isSkelecog = 0
self.isWaiter = 0
self.isVirtual = 0
self.isRental = 0
return
def delete(self):
if hasattr(self, 'legList'):
del self.legList
def getCurrHp(self):
if hasattr(self, 'currHP') and self.currHP:
return self.currHP
else:
self.notify.error('currHP is None')
return 'unknown'
def getMaxHp(self):
if hasattr(self, 'maxHP') and self.maxHP:
return self.maxHP
else:
self.notify.error('maxHP is None')
return 'unknown'
def getStyleName(self):
if hasattr(self, 'dna') and self.dna:
return self.dna.name
else:
self.notify.error('called getStyleName() before dna was set!')
return 'unknown'
def getStyleDept(self):
if hasattr(self, 'dna') and self.dna:
return SuitDNA.getDeptFullname(self.dna.dept)
else:
self.notify.error('called getStyleDept() before dna was set!')
return 'unknown'
def getLevel(self):
return self.level
def setLevel(self, level):
self.level = level
nameWLevel = TTLocalizer.SuitBaseNameWithLevel % {'name': self.name,
'dept': self.getStyleDept(),
'level': self.getActualLevel()}
self.setDisplayName(nameWLevel)
attributes = SuitBattleGlobals.SuitAttributes[self.dna.name]
self.maxHP = attributes['hp'][self.level]
self.currHP = self.maxHP
def getSkelecog(self):
return self.isSkelecog
def setSkelecog(self, flag):
self.isSkelecog = flag
def setWaiter(self, flag):
self.isWaiter = flag
def setVirtual(self, flag):
self.isVirtual = flag
def setRental(self, flag):
self.isRental = flag
def getActualLevel(self):
if hasattr(self, 'dna'):
return SuitBattleGlobals.getActualFromRelativeLevel(self.getStyleName(), self.level) + 1
else:
self.notify.warning('called getActualLevel with no DNA, returning 1 for level')
return 1
def setPath(self, path):
self.path = path
self.pathLength = self.path.getNumPoints()
def getPath(self):
return self.path
def printPath(self):
print '%d points in path' % self.pathLength
for currPathPt in xrange(self.pathLength):
indexVal = self.path.getPointIndex(currPathPt)
print '\t', self.sp.dnaStore.getSuitPointWithIndex(indexVal)
def makeLegList(self):
self.legList = SuitLegList(self.path, self.sp.dnaStore)
| mit |
pombredanne/invenio-old | modules/websubmit/lib/functions/Move_Files_Archive.py | 4 | 2180 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import os
from invenio.bibdocfile import BibRecDocs, decompose_file, normalize_format
def Move_Files_Archive(parameters, curdir, form, user_info=None):
"""DEPRECATED: Use FFT instead."""
MainDir = "%s/files/MainFiles" % curdir
IncludeDir = "%s/files/AdditionalFiles" % curdir
watcheddirs = {'Main' : MainDir, 'Additional' : IncludeDir}
for type, dir in watcheddirs.iteritems():
if os.path.exists(dir):
formats = {}
files = os.listdir(dir)
files.sort()
for file in files:
dummy, filename, extension = decompose_file(file)
if not formats.has_key(filename):
formats[filename] = []
formats[filename].append(normalize_format(extension))
# first delete all missing files
bibarchive = BibRecDocs(sysno)
existingBibdocs = bibarchive.list_bibdocs(type)
for existingBibdoc in existingBibdocs:
if not formats.has_key(existingBibdoc.get_docname()):
existingBibdoc.delete()
# then create/update the new ones
            for key in formats.keys():
                # instantiate a bibdoc object for each format of this file
                for extension in formats[key]:
                    bibarchive.add_new_file('%s/%s%s' % (dir, key, extension), doctype=type, never_fail=True)
return ""
| gpl-2.0 |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/plat-mac/Carbon/Icons.py | 81 | 16284 | # Generated from 'Icons.h'
def FOUR_CHAR_CODE(x): return x
from Carbon.Files import *
kGenericDocumentIconResource = -4000
kGenericStationeryIconResource = -3985
kGenericEditionFileIconResource = -3989
kGenericApplicationIconResource = -3996
kGenericDeskAccessoryIconResource = -3991
kGenericFolderIconResource = -3999
kPrivateFolderIconResource = -3994
kFloppyIconResource = -3998
kTrashIconResource = -3993
kGenericRAMDiskIconResource = -3988
kGenericCDROMIconResource = -3987
kDesktopIconResource = -3992
kOpenFolderIconResource = -3997
kGenericHardDiskIconResource = -3995
kGenericFileServerIconResource = -3972
kGenericSuitcaseIconResource = -3970
kGenericMoverObjectIconResource = -3969
kGenericPreferencesIconResource = -3971
kGenericQueryDocumentIconResource = -16506
kGenericExtensionIconResource = -16415
kSystemFolderIconResource = -3983
kHelpIconResource = -20271
kAppleMenuFolderIconResource = -3982
genericDocumentIconResource = kGenericDocumentIconResource
genericStationeryIconResource = kGenericStationeryIconResource
genericEditionFileIconResource = kGenericEditionFileIconResource
genericApplicationIconResource = kGenericApplicationIconResource
genericDeskAccessoryIconResource = kGenericDeskAccessoryIconResource
genericFolderIconResource = kGenericFolderIconResource
privateFolderIconResource = kPrivateFolderIconResource
floppyIconResource = kFloppyIconResource
trashIconResource = kTrashIconResource
genericRAMDiskIconResource = kGenericRAMDiskIconResource
genericCDROMIconResource = kGenericCDROMIconResource
desktopIconResource = kDesktopIconResource
openFolderIconResource = kOpenFolderIconResource
genericHardDiskIconResource = kGenericHardDiskIconResource
genericFileServerIconResource = kGenericFileServerIconResource
genericSuitcaseIconResource = kGenericSuitcaseIconResource
genericMoverObjectIconResource = kGenericMoverObjectIconResource
genericPreferencesIconResource = kGenericPreferencesIconResource
genericQueryDocumentIconResource = kGenericQueryDocumentIconResource
genericExtensionIconResource = kGenericExtensionIconResource
systemFolderIconResource = kSystemFolderIconResource
appleMenuFolderIconResource = kAppleMenuFolderIconResource
kStartupFolderIconResource = -3981
kOwnedFolderIconResource = -3980
kDropFolderIconResource = -3979
kSharedFolderIconResource = -3978
kMountedFolderIconResource = -3977
kControlPanelFolderIconResource = -3976
kPrintMonitorFolderIconResource = -3975
kPreferencesFolderIconResource = -3974
kExtensionsFolderIconResource = -3973
kFontsFolderIconResource = -3968
kFullTrashIconResource = -3984
startupFolderIconResource = kStartupFolderIconResource
ownedFolderIconResource = kOwnedFolderIconResource
dropFolderIconResource = kDropFolderIconResource
sharedFolderIconResource = kSharedFolderIconResource
mountedFolderIconResource = kMountedFolderIconResource
controlPanelFolderIconResource = kControlPanelFolderIconResource
printMonitorFolderIconResource = kPrintMonitorFolderIconResource
preferencesFolderIconResource = kPreferencesFolderIconResource
extensionsFolderIconResource = kExtensionsFolderIconResource
fontsFolderIconResource = kFontsFolderIconResource
fullTrashIconResource = kFullTrashIconResource
kThumbnail32BitData = FOUR_CHAR_CODE('it32')
kThumbnail8BitMask = FOUR_CHAR_CODE('t8mk')
kHuge1BitMask = FOUR_CHAR_CODE('ich#')
kHuge4BitData = FOUR_CHAR_CODE('ich4')
kHuge8BitData = FOUR_CHAR_CODE('ich8')
kHuge32BitData = FOUR_CHAR_CODE('ih32')
kHuge8BitMask = FOUR_CHAR_CODE('h8mk')
kLarge1BitMask = FOUR_CHAR_CODE('ICN#')
kLarge4BitData = FOUR_CHAR_CODE('icl4')
kLarge8BitData = FOUR_CHAR_CODE('icl8')
kLarge32BitData = FOUR_CHAR_CODE('il32')
kLarge8BitMask = FOUR_CHAR_CODE('l8mk')
kSmall1BitMask = FOUR_CHAR_CODE('ics#')
kSmall4BitData = FOUR_CHAR_CODE('ics4')
kSmall8BitData = FOUR_CHAR_CODE('ics8')
kSmall32BitData = FOUR_CHAR_CODE('is32')
kSmall8BitMask = FOUR_CHAR_CODE('s8mk')
kMini1BitMask = FOUR_CHAR_CODE('icm#')
kMini4BitData = FOUR_CHAR_CODE('icm4')
kMini8BitData = FOUR_CHAR_CODE('icm8')
kTileIconVariant = FOUR_CHAR_CODE('tile')
kRolloverIconVariant = FOUR_CHAR_CODE('over')
kDropIconVariant = FOUR_CHAR_CODE('drop')
kOpenIconVariant = FOUR_CHAR_CODE('open')
kOpenDropIconVariant = FOUR_CHAR_CODE('odrp')
large1BitMask = kLarge1BitMask
large4BitData = kLarge4BitData
large8BitData = kLarge8BitData
small1BitMask = kSmall1BitMask
small4BitData = kSmall4BitData
small8BitData = kSmall8BitData
mini1BitMask = kMini1BitMask
mini4BitData = kMini4BitData
mini8BitData = kMini8BitData
kAlignNone = 0x00
kAlignVerticalCenter = 0x01
kAlignTop = 0x02
kAlignBottom = 0x03
kAlignHorizontalCenter = 0x04
kAlignAbsoluteCenter = kAlignVerticalCenter | kAlignHorizontalCenter
kAlignCenterTop = kAlignTop | kAlignHorizontalCenter
kAlignCenterBottom = kAlignBottom | kAlignHorizontalCenter
kAlignLeft = 0x08
kAlignCenterLeft = kAlignVerticalCenter | kAlignLeft
kAlignTopLeft = kAlignTop | kAlignLeft
kAlignBottomLeft = kAlignBottom | kAlignLeft
kAlignRight = 0x0C
kAlignCenterRight = kAlignVerticalCenter | kAlignRight
kAlignTopRight = kAlignTop | kAlignRight
kAlignBottomRight = kAlignBottom | kAlignRight
atNone = kAlignNone
atVerticalCenter = kAlignVerticalCenter
atTop = kAlignTop
atBottom = kAlignBottom
atHorizontalCenter = kAlignHorizontalCenter
atAbsoluteCenter = kAlignAbsoluteCenter
atCenterTop = kAlignCenterTop
atCenterBottom = kAlignCenterBottom
atLeft = kAlignLeft
atCenterLeft = kAlignCenterLeft
atTopLeft = kAlignTopLeft
atBottomLeft = kAlignBottomLeft
atRight = kAlignRight
atCenterRight = kAlignCenterRight
atTopRight = kAlignTopRight
atBottomRight = kAlignBottomRight
kTransformNone = 0x00
kTransformDisabled = 0x01
kTransformOffline = 0x02
kTransformOpen = 0x03
kTransformLabel1 = 0x0100
kTransformLabel2 = 0x0200
kTransformLabel3 = 0x0300
kTransformLabel4 = 0x0400
kTransformLabel5 = 0x0500
kTransformLabel6 = 0x0600
kTransformLabel7 = 0x0700
kTransformSelected = 0x4000
kTransformSelectedDisabled = kTransformSelected | kTransformDisabled
kTransformSelectedOffline = kTransformSelected | kTransformOffline
kTransformSelectedOpen = kTransformSelected | kTransformOpen
ttNone = kTransformNone
ttDisabled = kTransformDisabled
ttOffline = kTransformOffline
ttOpen = kTransformOpen
ttLabel1 = kTransformLabel1
ttLabel2 = kTransformLabel2
ttLabel3 = kTransformLabel3
ttLabel4 = kTransformLabel4
ttLabel5 = kTransformLabel5
ttLabel6 = kTransformLabel6
ttLabel7 = kTransformLabel7
ttSelected = kTransformSelected
ttSelectedDisabled = kTransformSelectedDisabled
ttSelectedOffline = kTransformSelectedOffline
ttSelectedOpen = kTransformSelectedOpen
kSelectorLarge1Bit = 0x00000001
kSelectorLarge4Bit = 0x00000002
kSelectorLarge8Bit = 0x00000004
kSelectorLarge32Bit = 0x00000008
kSelectorLarge8BitMask = 0x00000010
kSelectorSmall1Bit = 0x00000100
kSelectorSmall4Bit = 0x00000200
kSelectorSmall8Bit = 0x00000400
kSelectorSmall32Bit = 0x00000800
kSelectorSmall8BitMask = 0x00001000
kSelectorMini1Bit = 0x00010000
kSelectorMini4Bit = 0x00020000
kSelectorMini8Bit = 0x00040000
kSelectorHuge1Bit = 0x01000000
kSelectorHuge4Bit = 0x02000000
kSelectorHuge8Bit = 0x04000000
kSelectorHuge32Bit = 0x08000000
kSelectorHuge8BitMask = 0x10000000
kSelectorAllLargeData = 0x000000FF
kSelectorAllSmallData = 0x0000FF00
kSelectorAllMiniData = 0x00FF0000
# kSelectorAllHugeData = (long)0xFF000000
kSelectorAll1BitData = kSelectorLarge1Bit | kSelectorSmall1Bit | kSelectorMini1Bit | kSelectorHuge1Bit
kSelectorAll4BitData = kSelectorLarge4Bit | kSelectorSmall4Bit | kSelectorMini4Bit | kSelectorHuge4Bit
kSelectorAll8BitData = kSelectorLarge8Bit | kSelectorSmall8Bit | kSelectorMini8Bit | kSelectorHuge8Bit
kSelectorAll32BitData = kSelectorLarge32Bit | kSelectorSmall32Bit | kSelectorHuge32Bit
# kSelectorAllAvailableData = (long)0xFFFFFFFF
svLarge1Bit = kSelectorLarge1Bit
svLarge4Bit = kSelectorLarge4Bit
svLarge8Bit = kSelectorLarge8Bit
svSmall1Bit = kSelectorSmall1Bit
svSmall4Bit = kSelectorSmall4Bit
svSmall8Bit = kSelectorSmall8Bit
svMini1Bit = kSelectorMini1Bit
svMini4Bit = kSelectorMini4Bit
svMini8Bit = kSelectorMini8Bit
svAllLargeData = kSelectorAllLargeData
svAllSmallData = kSelectorAllSmallData
svAllMiniData = kSelectorAllMiniData
svAll1BitData = kSelectorAll1BitData
svAll4BitData = kSelectorAll4BitData
svAll8BitData = kSelectorAll8BitData
# svAllAvailableData = kSelectorAllAvailableData
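# A minimal usage sketch (hypothetical selector value): the kSelectorAll*
# masks cover whole byte ranges of the per-size flags above, and membership
# is tested with a bitwise AND, e.g.
#   wants_large_8bit = (selector & kSelectorLarge8Bit) != 0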
kSystemIconsCreator = FOUR_CHAR_CODE('macs')
# err = GetIconRef(kOnSystemDisk
kClipboardIcon = FOUR_CHAR_CODE('CLIP')
kClippingUnknownTypeIcon = FOUR_CHAR_CODE('clpu')
kClippingPictureTypeIcon = FOUR_CHAR_CODE('clpp')
kClippingTextTypeIcon = FOUR_CHAR_CODE('clpt')
kClippingSoundTypeIcon = FOUR_CHAR_CODE('clps')
kDesktopIcon = FOUR_CHAR_CODE('desk')
kFinderIcon = FOUR_CHAR_CODE('FNDR')
kFontSuitcaseIcon = FOUR_CHAR_CODE('FFIL')
kFullTrashIcon = FOUR_CHAR_CODE('ftrh')
kGenericApplicationIcon = FOUR_CHAR_CODE('APPL')
kGenericCDROMIcon = FOUR_CHAR_CODE('cddr')
kGenericControlPanelIcon = FOUR_CHAR_CODE('APPC')
kGenericControlStripModuleIcon = FOUR_CHAR_CODE('sdev')
kGenericComponentIcon = FOUR_CHAR_CODE('thng')
kGenericDeskAccessoryIcon = FOUR_CHAR_CODE('APPD')
kGenericDocumentIcon = FOUR_CHAR_CODE('docu')
kGenericEditionFileIcon = FOUR_CHAR_CODE('edtf')
kGenericExtensionIcon = FOUR_CHAR_CODE('INIT')
kGenericFileServerIcon = FOUR_CHAR_CODE('srvr')
kGenericFontIcon = FOUR_CHAR_CODE('ffil')
kGenericFontScalerIcon = FOUR_CHAR_CODE('sclr')
kGenericFloppyIcon = FOUR_CHAR_CODE('flpy')
kGenericHardDiskIcon = FOUR_CHAR_CODE('hdsk')
kGenericIDiskIcon = FOUR_CHAR_CODE('idsk')
kGenericRemovableMediaIcon = FOUR_CHAR_CODE('rmov')
kGenericMoverObjectIcon = FOUR_CHAR_CODE('movr')
kGenericPCCardIcon = FOUR_CHAR_CODE('pcmc')
kGenericPreferencesIcon = FOUR_CHAR_CODE('pref')
kGenericQueryDocumentIcon = FOUR_CHAR_CODE('qery')
kGenericRAMDiskIcon = FOUR_CHAR_CODE('ramd')
kGenericSharedLibaryIcon = FOUR_CHAR_CODE('shlb')
kGenericStationeryIcon = FOUR_CHAR_CODE('sdoc')
kGenericSuitcaseIcon = FOUR_CHAR_CODE('suit')
kGenericURLIcon = FOUR_CHAR_CODE('gurl')
kGenericWORMIcon = FOUR_CHAR_CODE('worm')
kInternationalResourcesIcon = FOUR_CHAR_CODE('ifil')
kKeyboardLayoutIcon = FOUR_CHAR_CODE('kfil')
kSoundFileIcon = FOUR_CHAR_CODE('sfil')
kSystemSuitcaseIcon = FOUR_CHAR_CODE('zsys')
kTrashIcon = FOUR_CHAR_CODE('trsh')
kTrueTypeFontIcon = FOUR_CHAR_CODE('tfil')
kTrueTypeFlatFontIcon = FOUR_CHAR_CODE('sfnt')
kTrueTypeMultiFlatFontIcon = FOUR_CHAR_CODE('ttcf')
kUserIDiskIcon = FOUR_CHAR_CODE('udsk')
kInternationResourcesIcon = kInternationalResourcesIcon
kInternetLocationHTTPIcon = FOUR_CHAR_CODE('ilht')
kInternetLocationFTPIcon = FOUR_CHAR_CODE('ilft')
kInternetLocationAppleShareIcon = FOUR_CHAR_CODE('ilaf')
kInternetLocationAppleTalkZoneIcon = FOUR_CHAR_CODE('ilat')
kInternetLocationFileIcon = FOUR_CHAR_CODE('ilfi')
kInternetLocationMailIcon = FOUR_CHAR_CODE('ilma')
kInternetLocationNewsIcon = FOUR_CHAR_CODE('ilnw')
kInternetLocationNSLNeighborhoodIcon = FOUR_CHAR_CODE('ilns')
kInternetLocationGenericIcon = FOUR_CHAR_CODE('ilge')
kGenericFolderIcon = FOUR_CHAR_CODE('fldr')
kDropFolderIcon = FOUR_CHAR_CODE('dbox')
kMountedFolderIcon = FOUR_CHAR_CODE('mntd')
kOpenFolderIcon = FOUR_CHAR_CODE('ofld')
kOwnedFolderIcon = FOUR_CHAR_CODE('ownd')
kPrivateFolderIcon = FOUR_CHAR_CODE('prvf')
kSharedFolderIcon = FOUR_CHAR_CODE('shfl')
kSharingPrivsNotApplicableIcon = FOUR_CHAR_CODE('shna')
kSharingPrivsReadOnlyIcon = FOUR_CHAR_CODE('shro')
kSharingPrivsReadWriteIcon = FOUR_CHAR_CODE('shrw')
kSharingPrivsUnknownIcon = FOUR_CHAR_CODE('shuk')
kSharingPrivsWritableIcon = FOUR_CHAR_CODE('writ')
kUserFolderIcon = FOUR_CHAR_CODE('ufld')
kWorkgroupFolderIcon = FOUR_CHAR_CODE('wfld')
kGuestUserIcon = FOUR_CHAR_CODE('gusr')
kUserIcon = FOUR_CHAR_CODE('user')
kOwnerIcon = FOUR_CHAR_CODE('susr')
kGroupIcon = FOUR_CHAR_CODE('grup')
kAppearanceFolderIcon = FOUR_CHAR_CODE('appr')
kAppleExtrasFolderIcon = FOUR_CHAR_CODE('aex\xc4')
kAppleMenuFolderIcon = FOUR_CHAR_CODE('amnu')
kApplicationsFolderIcon = FOUR_CHAR_CODE('apps')
kApplicationSupportFolderIcon = FOUR_CHAR_CODE('asup')
kAssistantsFolderIcon = FOUR_CHAR_CODE('ast\xc4')
kColorSyncFolderIcon = FOUR_CHAR_CODE('prof')
kContextualMenuItemsFolderIcon = FOUR_CHAR_CODE('cmnu')
kControlPanelDisabledFolderIcon = FOUR_CHAR_CODE('ctrD')
kControlPanelFolderIcon = FOUR_CHAR_CODE('ctrl')
kControlStripModulesFolderIcon = FOUR_CHAR_CODE('sdv\xc4')
kDocumentsFolderIcon = FOUR_CHAR_CODE('docs')
kExtensionsDisabledFolderIcon = FOUR_CHAR_CODE('extD')
kExtensionsFolderIcon = FOUR_CHAR_CODE('extn')
kFavoritesFolderIcon = FOUR_CHAR_CODE('favs')
kFontsFolderIcon = FOUR_CHAR_CODE('font')
kHelpFolderIcon = FOUR_CHAR_CODE('\xc4hlp')
kInternetFolderIcon = FOUR_CHAR_CODE('int\xc4')
kInternetPlugInFolderIcon = FOUR_CHAR_CODE('\xc4net')
kInternetSearchSitesFolderIcon = FOUR_CHAR_CODE('issf')
kLocalesFolderIcon = FOUR_CHAR_CODE('\xc4loc')
kMacOSReadMeFolderIcon = FOUR_CHAR_CODE('mor\xc4')
kPublicFolderIcon = FOUR_CHAR_CODE('pubf')
kPreferencesFolderIcon = FOUR_CHAR_CODE('prf\xc4')
kPrinterDescriptionFolderIcon = FOUR_CHAR_CODE('ppdf')
kPrinterDriverFolderIcon = FOUR_CHAR_CODE('\xc4prd')
kPrintMonitorFolderIcon = FOUR_CHAR_CODE('prnt')
kRecentApplicationsFolderIcon = FOUR_CHAR_CODE('rapp')
kRecentDocumentsFolderIcon = FOUR_CHAR_CODE('rdoc')
kRecentServersFolderIcon = FOUR_CHAR_CODE('rsrv')
kScriptingAdditionsFolderIcon = FOUR_CHAR_CODE('\xc4scr')
kSharedLibrariesFolderIcon = FOUR_CHAR_CODE('\xc4lib')
kScriptsFolderIcon = FOUR_CHAR_CODE('scr\xc4')
kShutdownItemsDisabledFolderIcon = FOUR_CHAR_CODE('shdD')
kShutdownItemsFolderIcon = FOUR_CHAR_CODE('shdf')
kSpeakableItemsFolder = FOUR_CHAR_CODE('spki')
kStartupItemsDisabledFolderIcon = FOUR_CHAR_CODE('strD')
kStartupItemsFolderIcon = FOUR_CHAR_CODE('strt')
kSystemExtensionDisabledFolderIcon = FOUR_CHAR_CODE('macD')
kSystemFolderIcon = FOUR_CHAR_CODE('macs')
kTextEncodingsFolderIcon = FOUR_CHAR_CODE('\xc4tex')
kUsersFolderIcon = FOUR_CHAR_CODE('usr\xc4')
kUtilitiesFolderIcon = FOUR_CHAR_CODE('uti\xc4')
kVoicesFolderIcon = FOUR_CHAR_CODE('fvoc')
kSystemFolderXIcon = FOUR_CHAR_CODE('macx')
kAppleScriptBadgeIcon = FOUR_CHAR_CODE('scrp')
kLockedBadgeIcon = FOUR_CHAR_CODE('lbdg')
kMountedBadgeIcon = FOUR_CHAR_CODE('mbdg')
kSharedBadgeIcon = FOUR_CHAR_CODE('sbdg')
kAliasBadgeIcon = FOUR_CHAR_CODE('abdg')
kAlertCautionBadgeIcon = FOUR_CHAR_CODE('cbdg')
kAlertNoteIcon = FOUR_CHAR_CODE('note')
kAlertCautionIcon = FOUR_CHAR_CODE('caut')
kAlertStopIcon = FOUR_CHAR_CODE('stop')
kAppleTalkIcon = FOUR_CHAR_CODE('atlk')
kAppleTalkZoneIcon = FOUR_CHAR_CODE('atzn')
kAFPServerIcon = FOUR_CHAR_CODE('afps')
kFTPServerIcon = FOUR_CHAR_CODE('ftps')
kHTTPServerIcon = FOUR_CHAR_CODE('htps')
kGenericNetworkIcon = FOUR_CHAR_CODE('gnet')
kIPFileServerIcon = FOUR_CHAR_CODE('isrv')
kToolbarCustomizeIcon = FOUR_CHAR_CODE('tcus')
kToolbarDeleteIcon = FOUR_CHAR_CODE('tdel')
kToolbarFavoritesIcon = FOUR_CHAR_CODE('tfav')
kToolbarHomeIcon = FOUR_CHAR_CODE('thom')
kAppleLogoIcon = FOUR_CHAR_CODE('capl')
kAppleMenuIcon = FOUR_CHAR_CODE('sapl')
kBackwardArrowIcon = FOUR_CHAR_CODE('baro')
kFavoriteItemsIcon = FOUR_CHAR_CODE('favr')
kForwardArrowIcon = FOUR_CHAR_CODE('faro')
kGridIcon = FOUR_CHAR_CODE('grid')
kHelpIcon = FOUR_CHAR_CODE('help')
kKeepArrangedIcon = FOUR_CHAR_CODE('arng')
kLockedIcon = FOUR_CHAR_CODE('lock')
kNoFilesIcon = FOUR_CHAR_CODE('nfil')
kNoFolderIcon = FOUR_CHAR_CODE('nfld')
kNoWriteIcon = FOUR_CHAR_CODE('nwrt')
kProtectedApplicationFolderIcon = FOUR_CHAR_CODE('papp')
kProtectedSystemFolderIcon = FOUR_CHAR_CODE('psys')
kRecentItemsIcon = FOUR_CHAR_CODE('rcnt')
kShortcutIcon = FOUR_CHAR_CODE('shrt')
kSortAscendingIcon = FOUR_CHAR_CODE('asnd')
kSortDescendingIcon = FOUR_CHAR_CODE('dsnd')
kUnlockedIcon = FOUR_CHAR_CODE('ulck')
kConnectToIcon = FOUR_CHAR_CODE('cnct')
kGenericWindowIcon = FOUR_CHAR_CODE('gwin')
kQuestionMarkIcon = FOUR_CHAR_CODE('ques')
kDeleteAliasIcon = FOUR_CHAR_CODE('dali')
kEjectMediaIcon = FOUR_CHAR_CODE('ejec')
kBurningIcon = FOUR_CHAR_CODE('burn')
kRightContainerArrowIcon = FOUR_CHAR_CODE('rcar')
kIconServicesNormalUsageFlag = 0
kIconServicesCatalogInfoMask = (kFSCatInfoNodeID | kFSCatInfoParentDirID | kFSCatInfoVolume | kFSCatInfoNodeFlags | kFSCatInfoFinderInfo | kFSCatInfoFinderXInfo | kFSCatInfoUserAccess)
kPlotIconRefNormalFlags = 0L
kPlotIconRefNoImage = (1 << 1)
kPlotIconRefNoMask = (1 << 2)
kIconFamilyType = FOUR_CHAR_CODE('icns')
| apache-2.0 |
ray-project/ray | rllib/utils/annotations.py | 2 | 1536 | def override(cls):
"""Annotation for documenting method overrides.
Args:
cls (type): The superclass that provides the overridden method. If this
cls does not actually have the method, an error is raised.
"""
def check_override(method):
if method.__name__ not in dir(cls):
raise NameError("{} does not override any method of {}".format(
method, cls))
return method
return check_override
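# A hypothetical usage sketch (the decorator only checks that the method name
# exists on the given superclass):
#
#   class MyPolicy(BasePolicy):
#       @override(BasePolicy)
#       def compute_actions(self, obs):
#           ...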
def PublicAPI(obj):
"""Annotation for documenting public APIs.
Public APIs are classes and methods exposed to end users of RLlib. You
can expect these APIs to remain stable across RLlib releases.
Subclasses that inherit from a ``@PublicAPI`` base class can be
assumed part of the RLlib public API as well (e.g., all trainer classes
are in public API because Trainer is ``@PublicAPI``).
In addition, you can assume all trainer configurations are part of their
public API as well.
"""
return obj
def DeveloperAPI(obj):
"""Annotation for documenting developer APIs.
Developer APIs are classes and methods explicitly exposed to developers
for the purposes of building custom algorithms or advanced training
strategies on top of RLlib internals. You can generally expect these APIs
to be stable sans minor changes (but less stable than public APIs).
Subclasses that inherit from a ``@DeveloperAPI`` base class can be
assumed part of the RLlib developer API as well.
"""
return obj
| apache-2.0 |
akash1808/glance | glance/tests/unit/common/test_signature_utils.py | 2 | 15922 | # Copyright (c) The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import mock
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from glance.common import exception
from glance.common import signature_utils
from glance.tests import utils as test_utils
TEST_PRIVATE_KEY = rsa.generate_private_key(public_exponent=3,
key_size=1024,
backend=default_backend())
# Required image property names
(SIGNATURE, HASH_METHOD, KEY_TYPE, CERT_UUID) = (
signature_utils.SIGNATURE,
signature_utils.HASH_METHOD,
signature_utils.KEY_TYPE,
signature_utils.CERT_UUID
)
# Optional image property names for RSA-PSS
(MASK_GEN_ALG, PSS_SALT_LENGTH) = (
signature_utils.MASK_GEN_ALG,
signature_utils.PSS_SALT_LENGTH
)
class FakeKeyManager(object):
def __init__(self):
self.certs = {'invalid_format_cert':
FakeCastellanCertificate('A' * 256, 'BLAH'),
'valid_format_cert':
FakeCastellanCertificate('A' * 256, 'X.509')}
def get(self, context, cert_uuid):
cert = self.certs.get(cert_uuid)
if cert is None:
raise Exception("No matching certificate found.")
return cert
class FakeCastellanCertificate(object):
def __init__(self, data, cert_format):
self.data = data
self.cert_format = cert_format
@property
def format(self):
return self.cert_format
def get_encoded(self):
return self.data
class FakeCryptoCertificate(object):
def __init__(self, pub_key):
self.pub_key = pub_key
def public_key(self):
return self.pub_key
class BadPublicKey(object):
def verifier(self, signature, padding, hash_method):
return None
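# Hedged illustration (assumption; not part of the original test module):
# the sign/verify round trip that test_verify_signature_PSS exercises,
# written out once with a fixed hash for readability.
def _example_pss_round_trip():
    from cryptography.hazmat.primitives import hashes
    checksum = '224626ae19824466f2a7f39ab7b80f7f'
    pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                      salt_length=padding.PSS.MAX_LENGTH)
    signer = TEST_PRIVATE_KEY.signer(pss, hashes.SHA256())
    signer.update(checksum)
    signature = signer.finalize()
    verifier = TEST_PRIVATE_KEY.public_key().verifier(
        signature, pss, hashes.SHA256())
    verifier.update(checksum)
    verifier.verify()  # raises InvalidSignature if the data was tampered with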
class TestSignatureUtils(test_utils.BaseTestCase):
"""Test methods of signature_utils"""
def test_should_verify_signature(self):
image_props = {CERT_UUID: 'CERT_UUID',
HASH_METHOD: 'HASH_METHOD',
SIGNATURE: 'SIGNATURE',
KEY_TYPE: 'SIG_KEY_TYPE'}
self.assertTrue(signature_utils.should_verify_signature(image_props))
def test_should_verify_signature_fail(self):
bad_image_properties = [{CERT_UUID: 'CERT_UUID',
HASH_METHOD: 'HASH_METHOD',
SIGNATURE: 'SIGNATURE'},
{CERT_UUID: 'CERT_UUID',
HASH_METHOD: 'HASH_METHOD',
KEY_TYPE: 'SIG_KEY_TYPE'},
{CERT_UUID: 'CERT_UUID',
SIGNATURE: 'SIGNATURE',
KEY_TYPE: 'SIG_KEY_TYPE'},
{HASH_METHOD: 'HASH_METHOD',
SIGNATURE: 'SIGNATURE',
KEY_TYPE: 'SIG_KEY_TYPE'}]
for bad_props in bad_image_properties:
result = signature_utils.should_verify_signature(bad_props)
self.assertFalse(result)
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_PSS(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = TEST_PRIVATE_KEY.public_key()
for hash_name, hash_alg in signature_utils.HASH_METHODS.iteritems():
signer = TEST_PRIVATE_KEY.signer(
padding.PSS(
mgf=padding.MGF1(hash_alg),
salt_length=padding.PSS.MAX_LENGTH
),
hash_alg
)
signer.update(checksum_hash)
signature = base64.b64encode(signer.finalize())
image_props = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: hash_name,
KEY_TYPE: 'RSA-PSS',
MASK_GEN_ALG: 'MGF1',
SIGNATURE: signature}
self.assertTrue(signature_utils.verify_signature(None,
checksum_hash,
image_props))
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_custom_PSS_salt(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = TEST_PRIVATE_KEY.public_key()
custom_salt_length = 32
for hash_name, hash_alg in signature_utils.HASH_METHODS.iteritems():
signer = TEST_PRIVATE_KEY.signer(
padding.PSS(
mgf=padding.MGF1(hash_alg),
salt_length=custom_salt_length
),
hash_alg
)
signer.update(checksum_hash)
signature = base64.b64encode(signer.finalize())
image_props = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: hash_name,
KEY_TYPE: 'RSA-PSS',
MASK_GEN_ALG: 'MGF1',
PSS_SALT_LENGTH: custom_salt_length,
SIGNATURE: signature}
self.assertTrue(signature_utils.verify_signature(None,
checksum_hash,
image_props))
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_bad_signature(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = TEST_PRIVATE_KEY.public_key()
image_properties = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: 'SHA-256',
KEY_TYPE: 'RSA-PSS',
MASK_GEN_ALG: 'MGF1',
SIGNATURE: 'BLAH'}
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Signature verification failed.',
signature_utils.verify_signature,
None, checksum_hash, image_properties)
@mock.patch('glance.common.signature_utils.should_verify_signature')
def test_verify_signature_invalid_image_props(self, mock_should):
mock_should.return_value = False
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Required image properties for signature'
' verification do not exist. Cannot verify'
' signature.',
signature_utils.verify_signature,
None, None, None)
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_bad_sig_key_type(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = TEST_PRIVATE_KEY.public_key()
image_properties = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: 'SHA-256',
KEY_TYPE: 'BLAH',
MASK_GEN_ALG: 'MGF1',
SIGNATURE: 'BLAH'}
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Invalid signature key type: .*',
signature_utils.verify_signature,
None, checksum_hash, image_properties)
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_RSA_no_mask_gen(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = TEST_PRIVATE_KEY.public_key()
image_properties = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: 'SHA-256',
KEY_TYPE: 'RSA-PSS',
SIGNATURE: 'BLAH'}
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Signature verification failed.',
signature_utils.verify_signature,
None, checksum_hash, image_properties)
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_RSA_bad_mask_gen(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = TEST_PRIVATE_KEY.public_key()
image_properties = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: 'SHA-256',
KEY_TYPE: 'RSA-PSS',
MASK_GEN_ALG: 'BLAH',
SIGNATURE: 'BLAH'}
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Invalid mask_gen_algorithm: .*',
signature_utils.verify_signature,
None, checksum_hash, image_properties)
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_bad_pss_salt(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = TEST_PRIVATE_KEY.public_key()
image_properties = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: 'SHA-256',
KEY_TYPE: 'RSA-PSS',
MASK_GEN_ALG: 'MGF1',
PSS_SALT_LENGTH: 'BLAH',
SIGNATURE: 'BLAH'}
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Invalid pss_salt_length: .*',
signature_utils.verify_signature,
None, checksum_hash, image_properties)
@mock.patch('glance.common.signature_utils.get_public_key')
def test_verify_signature_verifier_none(self, mock_get_pub_key):
checksum_hash = '224626ae19824466f2a7f39ab7b80f7f'
mock_get_pub_key.return_value = BadPublicKey()
image_properties = {CERT_UUID:
'fea14bc2-d75f-4ba5-bccc-b5c924ad0693',
HASH_METHOD: 'SHA-256',
KEY_TYPE: 'RSA-PSS',
MASK_GEN_ALG: 'MGF1',
SIGNATURE: 'BLAH'}
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Error occurred while verifying'
' the signature',
signature_utils.verify_signature,
None, checksum_hash, image_properties)
def test_get_signature(self):
signature = 'A' * 256
data = base64.b64encode(signature)
self.assertEqual(signature,
signature_utils.get_signature(data))
def test_get_signature_fail(self):
self.assertRaisesRegexp(exception.SignatureVerificationError,
'The signature data was not properly'
' encoded using base64',
signature_utils.get_signature, '///')
def test_get_hash_method(self):
hash_dict = signature_utils.HASH_METHODS
for hash_name in hash_dict.keys():
hash_class = signature_utils.get_hash_method(hash_name).__class__
self.assertIsInstance(hash_dict[hash_name], hash_class)
def test_get_hash_method_fail(self):
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Invalid signature hash method: .*',
signature_utils.get_hash_method, 'SHA-2')
def test_get_signature_key_type(self):
for sig_format in signature_utils.SIGNATURE_KEY_TYPES:
result = signature_utils.get_signature_key_type(sig_format)
self.assertEqual(sig_format, result)
def test_get_signature_key_type_fail(self):
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Invalid signature key type: .*',
signature_utils.get_signature_key_type,
'RSB-PSS')
@mock.patch('glance.common.signature_utils.get_certificate')
def test_get_public_key(self, mock_get_cert):
fake_cert = FakeCryptoCertificate(TEST_PRIVATE_KEY.public_key())
mock_get_cert.return_value = fake_cert
result_pub_key = signature_utils.get_public_key(None, None, 'RSA-PSS')
self.assertEqual(fake_cert.public_key(), result_pub_key)
@mock.patch('glance.common.signature_utils.get_certificate')
def test_get_public_key_invalid_key(self, mock_get_certificate):
bad_pub_key = 'A' * 256
mock_get_certificate.return_value = FakeCryptoCertificate(bad_pub_key)
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Invalid public key type for '
'signature key type: .*',
signature_utils.get_public_key, None,
None, 'RSA-PSS')
@mock.patch('cryptography.x509.load_der_x509_certificate')
@mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
def test_get_certificate(self, mock_key_manager_API, mock_load_cert):
cert_uuid = 'valid_format_cert'
x509_cert = FakeCryptoCertificate(TEST_PRIVATE_KEY.public_key())
mock_load_cert.return_value = x509_cert
self.assertEqual(x509_cert,
signature_utils.get_certificate(None, cert_uuid))
@mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
def test_get_certificate_key_manager_fail(self, mock_key_manager_API):
bad_cert_uuid = 'fea14bc2-d75f-4ba5-bccc-b5c924ad0695'
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Unable to retrieve certificate with ID: .*',
signature_utils.get_certificate, None,
bad_cert_uuid)
@mock.patch('castellan.key_manager.API', return_value=FakeKeyManager())
def test_get_certificate_invalid_format(self, mock_API):
cert_uuid = 'invalid_format_cert'
self.assertRaisesRegexp(exception.SignatureVerificationError,
'Invalid certificate format: .*',
signature_utils.get_certificate, None,
cert_uuid)
| apache-2.0 |
DreamerKing/LightweightHtmlWidgets | publish-rc/v1.1/files/Ipy.Lib/bdb.py | 108 | 21084 | """Debugger basics"""
import fnmatch
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
class BdbQuit(Exception):
"""Exception to give up completely"""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self, skip=None):
self.skip = set(skip) if skip else None
self.breaks = {}
self.fncache = {}
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
return filename
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self._set_stopinfo(None, None)
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame.f_back # (CT) Note that this may also be None!
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
self.user_return(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def is_skipped_module(self, module_name):
for pattern in self.skip:
if fnmatch.fnmatch(module_name, pattern):
return True
return False
def stop_here(self, frame):
# (CT) stopframe may now also be None, see dispatch_call.
# (CT) the former test for None is therefore removed from here.
if self.skip and \
self.is_skipped_module(frame.f_globals.get('__name__')):
return False
if frame is self.stopframe:
if self.stoplineno == -1:
return False
return frame.f_lineno >= self.stoplineno
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return True
frame = frame.f_back
return False
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not filename in self.breaks:
return False
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = frame.f_code.co_firstlineno
if not lineno in self.breaks[filename]:
return False
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return True
else:
return False
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.canonic(frame.f_code.co_filename) in self.breaks
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, exc_info):
        """This method is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        exc_type, exc_value, exc_traceback = exc_info
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
self.stopframe = stopframe
self.returnframe = returnframe
self.quitting = 0
# stoplineno >= 0 means: stop at line >= the stoplineno
# stoplineno -1 means: don't stop at all
self.stoplineno = stoplineno
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_until(self, frame): #the name "until" is borrowed from gdb
"""Stop when the line with the line no greater than the current one is
reached or when returning from current frame"""
self._set_stopinfo(frame, frame, frame.f_lineno+1)
def set_step(self):
"""Stop after one line of code."""
self._set_stopinfo(None, None)
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self._set_stopinfo(frame, None)
def set_return(self, frame):
"""Stop when returning from the given frame."""
self._set_stopinfo(frame.f_back, frame)
def set_trace(self, frame=None):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
    # error message if something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not filename in self.breaks:
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
def _prune_breaks(self, filename, lineno):
if (filename, lineno) not in Breakpoint.bplist:
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
self._prune_breaks(filename, lineno)
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
bp.deleteMe()
self._prune_breaks(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
if f is None:
i = max(0, len(stack) - 1)
return stack, i
#
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, repr
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = '%s(%r)' % (filename, lineno)
if frame.f_code.co_name:
s = s + frame.f_code.co_name
else:
s = s + "<lambda>"
if '__args__' in frame.f_locals:
args = frame.f_locals['__args__']
else:
args = None
if args:
s = s + repr.repr(args)
else:
s = s + '()'
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
s = s + '->'
s = s + repr.repr(rv)
line = linecache.getline(filename, lineno, frame.f_globals)
if line: s = s + lprefix + line.strip()
return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args, **kwds):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
res = func(*args, **kwds)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
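# Hedged usage sketch (illustrative; not part of the stdlib module). A
# client subclasses Bdb, overrides the user_* hooks, and may set
# breakpoints before running code under the trace function:
def _example_client():
    class EchoDebugger(Bdb):
        def user_line(self, frame):
            print '+++ stopped in', frame.f_code.co_filename, 'at line', frame.f_lineno
            self.set_continue()
        def do_clear(self, arg):          # required for temporary breakpoints
            self.clear_bpbynumber(arg)
    dbg = EchoDebugger()
    dbg.set_break(__file__, 100)          # hypothetical file/line pair
    dbg.run('x = 1 + 1')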
class Breakpoint:
"""Breakpoint class
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
bpbynumber = [None] # Each entry is None or an instance of Bpt
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=0, cond=None, funcname=None):
self.funcname = funcname
# Needed if funcname is not None.
self.func_first_executable_line = None
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = 1
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next = Breakpoint.next + 1
# Build the two lists
self.bpbynumber.append(self)
if (file, line) in self.bplist:
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = 1
def disable(self):
self.enabled = 0
def bpprint(self, out=None):
if out is None:
out = sys.stdout
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes '
else:
disp = disp + 'no '
print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
print >>out, '\tstop only if %s' % (self.cond,)
if self.ignore:
print >>out, '\tignore next %d hits' % (self.ignore)
if (self.hits):
if (self.hits > 1): ss = 's'
else: ss = ''
print >>out, ('\tbreakpoint already hit %d time%s' %
(self.hits, ss))
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file,line]
for i in range(0, len(possibles)):
b = possibles[i]
if b.enabled == 0:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits = b.hits + 1
if not b.cond:
# If unconditional, and ignoring,
# go on to next, else break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok
# to delete if temporary
return (b,1)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals,
frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b,1)
# else:
# continue
except:
# if eval fails, most conservative
# thing is to stop on breakpoint
# regardless of ignore count.
# Don't delete temporary,
# as another hint to user.
return (b,0)
return (None, None)
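# Worked example (illustrative assumption): for a breakpoint created with
# cond='x > 3' and ignore=2, effective() returns (None, None) while the
# condition is false; the first two true hits only decrement the ignore
# count, and the third true hit returns (b, 1), i.e. "stop here, and a
# temporary breakpoint may be deleted".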
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print '+++ call', name, args
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
print '+++', fn, frame.f_lineno, name, ':', line.strip()
def user_return(self, frame, retval):
print '+++ return', retval
def user_exception(self, frame, exc_stuff):
print '+++ exception', exc_stuff
self.set_continue()
def foo(n):
print 'foo(', n, ')'
x = bar(n*10)
print 'bar returned', x
def bar(a):
print 'bar(', a, ')'
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
# end
| gpl-3.0 |
CouchPotato/CouchPotatoServer | couchpotato/core/downloaders/blackhole.py | 16 | 7939 | from __future__ import with_statement
import os
import traceback
from couchpotato.core._base.downloader.main import DownloaderBase
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import getDownloadDir
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'Blackhole'
class Blackhole(DownloaderBase):
protocol = ['nzb', 'torrent', 'torrent_magnet']
status_support = False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and sent to this function.
            This is done so failure checking happens before the downloader is used,
            and the downloader doesn't need to worry about it.
:return: boolean
            On failure this returns False, but the downloader should log its own errors.
"""
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
# The folder needs to exist
if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('protocol'))
else:
try:
                # Filedata can be empty, which probably means it's a magnet link
if not filedata or len(filedata) < 50:
try:
if data.get('protocol') == 'torrent_magnet':
filedata = self.magnetToTorrent(data.get('url'))
data['protocol'] = 'torrent'
except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
# If it's still empty, either write the magnet link to a .magnet file, or error out.
if not filedata or len(filedata) < 50:
if self.conf('magnet_file'):
filedata = data.get('url') + '\n'
data['protocol'] = 'magnet'
else:
log.error('No nzb/torrent available: %s', data.get('url'))
return False
# Create filename with imdb id and other nice stuff
file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name)
            # People want things nice and tidy, so create a subdir
if self.conf('create_subdir'):
try:
new_path = os.path.splitext(full_path)[0]
if not os.path.exists(new_path):
os.makedirs(new_path)
full_path = os.path.join(new_path, file_name)
except:
                    log.error('Couldn\'t create sub dir, reverting to old one: %s', full_path)
try:
# Make sure the file doesn't exist yet, no need in overwriting it
if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f:
f.write(filedata)
os.chmod(full_path, Env.getPermission('file'))
return self.downloadReturnId('')
else:
log.info('File %s already exists.', full_path)
return self.downloadReturnId('')
except:
log.error('Failed to download to blackhole %s', traceback.format_exc())
pass
except:
log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))
return False
return False
def test(self):
""" Test and see if the directory is writable
:return: boolean
"""
directory = self.conf('directory')
if directory and os.path.isdir(directory):
test_file = sp(os.path.join(directory, 'couchpotato_test.txt'))
# Check if folder is writable
self.createFile(test_file, 'This is a test file')
if os.path.isfile(test_file):
os.remove(test_file)
return True
return False
def getEnabledProtocol(self):
""" What protocols is this downloaded used for
:return: list with protocols
"""
if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual = False, data = None):
""" Check if protocol is used (and enabled)
:param manual: The user has clicked to download a link through the webUI
:param data: dict returned from provider
Contains the release information
:return: boolean
"""
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):
for_protocol.append('torrent')
elif data:
for_protocol.append(data.get('protocol'))
return super(Blackhole, self).isEnabled(manual, data) and \
((self.conf('use_for') in for_protocol))
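# Hedged note (illustrative; inferred from the class attributes above): the
# 'use_for' setting narrows the class-level protocol list
# ['nzb', 'torrent', 'torrent_magnet'] as follows:
#   'both'    -> all three protocols (the superclass default)
#   'torrent' -> ['torrent', 'torrent_magnet']
#   'nzb'     -> ['nzb']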
config = [{
'name': 'blackhole',
'order': 30,
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'blackhole',
'label': 'Black hole',
'description': 'Download the NZB/Torrent to a specific folder. <em>Note: Seeding and copying/linking features do <strong>not</strong> work with Black hole</em>.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': True,
'type': 'enabler',
'radio_group': 'nzb,torrent',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Directory where the .nzb (or .torrent) file is saved to.',
'default': getDownloadDir()
},
{
'name': 'use_for',
'label': 'Use for',
'default': 'both',
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
},
{
'name': 'create_subdir',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Create a sub directory when saving the .nzb (or .torrent).',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'magnet_file',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'If magnet file conversion fails, write down the magnet link in a .magnet file instead.',
},
],
}
],
}]
| gpl-3.0 |
raumfeld/linux-am33xx | tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py | 91 | 5020 | '''
run the command under test, under valgrind and collect memory leak info
as a separate test.
'''
import os
import re
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
from tdc_config import *
def vp_extract_num_from_string(num_as_string_maybe_with_commas):
return int(num_as_string_maybe_with_commas.replace(',',''))
class SubPlugin(TdcPlugin):
def __init__(self):
self.sub_class = 'valgrind/SubPlugin'
self.tap = ''
super().__init__()
def pre_suite(self, testcount, testidlist):
'''run commands before test_runner goes into a test loop'''
super().pre_suite(testcount, testidlist)
if self.args.verbose > 1:
print('{}.pre_suite'.format(self.sub_class))
if self.args.valgrind:
self._add_to_tap('1..{}\n'.format(self.testcount))
def post_suite(self, index):
'''run commands after test_runner goes into a test loop'''
super().post_suite(index)
self._add_to_tap('\n|---\n')
if self.args.verbose > 1:
print('{}.post_suite'.format(self.sub_class))
print('{}'.format(self.tap))
if self.args.verbose < 4:
subprocess.check_output('rm -f vgnd-*.log', shell=True)
def add_args(self, parser):
super().add_args(parser)
self.argparser_group = self.argparser.add_argument_group(
'valgrind',
'options for valgrindPlugin (run command under test under Valgrind)')
self.argparser_group.add_argument(
'-V', '--valgrind', action='store_true',
help='Run commands under valgrind')
return self.argparser
def adjust_command(self, stage, command):
super().adjust_command(stage, command)
cmdform = 'list'
cmdlist = list()
if not self.args.valgrind:
return command
if self.args.verbose > 1:
print('{}.adjust_command'.format(self.sub_class))
if not isinstance(command, list):
cmdform = 'str'
cmdlist = command.split()
else:
cmdlist = command
if stage == 'execute':
if self.args.verbose > 1:
print('adjust_command: stage is {}; inserting valgrind stuff in command [{}] list [{}]'.
format(stage, command, cmdlist))
cmdlist.insert(0, '--track-origins=yes')
cmdlist.insert(0, '--show-leak-kinds=definite,indirect')
cmdlist.insert(0, '--leak-check=full')
cmdlist.insert(0, '--log-file=vgnd-{}.log'.format(self.args.testid))
cmdlist.insert(0, '-v') # ask for summary of non-leak errors
cmdlist.insert(0, ENVIR['VALGRIND_BIN'])
else:
pass
if cmdform == 'str':
command = ' '.join(cmdlist)
else:
command = cmdlist
if self.args.verbose > 1:
print('adjust_command: return command [{}]'.format(command))
return command
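    # Hedged illustration (assumption, not upstream code): because the
    # options are pushed with repeated insert(0, ...), the final order is
    # reversed relative to the source. For testid 42 and command
    # ['tc', 'qdisc', 'show'], the 'execute' stage yields:
    #   [VALGRIND_BIN, '-v', '--log-file=vgnd-42.log',
    #    '--leak-check=full', '--show-leak-kinds=definite,indirect',
    #    '--track-origins=yes', 'tc', 'qdisc', 'show']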
def post_execute(self):
if not self.args.valgrind:
return
self.definitely_lost_re = re.compile(
r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks', re.MULTILINE | re.DOTALL)
self.indirectly_lost_re = re.compile(
r'indirectly lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\s+blocks', re.MULTILINE | re.DOTALL)
self.possibly_lost_re = re.compile(
            r'possibly lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\s+blocks', re.MULTILINE | re.DOTALL)
self.non_leak_error_re = re.compile(
r'ERROR SUMMARY:\s+([,0-9]+) errors from\s+([,0-9]+)\s+contexts', re.MULTILINE | re.DOTALL)
def_num = 0
ind_num = 0
pos_num = 0
nle_num = 0
# what about concurrent test runs? Maybe force them to be in different directories?
with open('vgnd-{}.log'.format(self.args.testid)) as vfd:
content = vfd.read()
def_mo = self.definitely_lost_re.search(content)
ind_mo = self.indirectly_lost_re.search(content)
pos_mo = self.possibly_lost_re.search(content)
nle_mo = self.non_leak_error_re.search(content)
if def_mo:
def_num = int(def_mo.group(2))
if ind_mo:
ind_num = int(ind_mo.group(2))
if pos_mo:
pos_num = int(pos_mo.group(2))
if nle_mo:
nle_num = int(nle_mo.group(1))
mem_results = ''
if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
mem_results += 'not '
mem_results += 'ok {} - {}-mem # {}\n'.format(
self.args.test_ordinal, self.args.testid, 'memory leak check')
self._add_to_tap(mem_results)
if mem_results.startswith('not '):
print('{}'.format(content))
self._add_to_tap(content)
def _add_to_tap(self, more_tap_output):
self.tap += more_tap_output
| gpl-2.0 |
fronti90/kernel_lge_geefhd | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
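    # Illustrative numbers (assumption): with the initial zoom of 0.5, one
    # horizontal pixel spans px_to_us(1) = 2000 us, and a 1 ms interval maps
    # to us_to_px(1000) = 0.5 px; zoom_in() doubles the pixels-per-us density.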
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
danilito19/django | django/core/files/locks.py | 725 | 3516 | """
Portable file locking utilities.
Based partially on an example by Jonathan Feignberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
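# Hedged portability note (illustrative; not part of Django): with LOCK_NB the
# two backends fail differently -- the Windows branch returns False, while the
# POSIX branch lets fcntl.flock() raise IOError (EWOULDBLOCK). A portable
# non-blocking acquire therefore checks both:
#
#   try:
#       acquired = lock(f, LOCK_EX | LOCK_NB)
#   except IOError:
#       acquired = False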
if os.name == 'nt':
import msvcrt
from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
Structure, Union, POINTER, windll, byref)
from ctypes.wintypes import BOOL, DWORD, HANDLE
LOCK_SH = 0 # the default
LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY
LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK
# --- Adapted from the pyserial project ---
# detect size of ULONG_PTR
if sizeof(c_ulong) != sizeof(c_void_p):
ULONG_PTR = c_int64
else:
ULONG_PTR = c_ulong
PVOID = c_void_p
# --- Union inside Structure by stackoverflow:3480240 ---
class _OFFSET(Structure):
_fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD)]
class _OFFSET_UNION(Union):
_anonymous_ = ['_offset']
_fields_ = [
('_offset', _OFFSET),
('Pointer', PVOID)]
class OVERLAPPED(Structure):
_anonymous_ = ['_offset_union']
_fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_offset_union', _OFFSET_UNION),
('hEvent', HANDLE)]
LPOVERLAPPED = POINTER(OVERLAPPED)
# --- Define function prototypes for extra safety ---
LockFileEx = windll.kernel32.LockFileEx
LockFileEx.restype = BOOL
LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
UnlockFileEx = windll.kernel32.UnlockFileEx
UnlockFileEx.restype = BOOL
UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]
def lock(f, flags):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
def unlock(f):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
else:
try:
import fcntl
LOCK_SH = fcntl.LOCK_SH # shared lock
LOCK_NB = fcntl.LOCK_NB # non-blocking
LOCK_EX = fcntl.LOCK_EX
except (ImportError, AttributeError):
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = 0
# Dummy functions that don't do anything.
def lock(f, flags):
# File is not locked
return False
def unlock(f):
# File is unlocked
return True
else:
def lock(f, flags):
ret = fcntl.flock(_fd(f), flags)
return (ret == 0)
def unlock(f):
ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
return (ret == 0)
| bsd-3-clause |
anrl/gini3 | backend/src/gloader/xml/dom/html/HTMLUListElement.py | 10 | 1612 | ########################################################################
#
# File Name: HTMLUListElement
#
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLUListElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="UL"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_compact(self):
return self.hasAttribute("COMPACT")
def _set_compact(self, value):
if value:
self.setAttribute("COMPACT", "COMPACT")
else:
self.removeAttribute("COMPACT")
def _get_type(self):
return string.capitalize(self.getAttribute("TYPE"))
def _set_type(self, value):
self.setAttribute("TYPE", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"compact" : _get_compact,
"type" : _get_type
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"compact" : _set_compact,
"type" : _set_type
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7/Lib/distutils/tests/test_build_py.py | 2 | 4009 | """Tests for distutils.command.build_py."""
import os
import sys
import StringIO
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
class BuildPyTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def _setup_package_data(self):
sources = self.mkdtemp()
f = open(os.path.join(sources, "__init__.py"), "w")
f.write("# Pretend this is a package.")
f.close()
f = open(os.path.join(sources, "README.txt"), "w")
f.write("Info about this package")
f.close()
destination = self.mkdtemp()
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": sources}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
force=0,
build_lib=destination)
dist.packages = ["pkg"]
dist.package_data = {"pkg": ["README.txt"]}
dist.package_dir = {"pkg": sources}
cmd = build_py(dist)
cmd.compile = 1
cmd.ensure_finalized()
self.assertEqual(cmd.package_data, dist.package_data)
cmd.run()
# This makes sure the list of outputs includes byte-compiled
# files for Python modules but not for package data files
# (there shouldn't *be* byte-code files for those!).
#
self.assertEqual(len(cmd.get_outputs()), 3)
pkgdest = os.path.join(destination, "pkg")
files = os.listdir(pkgdest)
return files
def test_package_data(self):
files = self._setup_package_data()
self.assertTrue("__init__.py" in files)
self.assertTrue("README.txt" in files)
@unittest.skipIf(sys.flags.optimize >= 2,
"pyc files are not written with -O2 and above")
def test_package_data_pyc(self):
files = self._setup_package_data()
self.assertTrue("__init__.pyc" in files)
    def test_empty_package_dir(self):
# See SF 1668596/1720897.
cwd = os.getcwd()
# create the distribution files.
sources = self.mkdtemp()
open(os.path.join(sources, "__init__.py"), "w").close()
testdir = os.path.join(sources, "doc")
os.mkdir(testdir)
open(os.path.join(testdir, "testfile"), "w").close()
os.chdir(sources)
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
try:
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": ""},
"package_data": {"pkg": ["doc/*"]}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.script_args = ["build"]
dist.parse_command_line()
try:
dist.run_commands()
except DistutilsFileError:
self.fail("failed package_data test when package_dir is ''")
finally:
# Restore state.
os.chdir(cwd)
sys.stdout = old_stdout
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
cmd = build_py(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
def test_suite():
return unittest.makeSuite(BuildPyTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| mit |
pattywgm/funny-spider | douban/douban/spiders/movie_awards.py | 1 | 2888 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@file: movie_awards.py
@time: 17/10/19 10:35 PM
@desc: Scrape movie award data.
       Crawling at 28 items/minute got the spider banned.
"""
import re
from copy import deepcopy
from os.path import exists
import scrapy
from douban.items import AwardsItem
from douban.utils.my_utils import load_obj, replace_dot
_META_VERSION = 'v1.0'
_AWARDS = 'https://movie.douban.com/subject/{}/awards/'
class MovieAwards(scrapy.Spider):
name = 'movie_awards'
meta_version = _META_VERSION
def __init__(self):
"""
:param urls:
        :param done: URLs already crawled, used to resume an interrupted crawl
:return:
"""
self.urls = load_obj('./records/urls.pkl')
self.done = list()
if exists('./records/{}_done.pkl'.format(self.name)):
self.done = load_obj('./records/{}_done.pkl'.format(self.name))
self.new_done = deepcopy(self.done)
def start_requests(self):
req = list()
for url in self.urls:
movie_code = re.findall('\d+', url)[0]
award_url = _AWARDS.format(movie_code)
if award_url not in self.done:
req.append(scrapy.Request(award_url, callback=self.parse, meta={'movie_code': movie_code}))
return req
def parse(self, response):
url = response.url
self.logger.info('Crawl {}'.format(url))
item = AwardsItem()
item['url'] = url
item['movie_code'] = response.meta['movie_code']
award_divs = response.xpath('//div[@class="awards"]')
item['awards'] = [self.parse_award_detail(div) for div in award_divs]
yield item
def parse_award_detail(self, award_div):
"""
        Parse the detailed award information.
:param award_div:
:return:
"""
award_detail = dict()
        # Award provider and year
url = award_div.xpath('.//h2/a/@href').extract_first()
name = award_div.xpath('.//h2/a/text()').extract_first()
year = award_div.xpath('.//h2/span/text()').extract_first().replace('(', '').replace(')', '').strip()
award_detail.update({'award_provider': {name: url}, 'year': year})
        # Specific award names and winners
awards = list()
for ul in award_div.xpath('.//ul[@class="award"]'):
award_name = ul.xpath('./li[1]/text()').extract_first()
award_persons = list()
for person in ul.xpath('./li[position()>1]'):
if person.xpath('./a').extract_first() is None:
break
p_name = replace_dot(person.xpath('./a/text()').extract())
p_url = person.xpath('./a/@href').extract()
award_persons.append(dict(zip(p_name, p_url)))
awards.append({award_name: award_persons})
award_detail.update({'awards': awards})
return award_detail
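    # Illustrative return shape (assumption; values are made up):
    # {
    #     'award_provider': {u'74th Academy Awards': u'https://movie.douban.com/awards/...'},
    #     'year': u'2002',
    #     'awards': [
    #         {u'Best Picture (nominee)': [{u'Some Producer': u'https://movie.douban.com/celebrity/...'}]},
    #     ],
    # }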
| gpl-3.0 |
flyfei/python-for-android | python3-alpha/python3-src/Tools/scripts/ftpmirror.py | 49 | 13088 | #! /usr/bin/env python3
"""Mirror a remote ftp subtree into a local directory tree.
usage: ftpmirror [-v] [-q] [-i] [-m] [-n] [-r] [-s pat]
[-l username [-p passwd [-a account]]]
hostname[:port] [remotedir [localdir]]
-v: verbose
-q: quiet
-i: interactive mode
-m: macintosh server (NCSA telnet 2.4) (implies -n -s '*.o')
-n: don't log in
-r: remove local files/directories no longer pertinent
-l username [-p passwd [-a account]]: login info (default .netrc or anonymous)
-s pat: skip files matching pattern
hostname: remote host w/ optional port separated by ':'
remotedir: remote directory (default initial)
localdir: local directory (default current)
"""
import os
import sys
import time
import getopt
import ftplib
import netrc
from fnmatch import fnmatch
# Print usage message and exit
def usage(*args):
sys.stdout = sys.stderr
for msg in args: print(msg)
print(__doc__)
sys.exit(2)
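# Illustrative invocation (assumption):
#   ftpmirror.py -v -s '*.tmp' ftp.example.com:2121 pub/project ./mirror
# mirrors pub/project from ftp.example.com port 2121 into ./mirror,
# printing progress and skipping files matching *.tmp.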
verbose = 1 # 0 for -q, 2 for -v
interactive = 0
mac = 0
rmok = 0
nologin = 0
skippats = ['.', '..', '.mirrorinfo']
# Main program: parse command line and start processing
def main():
global verbose, interactive, mac, rmok, nologin
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:bil:mnp:qrs:v')
except getopt.error as msg:
usage(msg)
login = ''
passwd = ''
account = ''
if not args: usage('hostname missing')
host = args[0]
port = 0
if ':' in host:
host, port = host.split(':', 1)
port = int(port)
try:
auth = netrc.netrc().authenticators(host)
if auth is not None:
login, account, passwd = auth
except (netrc.NetrcParseError, IOError):
pass
for o, a in opts:
if o == '-l': login = a
if o == '-p': passwd = a
if o == '-a': account = a
if o == '-v': verbose = verbose + 1
if o == '-q': verbose = 0
if o == '-i': interactive = 1
if o == '-m': mac = 1; nologin = 1; skippats.append('*.o')
if o == '-n': nologin = 1
if o == '-r': rmok = 1
if o == '-s': skippats.append(a)
remotedir = ''
localdir = ''
if args[1:]:
remotedir = args[1]
if args[2:]:
localdir = args[2]
if args[3:]: usage('too many arguments')
#
f = ftplib.FTP()
if verbose: print("Connecting to '%s%s'..." % (host,
(port and ":%d"%port or "")))
f.connect(host,port)
if not nologin:
if verbose:
print('Logging in as %r...' % (login or 'anonymous'))
f.login(login, passwd, account)
if verbose: print('OK.')
pwd = f.pwd()
if verbose > 1: print('PWD =', repr(pwd))
if remotedir:
if verbose > 1: print('cwd(%s)' % repr(remotedir))
f.cwd(remotedir)
if verbose > 1: print('OK.')
pwd = f.pwd()
if verbose > 1: print('PWD =', repr(pwd))
#
mirrorsubdir(f, localdir)
# Core logic: mirror one subdirectory (recursively)
def mirrorsubdir(f, localdir):
pwd = f.pwd()
if localdir and not os.path.isdir(localdir):
if verbose: print('Creating local directory', repr(localdir))
try:
makedir(localdir)
except os.error as msg:
print("Failed to establish local directory", repr(localdir))
return
infofilename = os.path.join(localdir, '.mirrorinfo')
try:
text = open(infofilename, 'r').read()
except IOError as msg:
text = '{}'
try:
info = eval(text)
except (SyntaxError, NameError):
print('Bad mirror info in', repr(infofilename))
info = {}
subdirs = []
listing = []
if verbose: print('Listing remote directory %r...' % (pwd,))
f.retrlines('LIST', listing.append)
filesfound = []
for line in listing:
if verbose > 1: print('-->', repr(line))
if mac:
# Mac listing has just filenames;
# trailing / means subdirectory
filename = line.strip()
mode = '-'
if filename[-1:] == '/':
filename = filename[:-1]
mode = 'd'
infostuff = ''
else:
# Parse, assuming a UNIX listing
words = line.split(None, 8)
if len(words) < 6:
if verbose > 1: print('Skipping short line')
continue
filename = words[-1].lstrip()
i = filename.find(" -> ")
if i >= 0:
# words[0] had better start with 'l'...
if verbose > 1:
print('Found symbolic link %r' % (filename,))
linkto = filename[i+4:]
filename = filename[:i]
infostuff = words[-5:-1]
mode = words[0]
skip = 0
for pat in skippats:
if fnmatch(filename, pat):
if verbose > 1:
print('Skip pattern', repr(pat), end=' ')
print('matches', repr(filename))
skip = 1
break
if skip:
continue
if mode[0] == 'd':
if verbose > 1:
print('Remembering subdirectory', repr(filename))
subdirs.append(filename)
continue
filesfound.append(filename)
if filename in info and info[filename] == infostuff:
if verbose > 1:
print('Already have this version of',repr(filename))
continue
fullname = os.path.join(localdir, filename)
tempname = os.path.join(localdir, '@'+filename)
if interactive:
doit = askabout('file', filename, pwd)
if not doit:
if filename not in info:
info[filename] = 'Not retrieved'
continue
try:
os.unlink(tempname)
except os.error:
pass
if mode[0] == 'l':
if verbose:
print("Creating symlink %r -> %r" % (filename, linkto))
try:
os.symlink(linkto, tempname)
except IOError as msg:
print("Can't create %r: %s" % (tempname, msg))
continue
else:
try:
fp = open(tempname, 'wb')
except IOError as msg:
print("Can't create %r: %s" % (tempname, msg))
continue
if verbose:
print('Retrieving %r from %r as %r...' % (filename, pwd, fullname))
if verbose:
fp1 = LoggingFile(fp, 1024, sys.stdout)
else:
fp1 = fp
t0 = time.time()
try:
f.retrbinary('RETR ' + filename,
fp1.write, 8*1024)
except ftplib.error_perm as msg:
print(msg)
t1 = time.time()
bytes = fp.tell()
fp.close()
if fp1 != fp:
fp1.close()
try:
os.unlink(fullname)
except os.error:
pass # Ignore the error
try:
os.rename(tempname, fullname)
except os.error as msg:
print("Can't rename %r to %r: %s" % (tempname, fullname, msg))
continue
info[filename] = infostuff
writedict(info, infofilename)
if verbose and mode[0] != 'l':
dt = t1 - t0
kbytes = bytes / 1024.0
print(int(round(kbytes)), end=' ')
print('Kbytes in', end=' ')
print(int(round(dt)), end=' ')
print('seconds', end=' ')
if t1 > t0:
                print('(~%d Kbytes/sec)' %
                      int(round(kbytes / dt)))
print()
#
# Remove files from info that are no longer remote
deletions = 0
for filename in list(info.keys()):
if filename not in filesfound:
if verbose:
print("Removing obsolete info entry for", end=' ')
print(repr(filename), "in", repr(localdir or "."))
del info[filename]
deletions = deletions + 1
if deletions:
writedict(info, infofilename)
#
# Remove local files that are no longer in the remote directory
try:
if not localdir: names = os.listdir(os.curdir)
else: names = os.listdir(localdir)
except os.error:
names = []
for name in names:
if name[0] == '.' or name in info or name in subdirs:
continue
skip = 0
for pat in skippats:
if fnmatch(name, pat):
if verbose > 1:
print('Skip pattern', repr(pat), end=' ')
print('matches', repr(name))
skip = 1
break
if skip:
continue
fullname = os.path.join(localdir, name)
if not rmok:
if verbose:
print('Local file', repr(fullname), end=' ')
print('is no longer pertinent')
continue
if verbose: print('Removing local file/dir', repr(fullname))
remove(fullname)
#
# Recursively mirror subdirectories
for subdir in subdirs:
if interactive:
doit = askabout('subdirectory', subdir, pwd)
if not doit: continue
if verbose: print('Processing subdirectory', repr(subdir))
localsubdir = os.path.join(localdir, subdir)
pwd = f.pwd()
if verbose > 1:
print('Remote directory now:', repr(pwd))
print('Remote cwd', repr(subdir))
try:
f.cwd(subdir)
except ftplib.error_perm as msg:
print("Can't chdir to", repr(subdir), ":", repr(msg))
else:
if verbose: print('Mirroring as', repr(localsubdir))
mirrorsubdir(f, localsubdir)
if verbose > 1: print('Remote cwd ..')
f.cwd('..')
newpwd = f.pwd()
if newpwd != pwd:
print('Ended up in wrong directory after cd + cd ..')
print('Giving up now.')
break
else:
if verbose > 1: print('OK.')
# Helper to remove a file or directory tree
def remove(fullname):
if os.path.isdir(fullname) and not os.path.islink(fullname):
try:
names = os.listdir(fullname)
except os.error:
names = []
ok = 1
for name in names:
if not remove(os.path.join(fullname, name)):
ok = 0
if not ok:
return 0
try:
os.rmdir(fullname)
except os.error as msg:
print("Can't remove local directory %r: %s" % (fullname, msg))
return 0
else:
try:
os.unlink(fullname)
except os.error as msg:
print("Can't remove local file %r: %s" % (fullname, msg))
return 0
return 1
# Wrapper around a writable file object that also writes a hash sign ('#')
# to a log stream for every blocksize bytes written, as progress output.
class LoggingFile:
def __init__(self, fp, blocksize, outfp):
self.fp = fp
self.bytes = 0
self.hashes = 0
self.blocksize = blocksize
self.outfp = outfp
def write(self, data):
self.bytes = self.bytes + len(data)
        hashes = self.bytes // self.blocksize
while hashes > self.hashes:
self.outfp.write('#')
self.outfp.flush()
self.hashes = self.hashes + 1
self.fp.write(data)
def close(self):
self.outfp.write('\n')
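# Illustrative sketch (not part of the mirroring flow itself): wrapping any
# writable file object makes block-level progress visible, e.g.
#
#     out = LoggingFile(open('local.bin', 'wb'), 8*1024, sys.stdout)
#     f.retrbinary('RETR remote.bin', out.write, 8*1024)
#     out.close()

# Minimal replacement for the raw_input() builtin removed in Python 3; used
# by askabout() below.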
def raw_input(prompt):
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline()
# Ask permission to download a file.
def askabout(filetype, filename, pwd):
prompt = 'Retrieve %s %s from %s ? [ny] ' % (filetype, filename, pwd)
while 1:
reply = raw_input(prompt).strip().lower()
if reply in ['y', 'ye', 'yes']:
return 1
if reply in ['', 'n', 'no', 'nop', 'nope']:
return 0
print('Please answer yes or no.')
# Create a directory if it doesn't exist. Recursively create the
# parent directory as well if needed.
def makedir(pathname):
if os.path.isdir(pathname):
return
dirname = os.path.dirname(pathname)
if dirname: makedir(dirname)
os.mkdir(pathname, 0o777)
# Write a dictionary to a file in a way that can be read back using
# eval() (as is done for '.mirrorinfo' above) but is still somewhat
# readable (i.e. not a single long line).  Also creates a backup file.
def writedict(dict, filename):
dir, fname = os.path.split(filename)
tempname = os.path.join(dir, '@' + fname)
backup = os.path.join(dir, fname + '~')
try:
os.unlink(backup)
except os.error:
pass
fp = open(tempname, 'w')
fp.write('{\n')
for key, value in dict.items():
fp.write('%r: %r,\n' % (key, value))
fp.write('}\n')
fp.close()
try:
os.rename(filename, backup)
except os.error:
pass
os.rename(tempname, filename)
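# Illustrative round trip (assumption: the info file is trusted local data,
# which is why reading it back with eval() above is acceptable):
#
#     writedict({'README': ['1024', 'Jul', '1', '12:00']}, '.mirrorinfo')
#     info = eval(open('.mirrorinfo').read())   # -> the same dictionary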
if __name__ == '__main__':
main()
| apache-2.0 |
zerobatu/edx-platform | lms/djangoapps/teams/migrations/0004_auto__add_field_courseteam_discussion_topic_id__add_field_courseteam_l.py | 46 | 6547 | # -*- coding: utf-8 -*-
import pytz
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseTeam.last_activity_at'
db.add_column('teams_courseteam', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
# Adding field 'CourseTeamMembership.last_activity_at'
db.add_column('teams_courseteammembership', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseTeam.last_activity_at'
db.delete_column('teams_courseteam', 'last_activity_at')
# Deleting field 'CourseTeamMembership.last_activity_at'
db.delete_column('teams_courseteammembership', 'last_activity_at')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.courseteam': {
'Meta': {'object_name': 'CourseTeam'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'discussion_topic_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('student.models.LanguageField', [], {'max_length': '16', 'blank': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'team_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'topic_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.CourseTeamMembership']", 'to': "orm['auth.User']"})
},
'teams.courseteammembership': {
'Meta': {'unique_together': "(('user', 'team'),)", 'object_name': 'CourseTeamMembership'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'membership'", 'to': "orm['teams.CourseTeam']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['teams']
| agpl-3.0 |
nrwahl2/ansible | lib/ansible/modules/cloud/centurylink/clc_group.py | 26 | 16745 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
    - Create or delete Server Groups at CenturyLink Cloud
version_added: "2.0"
options:
name:
description:
- The name of the Server Group
required: True
description:
description:
- A description of the Server Group
required: False
parent:
description:
- The parent group of the server group. If parent is not provided, it creates the group at top level.
required: False
location:
description:
- Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
associated with the account
required: False
state:
description:
- Whether to create or delete the group
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Create a Server Group
---
- name: Create Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: present
register: clc
- name: debug
debug:
var: clc
# Delete a Server Group
---
- name: Delete Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete / Verify Absent a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
group:
description: The group information
returned: success
type: dict
sample:
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":"2015-07-29T18:52:47Z",
"modifiedBy":"service.wfad",
"modifiedDate":"2015-07-29T18:52:47Z"
},
"customFields":[
],
"description":"test group",
"groups":[
],
"id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
"links":[
{
"href":"/v2/groups/wfad",
"rel":"createGroup",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad",
"rel":"createServer",
"verbs":[
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"parentGroup"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
"rel":"defaults",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
"rel":"billing"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
"rel":"archiveGroupAction"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
"rel":"statistics"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
"rel":"horizontalAutoscalePolicyMapping",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
}
],
"locationId":"UC1",
"name":"test group",
"status":"active",
"type":"default"
}
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcGroup(object):
clc = None
root_group = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
location = self.module.params.get('location')
group_name = self.module.params.get('name')
parent_name = self.module.params.get('parent')
group_description = self.module.params.get('description')
state = self.module.params.get('state')
self._set_clc_credentials_from_env()
self.group_dict = self._get_group_tree_for_datacenter(
datacenter=location)
if state == "absent":
changed, group, requests = self._ensure_group_is_absent(
group_name=group_name, parent_name=parent_name)
if requests:
self._wait_for_requests_to_complete(requests)
else:
changed, group = self._ensure_group_is_present(
group_name=group_name, parent_name=parent_name, group_description=group_description)
try:
group = group.data
except AttributeError:
group = group_name
self.module.exit_json(changed=changed, group=group)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
parent=dict(default=None),
location=dict(default=None),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=True))
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_group_is_absent(self, group_name, parent_name):
"""
Ensure that group_name is absent by deleting it if necessary
:param group_name: string - the name of the clc server group to delete
:param parent_name: string - the name of the parent group for group_name
:return: changed, group
"""
changed = False
group = []
results = []
if self._group_exists(group_name=group_name, parent_name=parent_name):
if not self.module.check_mode:
group.append(group_name)
result = self._delete_group(group_name)
results.append(result)
changed = True
return changed, group, results
def _delete_group(self, group_name):
"""
Delete the provided server group
:param group_name: string - the server group to delete
:return: none
"""
response = None
group, parent = self.group_dict.get(group_name)
try:
response = group.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
group_name, ex.response_text
))
return response
def _ensure_group_is_present(
self,
group_name,
parent_name,
group_description):
"""
Checks to see if a server group exists, creates it if it doesn't.
:param group_name: the name of the group to validate/create
:param parent_name: the name of the parent group for group_name
:param group_description: a short description of the server group (used when creating)
:return: (changed, group) -
changed: Boolean- whether a change was made,
group: A clc group object for the group
"""
assert self.root_group, "Implementation Error: Root Group not set"
parent = parent_name if parent_name is not None else self.root_group.name
description = group_description
changed = False
group = group_name
parent_exists = self._group_exists(group_name=parent, parent_name=None)
child_exists = self._group_exists(
group_name=group_name,
parent_name=parent)
if parent_exists and child_exists:
group, parent = self.group_dict[group_name]
changed = False
elif parent_exists and not child_exists:
if not self.module.check_mode:
group = self._create_group(
group=group,
parent=parent,
description=description)
changed = True
else:
self.module.fail_json(
msg="parent group: " +
parent +
" does not exist")
return changed, group
def _create_group(self, group, parent, description):
"""
Create the provided server group
:param group: clc_sdk.Group - the group to create
:param parent: clc_sdk.Parent - the parent group for {group}
:param description: string - a text description of the group
:return: clc_sdk.Group - the created group
"""
response = None
(parent, grandparent) = self.group_dict[parent]
try:
response = parent.Create(name=group, description=description)
except CLCException as ex:
self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
group, ex.response_text))
return response
def _group_exists(self, group_name, parent_name):
"""
Check to see if a group exists
:param group_name: string - the group to check
:param parent_name: string - the parent of group_name
:return: boolean - whether the group exists
"""
result = False
if group_name in self.group_dict:
(group, parent) = self.group_dict[group_name]
if parent_name is None or parent_name == parent.name:
result = True
return result
def _get_group_tree_for_datacenter(self, datacenter=None):
"""
Walk the tree of groups for a datacenter
:param datacenter: string - the datacenter to walk (ex: 'UC1')
:return: a dictionary of groups and parents
"""
self.root_group = self.clc.v2.Datacenter(
location=datacenter).RootGroup()
return self._walk_groups_recursive(
parent_group=None,
child_group=self.root_group)
def _walk_groups_recursive(self, parent_group, child_group):
"""
Walk a parent-child tree of groups, starting with the provided child group
:param parent_group: clc_sdk.Group - the parent group to start the walk
:param child_group: clc_sdk.Group - the child group to start the walk
:return: a dictionary of groups and parents
"""
result = {str(child_group): (child_group, parent_group)}
groups = child_group.Subgroups().groups
if len(groups) > 0:
for group in groups:
if group.type != 'default':
continue
result.update(self._walk_groups_recursive(child_group, group))
return result
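    # The resulting mapping has the shape {group_name: (group, parent_group)},
    # with the datacenter root group keyed under its own name and a parent of
    # None; _group_exists() and _delete_group() above unpack these tuples.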
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process group request')
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcGroup._define_module_argument_spec(),
supports_check_mode=True)
clc_group = ClcGroup(module)
clc_group.process_request()
if __name__ == '__main__':
main()
| gpl-3.0 |
SlimRoms/android_external_chromium_org | build/mac/tweak_info_plist.py | 42 | 10163 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Xcode supports build variable substitutions and CPP; sadly, that doesn't work
# because:
#
# 1. Xcode wants to do the Info.plist work before it runs any build phases,
# this means if we were to generate a .h file for INFOPLIST_PREFIX_HEADER
# we'd have to put it in another target so it runs in time.
# 2. Xcode also doesn't check to see if the header being used as a prefix for
# the Info.plist has changed. So even if we updated it, it's only looking
# at the modtime of the info.plist to see if that's changed.
#
# So, we work around all of this by making a script build phase that will run
# during the app build, and simply update the info.plist in place. This way
# by the time the app target is done, the info.plist is correct.
#
import optparse
import os
from os import environ as env
import plistlib
import re
import subprocess
import sys
import tempfile
TOP = os.path.join(env['SRCROOT'], '..')
def _GetOutput(args):
"""Runs a subprocess and waits for termination. Returns (stdout, returncode)
of the process. stderr is attached to the parent."""
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _GetOutputNoError(args):
"""Similar to _GetOutput() but ignores stderr. If there's an error launching
the child (like file not found), the exception will be caught and (None, 1)
will be returned to mimic quiet failure."""
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
return (None, 1)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _RemoveKeys(plist, *keys):
"""Removes a varargs of keys from the plist."""
for key in keys:
try:
del plist[key]
except KeyError:
pass
def _AddVersionKeys(plist, version=None):
"""Adds the product version number into the plist. Returns True on success and
False on error. The error will be printed to stderr."""
if version:
    match = re.match(r'\d+\.\d+\.(\d+\.\d+)$', version)
if not match:
print >>sys.stderr, 'Invalid version string specified: "%s"' % version
return False
full_version = match.group(0)
bundle_version = match.group(1)
else:
# Pull in the Chrome version number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
VERSION_FILE = os.path.join(TOP, 'chrome/VERSION')
(stdout, retval1) = _GetOutput([VERSION_TOOL, '-f', VERSION_FILE, '-t',
'@MAJOR@.@MINOR@.@BUILD@.@PATCH@'])
full_version = stdout.rstrip()
(stdout, retval2) = _GetOutput([VERSION_TOOL, '-f', VERSION_FILE, '-t',
'@BUILD@.@PATCH@'])
bundle_version = stdout.rstrip()
# If either of the two version commands finished with non-zero returncode,
# report the error up.
if retval1 or retval2:
return False
# Add public version info so "Get Info" works.
plist['CFBundleShortVersionString'] = full_version
  # Honor the 429496.72.95 limit.  The maximum comes from splitting 2^32 - 1
  # into 6, 2, 2 digits.  The limitation was present in Tiger; it may have
  # been fixed in a later OS release, but that hasn't been tested (it's easy
  # enough to find out with "lsregister -dump").
  # http://lists.apple.com/archives/carbon-dev/2006/Jun/msg00139.html
  # BUILD will always be an increasing value, so BUILD.PATCH gives us
  # something unique that meets what LS wants.
plist['CFBundleVersion'] = bundle_version
# Return with no error.
return True
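# For example (illustrative), given version '31.0.1650.57' the plist ends up
# with CFBundleShortVersionString '31.0.1650.57' and CFBundleVersion '1650.57'.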
def _DoSCMKeys(plist, add_keys):
"""Adds the SCM information, visible in about:version, to property list. If
|add_keys| is True, it will insert the keys, otherwise it will remove them."""
scm_revision = None
if add_keys:
# Pull in the Chrome revision number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
LASTCHANGE_FILE = os.path.join(TOP, 'build/util/LASTCHANGE')
(stdout, retval) = _GetOutput([VERSION_TOOL, '-f', LASTCHANGE_FILE, '-t',
'@LASTCHANGE@'])
if retval:
return False
scm_revision = stdout.rstrip()
  # Always clear any stale key first; it is re-added below only if a revision
  # was successfully determined.
_RemoveKeys(plist, 'SCMRevision')
if scm_revision != None:
plist['SCMRevision'] = scm_revision
elif add_keys:
print >>sys.stderr, 'Could not determine SCM revision. This may be OK.'
return True
def _AddBreakpadKeys(plist, branding):
"""Adds the Breakpad keys. This must be called AFTER _AddVersionKeys() and
also requires the |branding| argument."""
plist['BreakpadReportInterval'] = '3600' # Deliberately a string.
plist['BreakpadProduct'] = '%s_Mac' % branding
plist['BreakpadProductDisplay'] = branding
plist['BreakpadVersion'] = plist['CFBundleShortVersionString']
# These are both deliberately strings and not boolean.
plist['BreakpadSendAndExit'] = 'YES'
plist['BreakpadSkipConfirm'] = 'YES'
def _RemoveBreakpadKeys(plist):
"""Removes any set Breakpad keys."""
_RemoveKeys(plist,
'BreakpadURL',
'BreakpadReportInterval',
'BreakpadProduct',
'BreakpadProductDisplay',
'BreakpadVersion',
'BreakpadSendAndExit',
'BreakpadSkipConfirm')
def _TagSuffixes():
# Keep this list sorted in the order that tag suffix components are to
# appear in a tag value. That is to say, it should be sorted per ASCII.
components = ('32bit', 'full')
assert tuple(sorted(components)) == components
components_len = len(components)
combinations = 1 << components_len
tag_suffixes = []
for combination in xrange(0, combinations):
tag_suffix = ''
for component_index in xrange(0, components_len):
if combination & (1 << component_index):
tag_suffix += '-' + components[component_index]
tag_suffixes.append(tag_suffix)
return tag_suffixes
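# With the current components this yields, in order:
#   ['', '-32bit', '-full', '-32bit-full']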
def _AddKeystoneKeys(plist, bundle_identifier):
"""Adds the Keystone keys. This must be called AFTER _AddVersionKeys() and
also requires the |bundle_identifier| argument (com.example.product)."""
plist['KSVersion'] = plist['CFBundleShortVersionString']
plist['KSProductID'] = bundle_identifier
plist['KSUpdateURL'] = 'https://tools.google.com/service/update2'
_RemoveKeys(plist, 'KSChannelID')
for tag_suffix in _TagSuffixes():
if tag_suffix:
plist['KSChannelID' + tag_suffix] = tag_suffix
def _RemoveKeystoneKeys(plist):
"""Removes any set Keystone keys."""
_RemoveKeys(plist,
'KSVersion',
'KSProductID',
'KSUpdateURL')
tag_keys = []
for tag_suffix in _TagSuffixes():
tag_keys.append('KSChannelID' + tag_suffix)
_RemoveKeys(plist, *tag_keys)
def Main(argv):
parser = optparse.OptionParser('%prog [options]')
parser.add_option('--breakpad', dest='use_breakpad', action='store',
type='int', default=False, help='Enable Breakpad [1 or 0]')
parser.add_option('--breakpad_uploads', dest='breakpad_uploads',
action='store', type='int', default=False,
help='Enable Breakpad\'s uploading of crash dumps [1 or 0]')
parser.add_option('--keystone', dest='use_keystone', action='store',
type='int', default=False, help='Enable Keystone [1 or 0]')
parser.add_option('--scm', dest='add_scm_info', action='store', type='int',
default=True, help='Add SCM metadata [1 or 0]')
parser.add_option('--branding', dest='branding', action='store',
type='string', default=None, help='The branding of the binary')
parser.add_option('--bundle_id', dest='bundle_identifier',
action='store', type='string', default=None,
help='The bundle id of the binary')
parser.add_option('--version', dest='version', action='store', type='string',
default=None, help='The version string [major.minor.build.patch]')
(options, args) = parser.parse_args(argv)
if len(args) > 0:
print >>sys.stderr, parser.get_usage()
return 1
# Read the plist into its parsed format.
DEST_INFO_PLIST = os.path.join(env['TARGET_BUILD_DIR'], env['INFOPLIST_PATH'])
plist = plistlib.readPlist(DEST_INFO_PLIST)
# Insert the product version.
if not _AddVersionKeys(plist, version=options.version):
return 2
# Add Breakpad if configured to do so.
if options.use_breakpad:
if options.branding is None:
print >>sys.stderr, 'Use of Breakpad requires branding.'
return 1
_AddBreakpadKeys(plist, options.branding)
if options.breakpad_uploads:
plist['BreakpadURL'] = 'https://clients2.google.com/cr/report'
else:
# This allows crash dumping to a file without uploading the
# dump, for testing purposes. Breakpad does not recognise
# "none" as a special value, but this does stop crash dump
# uploading from happening. We need to specify something
# because if "BreakpadURL" is not present, Breakpad will not
# register its crash handler and no crash dumping will occur.
plist['BreakpadURL'] = 'none'
else:
_RemoveBreakpadKeys(plist)
# Only add Keystone in Release builds.
if options.use_keystone and env['CONFIGURATION'] == 'Release':
if options.bundle_identifier is None:
print >>sys.stderr, 'Use of Keystone requires the bundle id.'
return 1
_AddKeystoneKeys(plist, options.bundle_identifier)
else:
_RemoveKeystoneKeys(plist)
# Adds or removes any SCM keys.
if not _DoSCMKeys(plist, options.add_scm_info):
return 3
# Now that all keys have been mutated, rewrite the file.
temp_info_plist = tempfile.NamedTemporaryFile()
plistlib.writePlist(plist, temp_info_plist.name)
# Info.plist will work perfectly well in any plist format, but traditionally
# applications use xml1 for this, so convert it to ensure that it's valid.
proc = subprocess.Popen(['plutil', '-convert', 'xml1', '-o', DEST_INFO_PLIST,
temp_info_plist.name])
proc.wait()
return proc.returncode
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
simone/django-gb | django/core/serializers/pyyaml.py | 115 | 2635 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import decimal
import sys
from io import StringIO

import yaml
from django.db import models
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
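# Illustrative effect of the representer (exact flow style can vary between
# PyYAML versions): a Decimal is emitted as a quoted string scalar instead of
# raising on an unsupported Python type, e.g.
#
#     yaml.dump({'price': decimal.Decimal('1.5')}, Dumper=DjangoSafeDumper)
#     # -> "{price: '1.5'}\n"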
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
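# Illustrative round trip through Django's serializer registry (assumption:
# 'Thing' is a registered model):
#
#     from django.core import serializers
#     data = serializers.serialize('yaml', Thing.objects.all())
#     objs = list(serializers.deserialize('yaml', data))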
| bsd-3-clause |
bclau/nova | nova/virt/powervm/lpar.py | 11 | 5106 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""PowerVM Logical Partition (LPAR)
PowerVM LPAR configuration attributes.
"""
import shlex
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.powervm import exception
LOG = logging.getLogger(__name__)
def load_from_conf_data(conf_data):
"""LPAR configuration data parser.
The configuration data is a string representation of
    the attributes of a Logical Partition. The attributes
    consist of name/value pairs in comma separated
    value format.
Example format: name=lpar_name,lpar_id=1,lpar_env=aixlinux
:param conf_data: string containing the LPAR configuration data.
:returns: LPAR -- LPAR object.
"""
    # conf_data can contain comma separated values within
    # double quotes, for example in the virtual_serial_adapters
    # and virtual_scsi_adapters attributes, so we can't simply
    # split on ','.
cf_splitter = shlex.shlex(conf_data, posix=True)
cf_splitter.whitespace = ','
cf_splitter.whitespace_split = True
attribs = dict(item.split("=") for item in list(cf_splitter))
lpar = LPAR()
for (key, value) in attribs.items():
try:
lpar[key] = value
except exception.PowerVMLPARAttributeNotFound:
LOG.info(_('Encountered unknown LPAR attribute: %s\n'
'Continuing without storing') % key)
return lpar
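# Illustrative use (assumption: the string comes from an HMC/IVM style
# configuration listing):
#
#     lpar = load_from_conf_data('name=inst-01,lpar_id=1,lpar_env=aixlinux')
#     lpar['name']     # -> 'inst-01'
#     lpar['lpar_id']  # -> '1' (values are kept as strings)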
class LPAR(object):
"""
Simple class representing a logical partition and the attributes
for the partition and/or its selected profile.
"""
# Attributes for all logical partitions
LPAR_ATTRS = (
'name',
'lpar_id',
'lpar_env',
'state',
'resource_config',
'os_version',
'logical_serial_num',
'default_profile',
'profile_name',
'curr_profile',
'work_group_id',
'allow_perf_collection',
'power_ctrl_lpar_ids',
'boot_mode',
'lpar_keylock',
'auto_start',
'uptime',
'lpar_avail_priority',
'desired_lpar_proc_compat_mode',
'curr_lpar_proc_compat_mode',
'virtual_eth_mac_base_value',
'rmc_ipaddr'
)
# Logical partitions may contain one or more profiles, which
# may have the following attributes
LPAR_PROFILE_ATTRS = (
'name',
'lpar_name',
'lpar_id',
'os_type',
'all_resources',
'mem_mode',
'min_mem',
'desired_mem',
'max_mem',
'proc_mode',
'min_proc_units',
'desired_proc_units',
'max_proc_units',
'min_procs',
'desired_procs',
'max_procs',
'sharing_mode',
'uncap_weight',
'io_slots',
'lpar_io_pool_ids',
'max_virtual_slots',
'virtual_serial_adapters',
'virtual_scsi_adapters',
'virtual_eth_adapters',
'boot_mode',
'conn_monitoring',
'auto_start',
'power_ctrl_lpar_ids',
'lhea_logical_ports',
'lhea_capabilities',
'lpar_proc_compat_mode',
'virtual_fc_adapters'
)
def __init__(self, **kwargs):
self.attributes = dict([k, None] for k in self.LPAR_ATTRS)
self.profile_attributes = dict([k, None] for k
in self.LPAR_PROFILE_ATTRS)
self.attributes.update(kwargs)
self.profile_attributes.update(kwargs)
self.all_attrs = dict(self.attributes.items()
+ self.profile_attributes.items())
def __getitem__(self, key):
if key not in self.all_attrs.keys():
raise exception.PowerVMLPARAttributeNotFound(key)
return self.all_attrs.get(key)
def __setitem__(self, key, value):
if key not in self.all_attrs.keys():
raise exception.PowerVMLPARAttributeNotFound(key)
self.all_attrs[key] = value
def __delitem__(self, key):
if key not in self.all_attrs.keys():
raise exception.PowerVMLPARAttributeNotFound(key)
# We set to None instead of removing the key...
self.all_attrs[key] = None
def to_string(self, exclude_attribs=[]):
conf_data = []
for (key, value) in self.all_attrs.items():
if key in exclude_attribs or value is None:
continue
conf_data.append('%s=%s' % (key, value))
return ','.join(conf_data)
| apache-2.0 |
CyanogenMod/android_external_chromium_org | third_party/protobuf/python/google/protobuf/internal/text_format_test.py | 162 | 23727 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = '[email protected] (Kenton Varda)'
import difflib
import re
import unittest
from google.protobuf import text_format
from google.protobuf.internal import test_util
from google.protobuf import unittest_pb2
from google.protobuf import unittest_mset_pb2
class TextFormatTest(unittest.TestCase):
def ReadGolden(self, golden_filename):
f = test_util.GoldenFile(golden_filename)
golden_lines = f.readlines()
f.close()
return golden_lines
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.CompareToGoldenLines(text, golden_lines)
def CompareToGoldenText(self, text, golden_text):
self.CompareToGoldenLines(text, golden_text.splitlines(1))
def CompareToGoldenLines(self, text, golden_lines):
actual_lines = text.splitlines(1)
self.assertEqual(golden_lines, actual_lines,
"Text doesn't match golden. Diff:\n" +
''.join(difflib.ndiff(golden_lines, actual_lines)))
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data.txt')
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(text_format.MessageToString(message),
'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
def testPrintBadEnumValue(self):
message = unittest_pb2.TestAllTypes()
message.optional_nested_enum = 100
message.optional_foreign_enum = 101
message.optional_import_enum = 102
self.CompareToGoldenText(
text_format.MessageToString(message),
'optional_nested_enum: 100\n'
'optional_foreign_enum: 101\n'
'optional_import_enum: 102\n')
def testPrintBadEnumValueExtensions(self):
message = unittest_pb2.TestAllExtensions()
message.Extensions[unittest_pb2.optional_nested_enum_extension] = 100
message.Extensions[unittest_pb2.optional_foreign_enum_extension] = 101
message.Extensions[unittest_pb2.optional_import_enum_extension] = 102
self.CompareToGoldenText(
text_format.MessageToString(message),
'[protobuf_unittest.optional_nested_enum_extension]: 100\n'
'[protobuf_unittest.optional_foreign_enum_extension]: 101\n'
'[protobuf_unittest.optional_import_enum_extension]: 102\n')
def testPrintExotic(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self):
message = unittest_pb2.TestAllTypes()
msg = message.repeated_nested_message.add()
    msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append("Google")
message.repeated_string.append("Zurich")
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.optional_string = "a\nnew\nline"
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testPrintExoticAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(
text_format.MessageToString(message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=False)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(wire_text, parsed_message)
self.assertEquals(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=True)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(wire_text, parsed_message)
self.assertEquals(message, parsed_message)
def testPrintRawUtf8String(self):
message = unittest_pb2.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8 = True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(text, parsed_message)
self.assertEquals(message, parsed_message)
def testMessageToString(self):
message = unittest_pb2.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
# Floating point fields are printed with .0 suffix even if they are
    # actually integer numbers.
text = re.compile('\.0$', re.MULTILINE).sub('', text)
return text
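  # For example, '1.23e+022' and '1.23e-018' are normalized to '1.23e+22' and
  # '1.23e-18', and a trailing '.0' on a float field is dropped, matching the
  # golden files.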
def testMergeGolden(self):
golden_text = '\n'.join(self.ReadGolden('text_format_unittest_data.txt'))
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(golden_text, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEquals(message, parsed_message)
def testMergeGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Merge(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEquals(message, parsed_message)
def testMergeAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
test_util.ExpectAllFieldsSet(self, message)
def testMergeAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testMergeMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n'
'repeated_uint64: 2\n')
text_format.Merge(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEquals(23, message.message_set.Extensions[ext1].i)
self.assertEquals('foo', message.message_set.Extensions[ext2].str)
def testMergeExotic(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Merge(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual(
'\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testMergeEmptyText(self):
message = unittest_pb2.TestAllTypes()
text = ''
text_format.Merge(text, message)
self.assertEquals(unittest_pb2.TestAllTypes(), message)
def testMergeInvalidUtf8(self):
message = unittest_pb2.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
self.assertRaises(text_format.ParseError, text_format.Merge, text, message)
def testMergeSingleWord(self):
message = unittest_pb2.TestAllTypes()
text = 'foo'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named '
'"foo".'),
text_format.Merge, text, message)
def testMergeUnknownField(self):
message = unittest_pb2.TestAllTypes()
text = 'unknown_field: 8\n'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named '
'"unknown_field".'),
text_format.Merge, text, message)
def testMergeBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
self.assertRaisesWithMessage(
text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Merge, text, message)
message = unittest_pb2.TestAllTypes()
self.assertRaisesWithMessage(
text_format.ParseError,
('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'),
text_format.Merge, text, message)
def testMergeGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
self.assertRaisesWithMessage(
text_format.ParseError, '1:16 : Expected ">".',
text_format.Merge, text, message)
text = 'RepeatedGroup: {'
self.assertRaisesWithMessage(
text_format.ParseError, '1:16 : Expected "}".',
text_format.Merge, text, message)
def testMergeEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Merge(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Merge(text, message)
self.assertTrue(message.HasField('optionalgroup'))
def testMergeBadEnumValue(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: BARR'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" '
'has no value named BARR.'),
text_format.Merge, text, message)
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: 100'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" '
'has no value with number 100.'),
text_format.Merge, text, message)
def testMergeBadIntValue(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_int32: bork'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Merge, text, message)
def assertRaisesWithMessage(self, e_class, e, func, *args, **kwargs):
"""Same as assertRaises, but also compares the exception message."""
if hasattr(e_class, '__name__'):
exc_name = e_class.__name__
else:
exc_name = str(e_class)
try:
func(*args, **kwargs)
except e_class as expr:
if str(expr) != e:
msg = '%s raised, but with wrong message: "%s" instead of "%s"'
raise self.failureException(msg % (exc_name,
str(expr).encode('string_escape'),
e.encode('string_escape')))
return
else:
raise self.failureException('%s not raised' % exc_name)
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ' )
tokenizer = text_format._Tokenizer(text)
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'),
':',
(tokenizer.ConsumeInt32, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'),
':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'),
':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'),
':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'),
':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'),
':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'),
':',
'{',
(tokenizer.ConsumeIdentifier, 'A'),
':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'),
':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'),
':',
(tokenizer.ConsumeBool, False),
'}',
(tokenizer.ConsumeIdentifier, 'ID9'),
':',
(tokenizer.ConsumeUint32, 22),
(tokenizer.ConsumeIdentifier, 'ID10'),
':',
(tokenizer.ConsumeInt64, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'),
':',
(tokenizer.ConsumeInt32, -22),
(tokenizer.ConsumeIdentifier, 'ID12'),
':',
(tokenizer.ConsumeUint64, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'),
':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'),
':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'),
':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'),
':',
(tokenizer.ConsumeBool, False)]
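    # Added note: each entry in 'methods' is either a bare string (a
    # punctuation token expected to equal tokenizer.token) or a
    # (consumer_method, expected_value) pair; the loop below dispatches on
    # that shape.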
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if type(m) == str:
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeIntegers(self):
    # This test only exercises the failure paths of the integer parsing
    # methods, as well as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64)
self.assertEqual(-1, tokenizer.ConsumeInt32())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32)
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64)
self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format._Tokenizer(text)
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
janocat/odoo | addons/base_report_designer/__init__.py | 421 | 1136 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import base_report_designer
import installer
import openerp_sxw2rml
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mancoast/CPythonPyc_test | cpython/273_test_memoryio.py | 31 | 25664 | """Unit tests for memory-based file-like objects.
StringIO -- for unicode strings
BytesIO -- for bytes
"""
from __future__ import unicode_literals
from __future__ import print_function
import unittest
from test import test_support as support
import io
import _pyio as pyio
import pickle
class MemorySeekTestMixin:
def testInit(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
def testRead(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf[:1], bytesIo.read(1))
self.assertEqual(buf[1:5], bytesIo.read(4))
self.assertEqual(buf[5:], bytesIo.read(900))
self.assertEqual(self.EOF, bytesIo.read())
def testReadNoArgs(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf, bytesIo.read())
self.assertEqual(self.EOF, bytesIo.read())
def testSeek(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
bytesIo.read(5)
bytesIo.seek(0)
self.assertEqual(buf, bytesIo.read())
bytesIo.seek(3)
self.assertEqual(buf[3:], bytesIo.read())
self.assertRaises(TypeError, bytesIo.seek, 0.0)
def testTell(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(0, bytesIo.tell())
bytesIo.seek(5)
self.assertEqual(5, bytesIo.tell())
bytesIo.seek(10000)
self.assertEqual(10000, bytesIo.tell())
class MemoryTestMixin:
def test_detach(self):
buf = self.ioclass()
self.assertRaises(self.UnsupportedOperation, buf.detach)
def write_ops(self, f, t):
self.assertEqual(f.write(t("blah.")), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("Hello.")), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(5), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(t(" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("h")), 1)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 1)
def test_write(self):
buf = self.buftype("hello world\n")
memio = self.ioclass(buf)
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass()
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.write, None)
memio.close()
self.assertRaises(ValueError, memio.write, self.buftype(""))
def test_writelines(self):
buf = self.buftype("1234567890")
memio = self.ioclass()
self.assertEqual(memio.writelines([buf] * 100), None)
self.assertEqual(memio.getvalue(), buf * 100)
memio.writelines([])
self.assertEqual(memio.getvalue(), buf * 100)
memio = self.ioclass()
self.assertRaises(TypeError, memio.writelines, [buf] + [1])
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.writelines, None)
memio.close()
self.assertRaises(ValueError, memio.writelines, [])
def test_writelines_error(self):
memio = self.ioclass()
def error_gen():
yield self.buftype('spam')
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen())
def test_truncate(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(ValueError, memio.truncate, -1)
memio.seek(6)
self.assertEqual(memio.truncate(), 6)
self.assertEqual(memio.getvalue(), buf[:6])
self.assertEqual(memio.truncate(4), 4)
self.assertEqual(memio.getvalue(), buf[:4])
# truncate() accepts long objects
self.assertEqual(memio.truncate(4L), 4)
self.assertEqual(memio.getvalue(), buf[:4])
self.assertEqual(memio.tell(), 6)
memio.seek(0, 2)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf[:4] + buf)
pos = memio.tell()
self.assertEqual(memio.truncate(None), pos)
self.assertEqual(memio.tell(), pos)
self.assertRaises(TypeError, memio.truncate, '0')
memio.close()
self.assertRaises(ValueError, memio.truncate, 0)
def test_init(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass(None)
self.assertEqual(memio.getvalue(), self.EOF)
memio.__init__(buf * 2)
self.assertEqual(memio.getvalue(), buf * 2)
memio.__init__(buf)
self.assertEqual(memio.getvalue(), buf)
def test_read(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.read(0), self.EOF)
self.assertEqual(memio.read(1), buf[:1])
# read() accepts long objects
self.assertEqual(memio.read(4L), buf[1:5])
self.assertEqual(memio.read(900), buf[5:])
self.assertEqual(memio.read(), self.EOF)
memio.seek(0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 10)
memio.seek(0)
self.assertEqual(memio.read(-1), buf)
memio.seek(0)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(100)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(0)
self.assertEqual(memio.read(None), buf)
self.assertRaises(TypeError, memio.read, '')
memio.close()
self.assertRaises(ValueError, memio.read)
def test_readline(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 2)
self.assertEqual(memio.readline(0), self.EOF)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(memio.readline(5), buf[:5])
# readline() accepts long objects
self.assertEqual(memio.readline(5L), buf[5:10])
self.assertEqual(memio.readline(5), buf[10:15])
memio.seek(0)
self.assertEqual(memio.readline(-1), buf)
memio.seek(0)
self.assertEqual(memio.readline(0), self.EOF)
buf = self.buftype("1234567890\n")
memio = self.ioclass((buf * 3)[:-1])
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf[:-1])
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(type(memio.readline()), type(buf))
self.assertEqual(memio.readline(), buf)
self.assertRaises(TypeError, memio.readline, '')
memio.close()
self.assertRaises(ValueError, memio.readline)
def test_readlines(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(memio.readlines(), [buf] * 10)
memio.seek(5)
self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9)
memio.seek(0)
# readlines() accepts long objects
self.assertEqual(memio.readlines(15L), [buf] * 2)
memio.seek(0)
self.assertEqual(memio.readlines(-1), [buf] * 10)
memio.seek(0)
self.assertEqual(memio.readlines(0), [buf] * 10)
memio.seek(0)
self.assertEqual(type(memio.readlines()[0]), type(buf))
memio.seek(0)
self.assertEqual(memio.readlines(None), [buf] * 10)
self.assertRaises(TypeError, memio.readlines, '')
memio.close()
self.assertRaises(ValueError, memio.readlines)
def test_iterator(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(iter(memio), memio)
self.assertTrue(hasattr(memio, '__iter__'))
self.assertTrue(hasattr(memio, 'next'))
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio.seek(0)
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio = self.ioclass(buf * 2)
memio.close()
self.assertRaises(ValueError, next, memio)
def test_getvalue(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio.read()
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(type(memio.getvalue()), type(buf))
memio = self.ioclass(buf * 1000)
self.assertEqual(memio.getvalue()[-3:], self.buftype("890"))
memio = self.ioclass(buf)
memio.close()
self.assertRaises(ValueError, memio.getvalue)
def test_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.read(5)
self.assertRaises(ValueError, memio.seek, -1)
self.assertRaises(ValueError, memio.seek, 1, -1)
self.assertRaises(ValueError, memio.seek, 1, 3)
self.assertEqual(memio.seek(0), 0)
self.assertEqual(memio.seek(0, 0), 0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.seek(3), 3)
# seek() accepts long objects
self.assertEqual(memio.seek(3L), 3)
self.assertEqual(memio.seek(0, 1), 3)
self.assertEqual(memio.read(), buf[3:])
self.assertEqual(memio.seek(len(buf)), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.seek(len(buf) + 1)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.seek(0, 2), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.close()
self.assertRaises(ValueError, memio.seek, 0)
def test_overseek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(len(buf) + 1), 11)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 11)
self.assertEqual(memio.getvalue(), buf)
memio.write(self.EOF)
self.assertEqual(memio.getvalue(), buf)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf)
def test_tell(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.tell(), 0)
memio.seek(5)
self.assertEqual(memio.tell(), 5)
memio.seek(10000)
self.assertEqual(memio.tell(), 10000)
memio.close()
self.assertRaises(ValueError, memio.tell)
def test_flush(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.flush(), None)
def test_flags(self):
memio = self.ioclass()
self.assertEqual(memio.writable(), True)
self.assertEqual(memio.readable(), True)
self.assertEqual(memio.seekable(), True)
self.assertEqual(memio.isatty(), False)
self.assertEqual(memio.closed, False)
memio.close()
self.assertEqual(memio.writable(), True)
self.assertEqual(memio.readable(), True)
self.assertEqual(memio.seekable(), True)
self.assertRaises(ValueError, memio.isatty)
self.assertEqual(memio.closed, True)
def test_subclassing(self):
buf = self.buftype("1234567890")
def test1():
class MemIO(self.ioclass):
pass
m = MemIO(buf)
return m.getvalue()
def test2():
class MemIO(self.ioclass):
def __init__(me, a, b):
self.ioclass.__init__(me, a)
m = MemIO(buf, None)
return m.getvalue()
self.assertEqual(test1(), buf)
self.assertEqual(test2(), buf)
def test_instance_dict_leak(self):
# Test case for issue #6242.
        # This will be caught by regrtest.py -R if this leaks.
for _ in range(100):
memio = self.ioclass()
memio.foo = 1
def test_pickling(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.foo = 42
memio.seek(2)
class PickleTestMemIO(self.ioclass):
def __init__(me, initvalue, foo):
self.ioclass.__init__(me, initvalue)
me.foo = foo
# __getnewargs__ is undefined on purpose. This checks that PEP 307
# is used to provide pickling support.
# Pickle expects the class to be on the module level. Here we use a
# little hack to allow the PickleTestMemIO class to derive from
# self.ioclass without having to define all combinations explicitly on
# the module-level.
import __main__
PickleTestMemIO.__module__ = '__main__'
__main__.PickleTestMemIO = PickleTestMemIO
submemio = PickleTestMemIO(buf, 80)
submemio.seek(2)
# We only support pickle protocol 2 and onward since we use extended
# __reduce__ API of PEP 307 to provide pickling support.
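        # Added note: pickle.HIGHEST_PROTOCOL is 2 on Python 2.7, so the
        # upper bound below must be HIGHEST_PROTOCOL + 1; with a bound of
        # HIGHEST_PROTOCOL the range would be empty and the assertions in
        # the loop would never run.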
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for obj in (memio, submemio):
obj2 = pickle.loads(pickle.dumps(obj, protocol=proto))
self.assertEqual(obj.getvalue(), obj2.getvalue())
self.assertEqual(obj.__class__, obj2.__class__)
self.assertEqual(obj.foo, obj2.foo)
self.assertEqual(obj.tell(), obj2.tell())
obj.close()
self.assertRaises(ValueError, pickle.dumps, obj, proto)
del __main__.PickleTestMemIO
class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin, unittest.TestCase):
UnsupportedOperation = pyio.UnsupportedOperation
@staticmethod
def buftype(s):
return s.encode("ascii")
ioclass = pyio.BytesIO
EOF = b""
def test_read1(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(TypeError, memio.read1)
self.assertEqual(memio.read(), buf)
def test_readinto(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
b = bytearray(b"hello")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"12345")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"67890")
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"67890")
b = bytearray(b"hello world")
memio.seek(0)
self.assertEqual(memio.readinto(b), 10)
self.assertEqual(b, b"1234567890d")
b = bytearray(b"")
memio.seek(0)
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"")
self.assertRaises(TypeError, memio.readinto, '')
import array
a = array.array(b'b', b"hello world")
memio = self.ioclass(buf)
memio.readinto(a)
self.assertEqual(a.tostring(), b"1234567890d")
memio.close()
self.assertRaises(ValueError, memio.readinto, b)
memio = self.ioclass(b"123")
b = bytearray()
memio.seek(42)
memio.readinto(b)
self.assertEqual(b, b"")
def test_relative_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(-1, 1), 0)
self.assertEqual(memio.seek(3, 1), 3)
self.assertEqual(memio.seek(-4, 1), 0)
self.assertEqual(memio.seek(-1, 2), 9)
self.assertEqual(memio.seek(1, 1), 10)
self.assertEqual(memio.seek(1, 2), 11)
memio.seek(-3, 2)
self.assertEqual(memio.read(), buf[-3:])
memio.seek(0)
memio.seek(1, 1)
self.assertEqual(memio.read(), buf[1:])
def test_unicode(self):
memio = self.ioclass()
self.assertRaises(TypeError, self.ioclass, "1234567890")
self.assertRaises(TypeError, memio.write, "1234567890")
self.assertRaises(TypeError, memio.writelines, ["1234567890"])
def test_bytes_array(self):
buf = b"1234567890"
import array
a = array.array(b'b', buf)
memio = self.ioclass(a)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(a), 10)
self.assertEqual(memio.getvalue(), buf)
def test_issue5449(self):
buf = self.buftype("1234567890")
self.ioclass(initial_bytes=buf)
self.assertRaises(TypeError, self.ioclass, buf, foo=None)
class TextIOTestMixin:
def test_newlines_property(self):
memio = self.ioclass(newline=None)
# The C StringIO decodes newlines in write() calls, but the Python
# implementation only does when reading. This function forces them to
# be decoded for testing.
def force_decode():
memio.seek(0)
memio.read()
self.assertEqual(memio.newlines, None)
memio.write("a\n")
force_decode()
self.assertEqual(memio.newlines, "\n")
memio.write("b\r\n")
force_decode()
self.assertEqual(memio.newlines, ("\n", "\r\n"))
memio.write("c\rd")
force_decode()
self.assertEqual(memio.newlines, ("\r", "\n", "\r\n"))
def test_relative_seek(self):
memio = self.ioclass()
self.assertRaises(IOError, memio.seek, -1, 1)
self.assertRaises(IOError, memio.seek, 3, 1)
self.assertRaises(IOError, memio.seek, -3, 1)
self.assertRaises(IOError, memio.seek, -1, 2)
self.assertRaises(IOError, memio.seek, 1, 1)
self.assertRaises(IOError, memio.seek, 1, 2)
def test_textio_properties(self):
memio = self.ioclass()
# These are just dummy values but we nevertheless check them for fear
# of unexpected breakage.
self.assertIsNone(memio.encoding)
self.assertIsNone(memio.errors)
self.assertFalse(memio.line_buffering)
def test_newline_none(self):
# newline=None
memio = self.ioclass("a\nb\r\nc\rd", newline=None)
self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"])
memio.seek(0)
self.assertEqual(memio.read(1), "a")
self.assertEqual(memio.read(2), "\nb")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\n")
memio = self.ioclass(newline=None)
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(3, memio.write("b\r\n"))
self.assertEqual(3, memio.write("c\rd"))
memio.seek(0)
self.assertEqual(memio.read(), "a\nb\nc\nd")
memio = self.ioclass("a\r\nb", newline=None)
self.assertEqual(memio.read(3), "a\nb")
def test_newline_empty(self):
# newline=""
memio = self.ioclass("a\nb\r\nc\rd", newline="")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
memio.seek(0)
self.assertEqual(memio.read(4), "a\nb\r")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\r")
memio = self.ioclass(newline="")
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(2, memio.write("b\r"))
self.assertEqual(2, memio.write("\nc"))
self.assertEqual(2, memio.write("\rd"))
memio.seek(0)
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
def test_newline_lf(self):
# newline="\n"
memio = self.ioclass("a\nb\r\nc\rd")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
def test_newline_cr(self):
# newline="\r"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r")
self.assertEqual(memio.read(), "a\rb\r\rc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
def test_newline_crlf(self):
# newline="\r\n"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n")
self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
def test_issue5265(self):
# StringIO can duplicate newlines in universal newlines mode
memio = self.ioclass("a\r\nb\r\n", newline=None)
self.assertEqual(memio.read(5), "a\nb\n")
class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
TextIOTestMixin, unittest.TestCase):
buftype = unicode
ioclass = pyio.StringIO
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase):
"""Test if pickle restores properly the internal state of StringIO.
"""
buftype = unicode
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
class ioclass(pyio.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs)))
def __init__(self, *args, **kwargs):
pass
class CBytesIOTest(PyBytesIOTest):
ioclass = io.BytesIO
UnsupportedOperation = io.UnsupportedOperation
test_bytes_array = unittest.skip(
"array.array() does not have the new buffer API"
)(PyBytesIOTest.test_bytes_array)
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 3)
bytearray(state[0]) # Check if state[0] supports the buffer interface.
self.assertIsInstance(state[1], int)
self.assertTrue(isinstance(state[2], dict) or state[2] is None)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__((b"no error", 0, None))
memio.__setstate__((bytearray(b"no error"), 0, None))
memio.__setstate__((b"no error", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None))
self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
class CStringIOTest(PyStringIOTest):
ioclass = io.StringIO
UnsupportedOperation = io.UnsupportedOperation
# XXX: For the Python version of io.StringIO, this is highly
# dependent on the encoding used for the underlying buffer.
def test_widechar(self):
buf = self.buftype("\U0002030a\U00020347")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf))
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf) * 2)
self.assertEqual(memio.getvalue(), buf + buf)
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 4)
self.assertIsInstance(state[0], unicode)
self.assertIsInstance(state[1], str)
self.assertIsInstance(state[2], int)
self.assertTrue(isinstance(state[3], dict) or state[3] is None)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__(("no error", "\n", 0, None))
memio.__setstate__(("no error", "", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None))
self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None))
# trunk is more tolerant than py3k on the type of the newline param
#self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None))
class CStringIOPickleTest(PyStringIOPickleTest):
UnsupportedOperation = io.UnsupportedOperation
class ioclass(io.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs),
protocol=2))
def __init__(self, *args, **kwargs):
pass
def test_main():
tests = [PyBytesIOTest, PyStringIOTest, CBytesIOTest, CStringIOTest,
PyStringIOPickleTest, CStringIOPickleTest]
support.run_unittest(*tests)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
rsignell-usgs/yaml2ncml | setup.py | 1 | 1966 | import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--verbose']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
def extract_version():
version = None
fdir = os.path.dirname(__file__)
fnme = os.path.join(fdir, 'yaml2ncml', '__init__.py')
with open(fnme) as fd:
for line in fd:
            if line.startswith('__version__'):
_, version = line.split('=')
version = version.strip()[1:-1]
break
return version
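# Added note: extract_version() expects yaml2ncml/__init__.py to contain a
# line of the form __version__ = '0.1.0' (version value hypothetical) and
# returns the text between the quotes, so the version is defined in exactly
# one place.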
rootpath = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return open(os.path.join(rootpath, *parts), 'r').read()
long_description = '{}\n{}'.format(read('README.rst'), read('CHANGES.txt'))
LICENSE = read('LICENSE.txt')
with open('requirements.txt') as f:
require = f.readlines()
install_requires = [r.strip() for r in require]
setup(name='yaml2ncml',
version=extract_version(),
packages=['yaml2ncml'],
license=LICENSE,
description='ncML aggregation from YAML specifications',
long_description=long_description,
author='Rich Signell',
author_email='[email protected]',
install_requires=install_requires,
entry_points=dict(console_scripts=[
'yaml2ncml = yaml2ncml.yaml2ncml:main']
),
url='https://github.com/rsignell-usgs/yaml2ncml',
keywords=['YAML', 'ncml'],
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License'],
tests_require=['pytest'],
cmdclass=dict(test=PyTest),
zip_safe=False)
| mit |
harmy/kbengine | kbe/src/lib/python/Tools/scripts/texi2html.py | 48 | 70149 | #! /usr/bin/env python3
# Convert GNU texinfo files into HTML, one file per node.
# Based on Texinfo 2.14.
# Usage: texi2html [-d] [-d] [-c] inputfile outputdirectory
# The input file must be a complete texinfo file, e.g. emacs.texi.
# This creates many files (one per info node) in the output directory,
# overwriting existing files of the same name. All files created have
# ".html" as their extension.
# XXX To do:
# - handle @comment*** correctly
# - handle @xref {some words} correctly
# - handle @ftable correctly (items aren't indexed?)
# - handle @itemx properly
# - handle @exdent properly
# - add links directly to the proper line from indices
# - check against the definitive list of @-cmds; we still miss (among others):
# - @defindex (hard)
# - @c(omment) in the middle of a line (rarely used)
# - @this* (not really needed, only used in headers anyway)
# - @today{} (ever used outside title page?)
# More consistent handling of chapters/sections/etc.
# Lots of documentation
# Many more options:
# -top designate top node
# -links customize which types of links are included
# -split split at chapters or sections instead of nodes
# -name Allow different types of filename handling. Non unix systems
# will have problems with long node names
# ...
# Support the most recent texinfo version and take a good look at HTML 3.0
# More debugging output (customizable) and more flexible error handling
# How about icons ?
# rpyron 2002-05-07
# Robert Pyron <[email protected]>
# 1. BUGFIX: In function makefile(), strip blanks from the nodename.
# This is necessary to match the behavior of parser.makeref() and
# parser.do_node().
# 2. BUGFIX fixed KeyError in end_ifset (well, I may have just made
# it go away, rather than fix it)
# 3. BUGFIX allow @menu and menu items inside @ifset or @ifclear
# 4. Support added for:
# @uref URL reference
# @image image file reference (see note below)
# @multitable output an HTML table
# @vtable
# 5. Partial support for accents, to match MAKEINFO output
# 6. I added a new command-line option, '-H basename', to specify
# HTML Help output. This will cause three files to be created
# in the current directory:
# `basename`.hhp HTML Help Workshop project file
# `basename`.hhc Contents file for the project
# `basename`.hhk Index file for the project
# When fed into HTML Help Workshop, the resulting file will be
# named `basename`.chm.
# 7. A new class, HTMLHelp, to accomplish item 6.
# 8. Various calls to HTMLHelp functions.
# A NOTE ON IMAGES: Just as 'outputdirectory' must exist before
# running this program, all referenced images must already exist
# in outputdirectory.
import os
import sys
import string
import re
MAGIC = '\\input texinfo'
cmprog = re.compile('^@([a-z]+)([ \t]|$)') # Command (line-oriented)
blprog = re.compile('^[ \t]*$') # Blank line
kwprog = re.compile('@[a-z]+') # Keyword (embedded, usually
# with {} args)
spprog = re.compile('[\n@{}&<>]') # Special characters in
# running text
#
# menu item (Yuck!)
miprog = re.compile('^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
#                    0   1     1 2        3          34         42       0
#                         -----            ----------  ---------
#                                 ---------------------------------
#                    -----------------------------------------------------
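# Added illustration (hypothetical names): a menu line such as
#   '* Files: File Handling.   Short description\n'
# matches miprog with group(1) == 'Files' (the label) and group(3) ==
# 'File Handling' (the target node); in the shorthand '* Files::',
# group(2) is ':' and the label doubles as the node name.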
class HTMLNode:
"""Some of the parser's functionality is separated into this class.
A Node accumulates its contents, takes care of links to other Nodes
and saves itself when it is finished and all links are resolved.
"""
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">'
type = 0
cont = ''
epilogue = '</BODY></HTML>\n'
def __init__(self, dir, name, topname, title, next, prev, up):
self.dirname = dir
self.name = name
if topname:
self.topname = topname
else:
self.topname = name
self.title = title
self.next = next
self.prev = prev
self.up = up
self.lines = []
def write(self, *lines):
for line in lines:
self.lines.append(line)
def flush(self):
fp = open(self.dirname + '/' + makefile(self.name), 'w')
fp.write(self.prologue)
fp.write(self.text)
fp.write(self.epilogue)
fp.close()
def link(self, label, nodename, rel=None, rev=None):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
title = ''
else:
addr = makefile(nodename)
title = ' TITLE="%s"' % nodename
self.write(label, ': <A HREF="', addr, '"', \
rel and (' REL=' + rel) or "", \
rev and (' REV=' + rev) or "", \
title, '>', nodename, '</A> \n')
def finalize(self):
length = len(self.lines)
self.text = ''.join(self.lines)
self.lines = []
self.open_links()
self.output_links()
self.close_links()
links = ''.join(self.lines)
self.lines = []
self.prologue = (
self.DOCTYPE +
'\n<HTML><HEAD>\n'
' <!-- Converted with texi2html and Python -->\n'
' <TITLE>' + self.title + '</TITLE>\n'
' <LINK REL=Next HREF="'
+ makefile(self.next) + '" TITLE="' + self.next + '">\n'
' <LINK REL=Previous HREF="'
+ makefile(self.prev) + '" TITLE="' + self.prev + '">\n'
' <LINK REL=Up HREF="'
+ makefile(self.up) + '" TITLE="' + self.up + '">\n'
'</HEAD><BODY>\n' +
links)
if length > 20:
self.epilogue = '<P>\n%s</BODY></HTML>\n' % links
def open_links(self):
self.write('<HR>\n')
def close_links(self):
self.write('<HR>\n')
def output_links(self):
if self.cont != self.next:
self.link(' Cont', self.cont)
self.link(' Next', self.next, rel='Next')
self.link(' Prev', self.prev, rel='Previous')
self.link(' Up', self.up, rel='Up')
if self.name != self.topname:
self.link(' Top', self.topname)
class HTML3Node(HTMLNode):
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML Level 3//EN//3.0">'
def open_links(self):
self.write('<DIV CLASS=Navigation>\n <HR>\n')
def close_links(self):
self.write(' <HR>\n</DIV>\n')
class TexinfoParser:
    COPYRIGHT_SYMBOL = "&copy;"
FN_ID_PATTERN = "(%(id)s)"
FN_SOURCE_PATTERN = '<A NAME=footnoteref%(id)s' \
' HREF="#footnotetext%(id)s">' \
+ FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<A NAME=footnotetext%(id)s' \
' HREF="#footnoteref%(id)s">' \
+ FN_ID_PATTERN + '</A>\n%(text)s<P>\n'
FN_HEADER = '\n<P>\n<HR NOSHADE SIZE=1 WIDTH=200>\n' \
'<STRONG><EM>Footnotes</EM></STRONG>\n<P>'
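    # Added illustration: with id 1, FN_SOURCE_PATTERN expands to
    #   <A NAME=footnoteref1 HREF="#footnotetext1">(1)</A>
    # so the in-text marker links to the footnote body, while
    # FN_TARGET_PATTERN writes the matching back-link under FN_HEADER.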
Node = HTMLNode
# Initialize an instance
def __init__(self):
self.unknown = {} # statistics about unknown @-commands
self.filenames = {} # Check for identical filenames
self.debugging = 0 # larger values produce more output
self.print_headers = 0 # always print headers?
self.nodefp = None # open file we're writing to
self.nodelineno = 0 # Linenumber relative to node
self.links = None # Links from current node
self.savetext = None # If not None, save text head instead
self.savestack = [] # If not None, save text head instead
self.htmlhelp = None # html help data
self.dirname = 'tmp' # directory where files are created
self.includedir = '.' # directory to search @include files
self.nodename = '' # name of current node
self.topname = '' # name of top node (first node seen)
self.title = '' # title of this whole Texinfo tree
self.resetindex() # Reset all indices
self.contents = [] # Reset table of contents
self.numbering = [] # Reset section numbering counters
self.nofill = 0 # Normal operation: fill paragraphs
self.values={'html': 1} # Names that should be parsed in ifset
self.stackinfo={} # Keep track of state in the stack
# XXX The following should be reset per node?!
self.footnotes = [] # Reset list of footnotes
self.itemarg = None # Reset command used by @item
self.itemnumber = None # Reset number for @item in @enumerate
self.itemindex = None # Reset item index name
self.node = None
self.nodestack = []
self.cont = 0
self.includedepth = 0
# Set htmlhelp helper class
def sethtmlhelp(self, htmlhelp):
self.htmlhelp = htmlhelp
# Set (output) directory name
def setdirname(self, dirname):
self.dirname = dirname
# Set include directory name
def setincludedir(self, includedir):
self.includedir = includedir
# Parse the contents of an entire file
def parse(self, fp):
line = fp.readline()
lineno = 1
while line and (line[0] == '%' or blprog.match(line)):
line = fp.readline()
lineno = lineno + 1
if line[:len(MAGIC)] != MAGIC:
raise SyntaxError('file does not begin with %r' % (MAGIC,))
self.parserest(fp, lineno)
# Parse the contents of a file, not expecting a MAGIC header
def parserest(self, fp, initial_lineno):
lineno = initial_lineno
self.done = 0
self.skip = 0
self.stack = []
accu = []
while not self.done:
line = fp.readline()
self.nodelineno = self.nodelineno + 1
if not line:
if accu:
if not self.skip: self.process(accu)
accu = []
if initial_lineno > 0:
print('*** EOF before @bye')
break
lineno = lineno + 1
mo = cmprog.match(line)
if mo:
a, b = mo.span(1)
cmd = line[a:b]
if cmd in ('noindent', 'refill'):
accu.append(line)
else:
if accu:
if not self.skip:
self.process(accu)
accu = []
self.command(line, mo)
elif blprog.match(line) and \
'format' not in self.stack and \
'example' not in self.stack:
if accu:
if not self.skip:
self.process(accu)
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
accu = []
else:
# Append the line including trailing \n!
accu.append(line)
#
if self.skip:
print('*** Still skipping at the end')
if self.stack:
print('*** Stack not empty at the end')
print('***', self.stack)
if self.includedepth == 0:
while self.nodestack:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
# Start saving text in a buffer instead of writing it to a file
def startsaving(self):
        if self.savetext is not None:
self.savestack.append(self.savetext)
# print '*** Recursively saving text, expect trouble'
self.savetext = ''
# Return the text saved so far and start writing to file again
def collectsavings(self):
savetext = self.savetext
if len(self.savestack) > 0:
self.savetext = self.savestack[-1]
del self.savestack[-1]
else:
self.savetext = None
return savetext or ''
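    # Added note: startsaving()/collectsavings() redirect write() into
    # self.savetext so the text inside braces can be captured; for example,
    # close_uref() further below fetches the buffered '@uref{...}' argument
    # via collectsavings() before emitting the final anchor tag.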
# Write text to file, or save it in a buffer, or ignore it
def write(self, *args):
try:
text = ''.join(args)
        except TypeError:
            print(args)
            raise
        if self.savetext is not None:
self.savetext = self.savetext + text
elif self.nodefp:
self.nodefp.write(text)
elif self.node:
self.node.write(text)
# Complete the current node -- write footnotes and close file
def endnode(self):
        if self.savetext is not None:
print('*** Still saving text at end of node')
dummy = self.collectsavings()
if self.footnotes:
self.writefootnotes()
if self.nodefp:
if self.nodelineno > 20:
self.write('<HR>\n')
[name, next, prev, up] = self.nodelinks[:4]
self.link('Next', next)
self.link('Prev', prev)
self.link('Up', up)
if self.nodename != self.topname:
self.link('Top', self.topname)
self.write('<HR>\n')
self.write('</BODY>\n')
self.nodefp.close()
self.nodefp = None
elif self.node:
if not self.cont and \
(not self.node.type or \
(self.node.next and self.node.prev and self.node.up)):
self.node.finalize()
self.node.flush()
else:
self.nodestack.append(self.node)
self.node = None
self.nodename = ''
# Process a list of lines, expanding embedded @-commands
# This mostly distinguishes between menus and normal text
def process(self, accu):
if self.debugging > 1:
print('!'*self.debugging, 'process:', self.skip, self.stack, end=' ')
            if accu:
                print(accu[0][:30], end=' ')
                if accu[0][30:] or accu[1:]:
                    print('...', end=' ')
            print()
if self.inmenu():
# XXX should be done differently
for line in accu:
mo = miprog.match(line)
if not mo:
line = line.strip() + '\n'
self.expand(line)
continue
bgn, end = mo.span(0)
a, b = mo.span(1)
c, d = mo.span(2)
e, f = mo.span(3)
g, h = mo.span(4)
label = line[a:b]
nodename = line[c:d]
if nodename[0] == ':': nodename = label
else: nodename = line[e:f]
punct = line[g:h]
self.write(' <LI><A HREF="',
makefile(nodename),
'">', nodename,
'</A>', punct, '\n')
self.htmlhelp.menuitem(nodename)
self.expand(line[end:])
else:
text = ''.join(accu)
self.expand(text)
# find 'menu' (we might be inside 'ifset' or 'ifclear')
def inmenu(self):
#if 'menu' in self.stack:
# print 'inmenu :', self.skip, self.stack, self.stackinfo
stack = self.stack
while stack and stack[-1] in ('ifset','ifclear'):
try:
if self.stackinfo[len(stack)]:
return 0
except KeyError:
pass
stack = stack[:-1]
return (stack and stack[-1] == 'menu')
# Write a string, expanding embedded @-commands
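    # Added illustration: expand("@code{foo} & bar") writes
    # "<CODE>foo</CODE> &amp; bar" -- a '{' following a recognized command
    # pushes it on the local stack, the matching '}' dispatches to
    # close_code(), and bare specials such as '&' are HTML-escaped.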
def expand(self, text):
stack = []
i = 0
n = len(text)
while i < n:
start = i
mo = spprog.search(text, i)
if mo:
i = mo.start()
else:
self.write(text[start:])
break
self.write(text[start:i])
c = text[i]
i = i+1
if c == '\n':
self.write('\n')
continue
            if c == '<':
                self.write('&lt;')
                continue
            if c == '>':
                self.write('&gt;')
                continue
            if c == '&':
                self.write('&amp;')
                continue
if c == '{':
stack.append('')
continue
if c == '}':
if not stack:
print('*** Unmatched }')
self.write('}')
continue
cmd = stack[-1]
del stack[-1]
try:
method = getattr(self, 'close_' + cmd)
except AttributeError:
self.unknown_close(cmd)
continue
method()
continue
if c != '@':
# Cannot happen unless spprog is changed
raise RuntimeError('unexpected funny %r' % c)
start = i
while i < n and text[i] in string.ascii_letters: i = i+1
if i == start:
# @ plus non-letter: literal next character
i = i+1
c = text[start:i]
if c == ':':
# `@:' means no extra space after
# preceding `.', `?', `!' or `:'
pass
else:
# `@.' means a sentence-ending period;
# `@@', `@{', `@}' quote `@', `{', `}'
self.write(c)
continue
cmd = text[start:i]
if i < n and text[i] == '{':
i = i+1
stack.append(cmd)
try:
method = getattr(self, 'open_' + cmd)
except AttributeError:
self.unknown_open(cmd)
continue
method()
continue
try:
method = getattr(self, 'handle_' + cmd)
except AttributeError:
self.unknown_handle(cmd)
continue
method()
if stack:
print('*** Stack not empty at para:', stack)
# --- Handle unknown embedded @-commands ---
def unknown_open(self, cmd):
print('*** No open func for @' + cmd + '{...}')
cmd = cmd + '{'
self.write('@', cmd)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_close(self, cmd):
print('*** No close func for @' + cmd + '{...}')
cmd = '}' + cmd
self.write('}')
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_handle(self, cmd):
print('*** No handler for @' + cmd)
self.write('@', cmd)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# XXX The following sections should be ordered as the texinfo docs
# --- Embedded @-commands without {} argument list --
def handle_noindent(self): pass
def handle_refill(self): pass
# --- Include file handling ---
def do_include(self, args):
file = args
file = os.path.join(self.includedir, file)
try:
fp = open(file, 'r')
except IOError as msg:
print('*** Can\'t open include file', repr(file))
return
print('!'*self.debugging, '--> file', repr(file))
save_done = self.done
save_skip = self.skip
save_stack = self.stack
self.includedepth = self.includedepth + 1
self.parserest(fp, 0)
self.includedepth = self.includedepth - 1
fp.close()
self.done = save_done
self.skip = save_skip
self.stack = save_stack
print('!'*self.debugging, '<-- file', repr(file))
# --- Special Insertions ---
def open_dmn(self): pass
def close_dmn(self): pass
def open_dots(self): self.write('...')
def close_dots(self): pass
def open_bullet(self): pass
def close_bullet(self): pass
def open_TeX(self): self.write('TeX')
def close_TeX(self): pass
def handle_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def open_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def close_copyright(self): pass
def open_minus(self): self.write('-')
def close_minus(self): pass
# --- Accents ---
# rpyron 2002-05-07
# I would like to do at least as well as makeinfo when
# it is producing HTML output:
#
    #   input                   output
    #   @"o                     @"o                 umlaut accent
    #   @'o                     'o                  acute accent
    #   @,{c}                   @,{c}               cedilla accent
    #   @=o                     @=o                 macron/overbar accent
    #   @^o                     @^o                 circumflex accent
    #   @`o                     `o                  grave accent
    #   @~o                     @~o                 tilde accent
    #   @dotaccent{o}           @dotaccent{o}       overdot accent
    #   @H{o}                   @H{o}               long Hungarian umlaut
    #   @ringaccent{o}          @ringaccent{o}      ring accent
    #   @tieaccent{oo}          @tieaccent{oo}      tie-after accent
    #   @u{o}                   @u{o}               breve accent
    #   @ubaraccent{o}          @ubaraccent{o}      underbar accent
    #   @udotaccent{o}          @udotaccent{o}      underdot accent
    #   @v{o}                   @v{o}               hacek or check accent
    #   @exclamdown{}           &iexcl;             upside-down !
    #   @questiondown{}         &iquest;            upside-down ?
    #   @aa{},@AA{}             &aring;,&Aring;     a,A with circle
    #   @ae{},@AE{}             &aelig;,&AElig;     ae,AE ligatures
    #   @dotless{i}             @dotless{i}         dotless i
    #   @dotless{j}             @dotless{j}         dotless j
    #   @l{},@L{}               l/,L/               suppressed-L,l
    #   @o{},@O{}               &oslash;,&Oslash;   O,o with slash
    #   @oe{},@OE{}             oe,OE               oe,OE ligatures
    #   @ss{}                   &szlig;             es-zet or sharp S
#
# The following character codes and approximations have been
# copied from makeinfo's HTML output.
    def open_exclamdown(self): self.write('&iexcl;')    # upside-down !
    def close_exclamdown(self): pass
    def open_questiondown(self): self.write('&iquest;') # upside-down ?
    def close_questiondown(self): pass
    def open_aa(self): self.write('&aring;')            # a with circle
    def close_aa(self): pass
    def open_AA(self): self.write('&Aring;')            # A with circle
    def close_AA(self): pass
    def open_ae(self): self.write('&aelig;')            # ae ligatures
    def close_ae(self): pass
    def open_AE(self): self.write('&AElig;')            # AE ligatures
    def close_AE(self): pass
    def open_o(self): self.write('&oslash;')            # o with slash
    def close_o(self): pass
    def open_O(self): self.write('&Oslash;')            # O with slash
    def close_O(self): pass
    def open_ss(self): self.write('&szlig;')            # es-zet or sharp S
    def close_ss(self): pass
def open_oe(self): self.write('oe') # oe ligatures
def close_oe(self): pass
def open_OE(self): self.write('OE') # OE ligatures
def close_OE(self): pass
def open_l(self): self.write('l/') # suppressed-l
def close_l(self): pass
def open_L(self): self.write('L/') # suppressed-L
def close_L(self): pass
# --- Special Glyphs for Examples ---
    def open_result(self): self.write('=&gt;')
    def close_result(self): pass
    def open_expansion(self): self.write('==&gt;')
    def close_expansion(self): pass
    def open_print(self): self.write('-|')
    def close_print(self): pass
    def open_error(self): self.write('error--&gt;')
def close_error(self): pass
def open_equiv(self): self.write('==')
def close_equiv(self): pass
def open_point(self): self.write('-!-')
def close_point(self): pass
# --- Cross References ---
def open_pxref(self):
self.write('see ')
self.startsaving()
def close_pxref(self):
self.makeref()
def open_xref(self):
self.write('See ')
self.startsaving()
def close_xref(self):
self.makeref()
def open_ref(self):
self.startsaving()
def close_ref(self):
self.makeref()
def open_inforef(self):
self.write('See info file ')
self.startsaving()
def close_inforef(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 3: args.append('')
node = args[0]
file = args[2]
self.write('`', file, '\', node `', node, '\'')
def makeref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
nodename = label = args[0]
if args[2]: label = args[2]
file = args[3]
title = args[4]
href = makefile(nodename)
if file:
href = '../' + file + '/' + href
self.write('<A HREF="', href, '">', label, '</A>')
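    # Added illustration (hypothetical node name): '@ref{Intro}' buffers
    # 'Intro' and makeref() then writes
    #   <A HREF="...">Intro</A>
    # where the HREF is makefile('Intro'), the node's per-node HTML file
    # name; a non-empty fourth argument prefixes it with '../<file>/'.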
# rpyron 2002-05-07 uref support
def open_uref(self):
self.startsaving()
def close_uref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 2: args.append('')
href = args[0]
label = args[1]
if not label: label = href
self.write('<A HREF="', href, '">', label, '</A>')
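    # Added illustration: '@uref{http://example.com/, Example}' yields
    #   <A HREF="http://example.com/">Example</A>
    # and when the label is omitted the URL itself becomes the link text.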
# rpyron 2002-05-07 image support
# GNU makeinfo producing HTML output tries `filename.png'; if
# that does not exist, it tries `filename.jpg'. If that does
# not exist either, it complains. GNU makeinfo does not handle
# GIF files; however, I include GIF support here because
# MySQL documentation uses GIF files.
def open_image(self):
self.startsaving()
def close_image(self):
self.makeimage()
def makeimage(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
filename = args[0]
width = args[1]
height = args[2]
alt = args[3]
ext = args[4]
# The HTML output will have a reference to the image
# that is relative to the HTML output directory,
# which is what 'filename' gives us. However, we need
# to find it relative to our own current directory,
# so we construct 'imagename'.
imagelocation = self.dirname + '/' + filename
if os.path.exists(imagelocation+'.png'):
filename += '.png'
elif os.path.exists(imagelocation+'.jpg'):
filename += '.jpg'
elif os.path.exists(imagelocation+'.gif'): # MySQL uses GIF files
filename += '.gif'
else:
print("*** Cannot find image " + imagelocation)
#TODO: what is 'ext'?
self.write('<IMG SRC="', filename, '"', \
width and (' WIDTH="' + width + '"') or "", \
height and (' HEIGHT="' + height + '"') or "", \
alt and (' ALT="' + alt + '"') or "", \
'/>' )
self.htmlhelp.addimage(imagelocation)
# --- Marking Words and Phrases ---
# --- Other @xxx{...} commands ---
def open_(self): pass # Used by {text enclosed in braces}
def close_(self): pass
open_asis = open_
close_asis = close_
def open_cite(self): self.write('<CITE>')
def close_cite(self): self.write('</CITE>')
def open_code(self): self.write('<CODE>')
def close_code(self): self.write('</CODE>')
def open_t(self): self.write('<TT>')
def close_t(self): self.write('</TT>')
def open_dfn(self): self.write('<DFN>')
def close_dfn(self): self.write('</DFN>')
def open_emph(self): self.write('<EM>')
def close_emph(self): self.write('</EM>')
def open_i(self): self.write('<I>')
def close_i(self): self.write('</I>')
def open_footnote(self):
# if self.savetext <> None:
# print '*** Recursive footnote -- expect weirdness'
id = len(self.footnotes) + 1
self.write(self.FN_SOURCE_PATTERN % {'id': repr(id)})
self.startsaving()
def close_footnote(self):
id = len(self.footnotes) + 1
self.footnotes.append((id, self.collectsavings()))
def writefootnotes(self):
self.write(self.FN_HEADER)
for id, text in self.footnotes:
self.write(self.FN_TARGET_PATTERN
% {'id': repr(id), 'text': text})
self.footnotes = []
def open_file(self): self.write('<CODE>')
def close_file(self): self.write('</CODE>')
def open_kbd(self): self.write('<KBD>')
def close_kbd(self): self.write('</KBD>')
def open_key(self): self.write('<KEY>')
def close_key(self): self.write('</KEY>')
def open_r(self): self.write('<R>')
def close_r(self): self.write('</R>')
def open_samp(self): self.write('`<SAMP>')
def close_samp(self): self.write('</SAMP>\'')
def open_sc(self): self.write('<SMALLCAPS>')
def close_sc(self): self.write('</SMALLCAPS>')
def open_strong(self): self.write('<STRONG>')
def close_strong(self): self.write('</STRONG>')
def open_b(self): self.write('<B>')
def close_b(self): self.write('</B>')
def open_var(self): self.write('<VAR>')
def close_var(self): self.write('</VAR>')
def open_w(self): self.write('<NOBREAK>')
def close_w(self): self.write('</NOBREAK>')
def open_url(self): self.startsaving()
def close_url(self):
text = self.collectsavings()
self.write('<A HREF="', text, '">', text, '</A>')
def open_email(self): self.startsaving()
def close_email(self):
text = self.collectsavings()
self.write('<A HREF="mailto:', text, '">', text, '</A>')
open_titlefont = open_
close_titlefont = close_
def open_small(self): pass
def close_small(self): pass
def command(self, line, mo):
a, b = mo.span(1)
cmd = line[a:b]
args = line[b:].strip()
if self.debugging > 1:
print('!'*self.debugging, 'command:', self.skip, self.stack, \
'@' + cmd, args)
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
try:
func = getattr(self, 'bgn_' + cmd)
except AttributeError:
# don't complain if we are skipping anyway
if not self.skip:
self.unknown_cmd(cmd, args)
return
self.stack.append(cmd)
func(args)
return
if not self.skip or cmd == 'end':
func(args)
def unknown_cmd(self, cmd, args):
print('*** unknown', '@' + cmd, args)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def do_end(self, args):
words = args.split()
if not words:
print('*** @end w/o args')
else:
cmd = words[0]
if not self.stack or self.stack[-1] != cmd:
print('*** @end', cmd, 'unexpected')
else:
del self.stack[-1]
try:
func = getattr(self, 'end_' + cmd)
except AttributeError:
self.unknown_end(cmd)
return
func()
def unknown_end(self, cmd):
cmd = 'end ' + cmd
print('*** unknown', '@' + cmd)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# --- Comments ---
def do_comment(self, args): pass
do_c = do_comment
# --- Conditional processing ---
def bgn_ifinfo(self, args): pass
def end_ifinfo(self): pass
def bgn_iftex(self, args): self.skip = self.skip + 1
def end_iftex(self): self.skip = self.skip - 1
def bgn_ignore(self, args): self.skip = self.skip + 1
def end_ignore(self): self.skip = self.skip - 1
def bgn_tex(self, args): self.skip = self.skip + 1
def end_tex(self): self.skip = self.skip - 1
def do_set(self, args):
fields = args.split(' ')
key = fields[0]
if len(fields) == 1:
value = 1
else:
value = ' '.join(fields[1:])
self.values[key] = value
def do_clear(self, args):
self.values[args] = None
def bgn_ifset(self, args):
if args not in self.values or self.values[args] is None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifset(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print('*** end_ifset: KeyError :', len(self.stack) + 1)
def bgn_ifclear(self, args):
if args in self.values and self.values[args] is not None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifclear(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print('*** end_ifclear: KeyError :', len(self.stack) + 1)
def open_value(self):
self.startsaving()
def close_value(self):
key = self.collectsavings()
if key in self.values:
self.write(self.values[key])
else:
print('*** Undefined value: ', key)
# --- Beginning a file ---
do_finalout = do_comment
do_setchapternewpage = do_comment
do_setfilename = do_comment
def do_settitle(self, args):
self.startsaving()
self.expand(args)
self.title = self.collectsavings()
def do_parskip(self, args): pass
# --- Ending a file ---
def do_bye(self, args):
self.endnode()
self.done = 1
# --- Title page ---
def bgn_titlepage(self, args): self.skip = self.skip + 1
def end_titlepage(self): self.skip = self.skip - 1
def do_shorttitlepage(self, args): pass
def do_center(self, args):
# Actually not used outside title page...
self.write('<H1>')
self.expand(args)
self.write('</H1>\n')
do_title = do_center
do_subtitle = do_center
do_author = do_center
do_vskip = do_comment
do_vfill = do_comment
do_smallbook = do_comment
do_paragraphindent = do_comment
do_setchapternewpage = do_comment
do_headings = do_comment
do_footnotestyle = do_comment
do_evenheading = do_comment
do_evenfooting = do_comment
do_oddheading = do_comment
do_oddfooting = do_comment
do_everyheading = do_comment
do_everyfooting = do_comment
# --- Nodes ---
def do_node(self, args):
self.endnode()
self.nodelineno = 0
parts = [s.strip() for s in args.split(',')]
while len(parts) < 4: parts.append('')
self.nodelinks = parts
[name, next, prev, up] = parts[:4]
file = self.dirname + '/' + makefile(name)
if file in self.filenames:
print('*** Filename already in use: ', file)
else:
if self.debugging: print('!'*self.debugging, '--- writing', file)
self.filenames[file] = 1
# self.nodefp = open(file, 'w')
self.nodename = name
if self.cont and self.nodestack:
self.nodestack[-1].cont = self.nodename
if not self.topname: self.topname = name
title = name
if self.title: title = title + ' -- ' + self.title
self.node = self.Node(self.dirname, self.nodename, self.topname,
title, next, prev, up)
self.htmlhelp.addnode(self.nodename,next,prev,up,file)
def link(self, label, nodename):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
else:
addr = makefile(nodename)
self.write(label, ': <A HREF="', addr, '" TYPE="',
label, '">', nodename, '</A> \n')
# --- Sectioning commands ---
def popstack(self, type):
if (self.node):
self.node.type = type
while self.nodestack:
if self.nodestack[-1].type > type:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
elif self.nodestack[-1].type == type:
if not self.nodestack[-1].next:
self.nodestack[-1].next = self.node.name
if not self.node.prev:
self.node.prev = self.nodestack[-1].name
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
else:
if type > 1 and not self.node.up:
self.node.up = self.nodestack[-1].name
break
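    # Added note: popstack(type) finalizes any queued nodes at a deeper or
    # equal sectioning level, filling in default next/prev links between
    # siblings and a default up link to the enclosing section before the
    # finished nodes are flushed to disk.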
def do_chapter(self, args):
self.heading('H1', args, 0)
self.popstack(1)
def do_unnumbered(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_appendix(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_top(self, args):
self.heading('H1', args, -1)
def do_chapheading(self, args):
self.heading('H1', args, -1)
def do_majorheading(self, args):
self.heading('H1', args, -1)
def do_section(self, args):
self.heading('H1', args, 1)
self.popstack(2)
def do_unnumberedsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
def do_appendixsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
do_appendixsection = do_appendixsec
def do_heading(self, args):
self.heading('H1', args, -1)
def do_subsection(self, args):
self.heading('H2', args, 2)
self.popstack(3)
def do_unnumberedsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_appendixsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_subheading(self, args):
self.heading('H2', args, -1)
def do_subsubsection(self, args):
self.heading('H3', args, 3)
self.popstack(4)
def do_unnumberedsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_appendixsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_subsubheading(self, args):
self.heading('H3', args, -1)
def heading(self, type, args, level):
if level >= 0:
while len(self.numbering) <= level:
self.numbering.append(0)
del self.numbering[level+1:]
self.numbering[level] = self.numbering[level] + 1
x = ''
for i in self.numbering:
x = x + repr(i) + '.'
args = x + ' ' + args
self.contents.append((level, args, self.nodename))
self.write('<', type, '>')
self.expand(args)
self.write('</', type, '>\n')
if self.debugging or self.print_headers:
print('---', args)
    def do_contents(self, args):
        # Always emit the full table of contents.
        self.listcontents('Table of Contents', 999)
    def do_shortcontents(self, args):
        # Suppressed by default; enable the call below to emit it.
        pass
        # self.listcontents('Short Contents', 0)
do_summarycontents = do_shortcontents
def listcontents(self, title, maxlevel):
self.write('<H1>', title, '</H1>\n<UL COMPACT PLAIN>\n')
prevlevels = [0]
for level, title, node in self.contents:
if level > maxlevel:
continue
if level > prevlevels[-1]:
# can only advance one level at a time
self.write(' '*prevlevels[-1], '<UL PLAIN>\n')
prevlevels.append(level)
elif level < prevlevels[-1]:
# might drop back multiple levels
while level < prevlevels[-1]:
del prevlevels[-1]
self.write(' '*prevlevels[-1],
'</UL>\n')
self.write(' '*level, '<LI> <A HREF="',
makefile(node), '">')
self.expand(title)
self.write('</A>\n')
self.write('</UL>\n' * len(prevlevels))
# --- Page lay-out ---
# These commands are only meaningful in printed text
def do_page(self, args): pass
def do_need(self, args): pass
def bgn_group(self, args): pass
def end_group(self): pass
# --- Line lay-out ---
def do_sp(self, args):
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
def do_hline(self, args):
self.write('<HR>')
# --- Function and variable definitions ---
def bgn_deffn(self, args):
self.write('<DL>')
self.do_deffnx(args)
def end_deffn(self):
self.write('</DL>\n')
def do_deffnx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_defun(self, args): self.bgn_deffn('Function ' + args)
end_defun = end_deffn
def do_defunx(self, args): self.do_deffnx('Function ' + args)
def bgn_defmac(self, args): self.bgn_deffn('Macro ' + args)
end_defmac = end_deffn
def do_defmacx(self, args): self.do_deffnx('Macro ' + args)
def bgn_defspec(self, args): self.bgn_deffn('{Special Form} ' + args)
end_defspec = end_deffn
def do_defspecx(self, args): self.do_deffnx('{Special Form} ' + args)
def bgn_defvr(self, args):
self.write('<DL>')
self.do_defvrx(args)
end_defvr = end_deffn
def do_defvrx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@code{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('vr', name)
def bgn_defvar(self, args): self.bgn_defvr('Variable ' + args)
end_defvar = end_defvr
def do_defvarx(self, args): self.do_defvrx('Variable ' + args)
def bgn_defopt(self, args): self.bgn_defvr('{User Option} ' + args)
end_defopt = end_defvr
def do_defoptx(self, args): self.do_defvrx('{User Option} ' + args)
# --- Ditto for typed languages ---
def bgn_deftypefn(self, args):
self.write('<DL>')
self.do_deftypefnx(args)
end_deftypefn = end_deffn
def do_deftypefnx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypefun(self, args): self.bgn_deftypefn('Function ' + args)
end_deftypefun = end_deftypefn
def do_deftypefunx(self, args): self.do_deftypefnx('Function ' + args)
def bgn_deftypevr(self, args):
self.write('<DL>')
self.do_deftypevrx(args)
end_deftypevr = end_deftypefn
def do_deftypevrx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypevar(self, args):
self.bgn_deftypevr('Variable ' + args)
end_deftypevar = end_deftypevr
def do_deftypevarx(self, args):
self.do_deftypevrx('Variable ' + args)
# --- Ditto for object-oriented languages ---
def bgn_defcv(self, args):
self.write('<DL>')
self.do_defcvx(args)
end_defcv = end_deftypevr
def do_defcvx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('vr', '%s @r{on %s}' % (name, classname))
def bgn_defivar(self, args):
self.bgn_defcv('{Instance Variable} ' + args)
end_defivar = end_defcv
def do_defivarx(self, args):
self.do_defcvx('{Instance Variable} ' + args)
def bgn_defop(self, args):
self.write('<DL>')
self.do_defopx(args)
end_defop = end_defcv
def do_defopx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('fn', '%s @r{on %s}' % (name, classname))
def bgn_defmethod(self, args):
self.bgn_defop('Method ' + args)
end_defmethod = end_defop
def do_defmethodx(self, args):
self.do_defopx('Method ' + args)
# --- Ditto for data types ---
def bgn_deftp(self, args):
self.write('<DL>')
self.do_deftpx(args)
end_deftp = end_defcv
def do_deftpx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('tp', name)
# --- Making Lists and Tables
def bgn_enumerate(self, args):
if not args:
self.write('<OL>\n')
self.stackinfo[len(self.stack)] = '</OL>\n'
else:
self.itemnumber = args
self.write('<UL>\n')
self.stackinfo[len(self.stack)] = '</UL>\n'
def end_enumerate(self):
self.itemnumber = None
self.write(self.stackinfo[len(self.stack) + 1])
del self.stackinfo[len(self.stack) + 1]
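    # For example, '@enumerate 3' numbers items 3. 4. 5. ... and
    # '@enumerate a' yields a. b. c. ..., advanced by increment() below.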
def bgn_itemize(self, args):
self.itemarg = args
self.write('<UL>\n')
def end_itemize(self):
self.itemarg = None
self.write('</UL>\n')
def bgn_table(self, args):
self.itemarg = args
self.write('<DL>\n')
def end_table(self):
self.itemarg = None
self.write('</DL>\n')
def bgn_ftable(self, args):
self.itemindex = 'fn'
self.bgn_table(args)
def end_ftable(self):
self.itemindex = None
self.end_table()
def bgn_vtable(self, args):
self.itemindex = 'vr'
self.bgn_table(args)
def end_vtable(self):
self.itemindex = None
self.end_table()
def do_item(self, args):
if self.itemindex: self.index(self.itemindex, args)
if self.itemarg:
if self.itemarg[0] == '@' and self.itemarg[1] and \
self.itemarg[1] in string.ascii_letters:
args = self.itemarg + '{' + args + '}'
else:
# some other character, e.g. '-'
args = self.itemarg + ' ' + args
if self.itemnumber != None:
args = self.itemnumber + '. ' + args
self.itemnumber = increment(self.itemnumber)
if self.stack and self.stack[-1] == 'table':
self.write('<DT>')
self.expand(args)
self.write('\n<DD>')
elif self.stack and self.stack[-1] == 'multitable':
self.write('<TR><TD>')
self.expand(args)
self.write('</TD>\n</TR>\n')
else:
self.write('<LI>')
self.expand(args)
self.write(' ')
do_itemx = do_item # XXX Should suppress leading blank line
# rpyron 2002-05-07 multitable support
def bgn_multitable(self, args):
self.itemarg = None # should be handled by columnfractions
self.write('<TABLE BORDER="">\n')
def end_multitable(self):
self.itemarg = None
self.write('</TABLE>\n<BR>\n')
def handle_columnfractions(self):
# It would be better to handle this, but for now it's in the way...
self.itemarg = None
def handle_tab(self):
self.write('</TD>\n <TD>')
# --- Enumerations, displays, quotations ---
# XXX Most of these should increase the indentation somehow
def bgn_quotation(self, args): self.write('<BLOCKQUOTE>')
def end_quotation(self): self.write('</BLOCKQUOTE>\n')
def bgn_example(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>')
def end_example(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
bgn_lisp = bgn_example # Synonym when contents are executable lisp code
end_lisp = end_example
bgn_smallexample = bgn_example # XXX Should use smaller font
end_smallexample = end_example
bgn_smalllisp = bgn_lisp # Ditto
end_smalllisp = end_lisp
bgn_display = bgn_example
end_display = end_example
bgn_format = bgn_display
end_format = end_display
def do_exdent(self, args): self.expand(args + '\n')
# XXX Should really mess with indentation
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>\n')
def end_flushleft(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<DIR>\n')
self.write(' <STRONG><EM>Menu</EM></STRONG><P>\n')
self.htmlhelp.beginmenu()
def end_menu(self):
self.write('</DIR>\n')
self.htmlhelp.endmenu()
def bgn_cartouche(self, args): pass
def end_cartouche(self): pass
# --- Indices ---
def resetindex(self):
self.noncodeindices = ['cp']
self.indextitle = {}
self.indextitle['cp'] = 'Concept'
self.indextitle['fn'] = 'Function'
self.indextitle['ky'] = 'Keyword'
self.indextitle['pg'] = 'Program'
self.indextitle['tp'] = 'Type'
self.indextitle['vr'] = 'Variable'
#
self.whichindex = {}
for name in self.indextitle:
self.whichindex[name] = []
def user_index(self, name, args):
if name in self.whichindex:
self.index(name, args)
else:
print('*** No index named', repr(name))
def do_cindex(self, args): self.index('cp', args)
def do_findex(self, args): self.index('fn', args)
def do_kindex(self, args): self.index('ky', args)
def do_pindex(self, args): self.index('pg', args)
def do_tindex(self, args): self.index('tp', args)
def do_vindex(self, args): self.index('vr', args)
def index(self, name, args):
self.whichindex[name].append((args, self.nodename))
self.htmlhelp.index(args, self.nodename)
def do_synindex(self, args):
words = args.split()
if len(words) != 2:
print('*** bad @synindex', args)
return
[old, new] = words
if old not in self.whichindex or \
new not in self.whichindex:
print('*** bad key(s) in @synindex', args)
return
if old != new and \
self.whichindex[old] is not self.whichindex[new]:
inew = self.whichindex[new]
inew[len(inew):] = self.whichindex[old]
self.whichindex[old] = inew
do_syncodeindex = do_synindex # XXX Should use code font
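    # For example, '@synindex vr fn' merges the variable index into the
    # function index: both names end up sharing one underlying entry list,
    # so later @vindex entries land in the function index.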
def do_printindex(self, args):
words = args.split()
for name in words:
if name in self.whichindex:
self.prindex(name)
else:
print('*** No index named', repr(name))
def prindex(self, name):
iscodeindex = (name not in self.noncodeindices)
index = self.whichindex[name]
if not index: return
if self.debugging:
            print('!'*self.debugging, '--- Generating',
                  self.indextitle[name], 'index')
# The node already provides a title
index1 = []
junkprog = re.compile('^(@[a-z]+)?{')
for key, node in index:
sortkey = key.lower()
# Remove leading `@cmd{' from sort key
# -- don't bother about the matching `}'
while 1:
mo = junkprog.match(sortkey)
if not mo:
break
i = mo.end()
sortkey = sortkey[i:]
index1.append((sortkey, key, node))
del index[:]
index1.sort()
self.write('<DL COMPACT>\n')
prevkey = prevnode = None
for sortkey, key, node in index1:
if (key, node) == (prevkey, prevnode):
continue
if self.debugging > 1: print('!'*self.debugging, key, ':', node)
self.write('<DT>')
if iscodeindex: key = '@code{' + key + '}'
if key != prevkey:
self.expand(key)
self.write('\n<DD><A HREF="%s">%s</A>\n' % (makefile(node), node))
prevkey, prevnode = key, node
self.write('</DL>\n')
# --- Final error reports ---
def report(self):
if self.unknown:
print('--- Unrecognized commands ---')
cmds = sorted(self.unknown.keys())
for cmd in cmds:
print(cmd.ljust(20), self.unknown[cmd])
class TexinfoParserHTML3(TexinfoParser):
COPYRIGHT_SYMBOL = "©"
FN_ID_PATTERN = "[%(id)s]"
FN_SOURCE_PATTERN = '<A ID=footnoteref%(id)s ' \
'HREF="#footnotetext%(id)s">' + FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<FN ID=footnotetext%(id)s>\n' \
'<P><A HREF="#footnoteref%(id)s">' + FN_ID_PATTERN \
+ '</A>\n%(text)s</P></FN>\n'
FN_HEADER = '<DIV CLASS=footnotes>\n <HR NOSHADE WIDTH=200>\n' \
' <STRONG><EM>Footnotes</EM></STRONG>\n <P>\n'
Node = HTML3Node
def bgn_quotation(self, args): self.write('<BQ>')
def end_quotation(self): self.write('</BQ>\n')
def bgn_example(self, args):
# this use of <CODE> would not be legal in HTML 2.0,
# but is in more recent DTDs.
self.nofill = self.nofill + 1
self.write('<PRE CLASS=example><CODE>')
def end_example(self):
self.write("</CODE></PRE>\n")
self.nofill = self.nofill - 1
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE CLASS=flushleft>\n')
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<DIV ALIGN=right CLASS=flushright><ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS></DIV>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<UL PLAIN CLASS=menu>\n')
self.write(' <LH>Menu</LH>\n')
def end_menu(self):
self.write('</UL>\n')
# rpyron 2002-05-07
class HTMLHelp:
"""
This class encapsulates support for HTML Help. Node names,
file names, menu items, index items, and image file names are
accumulated until a call to finalize(). At that time, three
output files are created in the current directory:
`helpbase`.hhp is a HTML Help Workshop project file.
It contains various information, some of
which I do not understand; I just copied
the default project info from a fresh
installation.
`helpbase`.hhc is the Contents file for the project.
`helpbase`.hhk is the Index file for the project.
When these files are used as input to HTML Help Workshop,
the resulting file will be named:
`helpbase`.chm
If none of the defaults in `helpbase`.hhp are changed,
the .CHM file will have Contents, Index, Search, and
Favorites tabs.
"""
codeprog = re.compile('@code{(.*?)}')
def __init__(self,helpbase,dirname):
self.helpbase = helpbase
self.dirname = dirname
self.projectfile = None
self.contentfile = None
self.indexfile = None
self.nodelist = []
self.nodenames = {} # nodename : index
self.nodeindex = {}
self.filenames = {} # filename : filename
self.indexlist = [] # (args,nodename) == (key,location)
self.current = ''
self.menudict = {}
self.dumped = {}
def addnode(self,name,next,prev,up,filename):
node = (name,next,prev,up,filename)
# add this file to dict
# retrieve list with self.filenames.values()
self.filenames[filename] = filename
# add this node to nodelist
self.nodeindex[name] = len(self.nodelist)
self.nodelist.append(node)
# set 'current' for menu items
self.current = name
self.menudict[self.current] = []
def menuitem(self,nodename):
menu = self.menudict[self.current]
menu.append(nodename)
def addimage(self,imagename):
self.filenames[imagename] = imagename
def index(self, args, nodename):
self.indexlist.append((args,nodename))
def beginmenu(self):
pass
def endmenu(self):
pass
def finalize(self):
if not self.helpbase:
return
# generate interesting filenames
resultfile = self.helpbase + '.chm'
projectfile = self.helpbase + '.hhp'
contentfile = self.helpbase + '.hhc'
indexfile = self.helpbase + '.hhk'
# generate a reasonable title
title = self.helpbase
# get the default topic file
(topname,topnext,topprev,topup,topfile) = self.nodelist[0]
defaulttopic = topfile
# PROJECT FILE
try:
fp = open(projectfile,'w')
print('[OPTIONS]', file=fp)
print('Auto Index=Yes', file=fp)
print('Binary TOC=No', file=fp)
print('Binary Index=Yes', file=fp)
print('Compatibility=1.1', file=fp)
            print('Compiled file=' + resultfile, file=fp)
            print('Contents file=' + contentfile, file=fp)
            print('Default topic=' + defaulttopic, file=fp)
            print('Error log file=ErrorLog.log', file=fp)
            print('Index file=' + indexfile, file=fp)
            print('Title=' + title, file=fp)
print('Display compile progress=Yes', file=fp)
print('Full-text search=Yes', file=fp)
print('Default window=main', file=fp)
print('', file=fp)
print('[WINDOWS]', file=fp)
print('main=,"' + contentfile + '","' + indexfile
+ '","","",,,,,0x23520,222,0x1046,[10,10,780,560],'
'0xB0000,,,,,,0', file=fp)
print('', file=fp)
print('[FILES]', file=fp)
print('', file=fp)
self.dumpfiles(fp)
fp.close()
except IOError as msg:
print(projectfile, ':', msg)
sys.exit(1)
# CONTENT FILE
try:
fp = open(contentfile,'w')
print('<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">', file=fp)
print('<!-- This file defines the table of contents -->', file=fp)
print('<HTML>', file=fp)
print('<HEAD>', file=fp)
print('<meta name="GENERATOR"'
'content="Microsoft® HTML Help Workshop 4.1">', file=fp)
print('<!-- Sitemap 1.0 -->', file=fp)
print('</HEAD>', file=fp)
print('<BODY>', file=fp)
print(' <OBJECT type="text/site properties">', file=fp)
print(' <param name="Window Styles" value="0x800025">', file=fp)
print(' <param name="comment" value="title:">', file=fp)
print(' <param name="comment" value="base:">', file=fp)
print(' </OBJECT>', file=fp)
self.dumpnodes(fp)
print('</BODY>', file=fp)
print('</HTML>', file=fp)
fp.close()
except IOError as msg:
print(contentfile, ':', msg)
sys.exit(1)
# INDEX FILE
try:
fp = open(indexfile ,'w')
print('<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">', file=fp)
print('<!-- This file defines the index -->', file=fp)
print('<HTML>', file=fp)
print('<HEAD>', file=fp)
print('<meta name="GENERATOR"'
'content="Microsoft® HTML Help Workshop 4.1">', file=fp)
print('<!-- Sitemap 1.0 -->', file=fp)
print('</HEAD>', file=fp)
print('<BODY>', file=fp)
print('<OBJECT type="text/site properties">', file=fp)
print('</OBJECT>', file=fp)
self.dumpindex(fp)
print('</BODY>', file=fp)
print('</HTML>', file=fp)
fp.close()
except IOError as msg:
print(indexfile , ':', msg)
sys.exit(1)
def dumpfiles(self, outfile=sys.stdout):
filelist = sorted(self.filenames.values())
for filename in filelist:
print(filename, file=outfile)
def dumpnodes(self, outfile=sys.stdout):
self.dumped = {}
if self.nodelist:
nodename, dummy, dummy, dummy, dummy = self.nodelist[0]
self.topnode = nodename
print('<UL>', file=outfile)
for node in self.nodelist:
self.dumpnode(node,0,outfile)
print('</UL>', file=outfile)
def dumpnode(self, node, indent=0, outfile=sys.stdout):
if node:
# Retrieve info for this node
(nodename,next,prev,up,filename) = node
self.current = nodename
# Have we been dumped already?
if nodename in self.dumped:
return
self.dumped[nodename] = 1
# Print info for this node
print(' '*indent, end=' ', file=outfile)
print('<LI><OBJECT type="text/sitemap">', end=' ', file=outfile)
print('<param name="Name" value="' + nodename +'">', end=' ', file=outfile)
print('<param name="Local" value="'+ filename +'">', end=' ', file=outfile)
print('</OBJECT>', file=outfile)
# Does this node have menu items?
try:
menu = self.menudict[nodename]
self.dumpmenu(menu,indent+2,outfile)
except KeyError:
pass
def dumpmenu(self, menu, indent=0, outfile=sys.stdout):
if menu:
currentnode = self.current
if currentnode != self.topnode: # XXX this is a hack
print(' '*indent + '<UL>', file=outfile)
indent += 2
for item in menu:
menunode = self.getnode(item)
self.dumpnode(menunode,indent,outfile)
if currentnode != self.topnode: # XXX this is a hack
print(' '*indent + '</UL>', file=outfile)
indent -= 2
def getnode(self, nodename):
try:
index = self.nodeindex[nodename]
return self.nodelist[index]
except KeyError:
return None
except IndexError:
return None
# (args,nodename) == (key,location)
def dumpindex(self, outfile=sys.stdout):
print('<UL>', file=outfile)
for (key,location) in self.indexlist:
key = self.codeexpand(key)
location = makefile(location)
location = self.dirname + '/' + location
print('<LI><OBJECT type="text/sitemap">', end=' ', file=outfile)
print('<param name="Name" value="' + key + '">', end=' ', file=outfile)
print('<param name="Local" value="' + location + '">', end=' ', file=outfile)
print('</OBJECT>', file=outfile)
print('</UL>', file=outfile)
def codeexpand(self, line):
co = self.codeprog.match(line)
if not co:
return line
bgn, end = co.span(0)
a, b = co.span(1)
line = line[:bgn] + line[a:b] + line[end:]
return line
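    # e.g. codeexpand('@code{len}') returns 'len'; lines that do not start
    # with @code{...} are returned unchanged.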
# Put @var{} around alphabetic substrings
def makevar(str):
return '@var{'+str+'}'
# Split a string in "words" according to findwordend
def splitwords(str, minlength):
words = []
i = 0
n = len(str)
while i < n:
while i < n and str[i] in ' \t\n': i = i+1
if i >= n: break
start = i
i = findwordend(str, i, n)
words.append(str[start:i])
while len(words) < minlength: words.append('')
return words
# Find the end of a "word", matching braces and interpreting @@ @{ @}
fwprog = re.compile('[@{} ]')
def findwordend(str, i, n):
level = 0
while i < n:
mo = fwprog.search(str, i)
if not mo:
break
i = mo.start()
c = str[i]; i = i+1
if c == '@': i = i+1 # Next character is not special
elif c == '{': level = level+1
elif c == '}': level = level-1
elif c == ' ' and level <= 0: return i-1
return n
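# Worked example: splitwords('@deffn {Special Form} if', 2) returns
# ['@deffn', '{Special Form}', 'if'] -- braces group words, '@' escapes
# the following character, and only top-level spaces split.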
# Convert a node name into a file name
def makefile(nodename):
nodename = nodename.strip()
return fixfunnychars(nodename) + '.html'
# Characters that are perfectly safe in filenames and hyperlinks
goodchars = string.ascii_letters + string.digits + '!@-=+.'
# Replace characters that aren't perfectly safe by dashes
# Underscores are bad since Cern HTTPD treats them as delimiters for
# encoding times, so you get mismatches if you compress your files:
# a.html.gz will map to a_b.html.gz
def fixfunnychars(addr):
i = 0
while i < len(addr):
c = addr[i]
if c not in goodchars:
c = '-'
addr = addr[:i] + c + addr[i+1:]
i = i + len(c)
return addr
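# e.g. fixfunnychars('What is Python?') -> 'What-is-Python-'
#      makefile('GNU/Linux FAQ')        -> 'GNU-Linux-FAQ.html'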
# Increment a string used as an enumeration
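# Worked examples (matching the logic below):
#   increment('')   -> '1'      increment('8')  -> '9'
#   increment('9')  -> '10'     increment('z')  -> 'aa'
#   increment('az') -> 'ba'     increment('!')  -> '!'   (not incrementable)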
def increment(s):
if not s:
return '1'
for sequence in string.digits, string.ascii_lowercase, string.ascii_uppercase:
lastc = s[-1]
if lastc in sequence:
i = sequence.index(lastc) + 1
if i >= len(sequence):
if len(s) == 1:
s = sequence[0]*2
if s == '00':
s = '10'
else:
s = increment(s[:-1]) + sequence[0]
else:
s = s[:-1] + sequence[i]
return s
return s # Don't increment
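# Command-line driver.  An illustrative invocation (the script, input file
# and output directory names here are just examples):
#   python texi2html.py -c -H pydoc python.texi ./html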
def test():
import sys
debugging = 0
print_headers = 0
cont = 0
html3 = 0
    helpbase = ''   # stays empty (no HTML Help output) unless -H is given
    while sys.argv[1] == '-d':
debugging = debugging + 1
del sys.argv[1]
if sys.argv[1] == '-p':
print_headers = 1
del sys.argv[1]
if sys.argv[1] == '-c':
cont = 1
del sys.argv[1]
if sys.argv[1] == '-3':
html3 = 1
del sys.argv[1]
if sys.argv[1] == '-H':
helpbase = sys.argv[2]
del sys.argv[1:3]
if len(sys.argv) != 3:
        print('usage: texi2hh [-d [-d]] [-p] [-c] [-3] [-H htmlhelp]',
              'inputfile outputdirectory')
sys.exit(2)
if html3:
parser = TexinfoParserHTML3()
else:
parser = TexinfoParser()
parser.cont = cont
parser.debugging = debugging
parser.print_headers = print_headers
file = sys.argv[1]
dirname = sys.argv[2]
parser.setdirname(dirname)
parser.setincludedir(os.path.dirname(file))
htmlhelp = HTMLHelp(helpbase, dirname)
parser.sethtmlhelp(htmlhelp)
try:
fp = open(file, 'r')
except IOError as msg:
print(file, ':', msg)
sys.exit(1)
parser.parse(fp)
fp.close()
parser.report()
htmlhelp.finalize()
if __name__ == "__main__":
test()
| lgpl-3.0 |